get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are modified).

put:
Update a patch.

GET /api/patches/132189/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 132189,
    "url": "http://patches.dpdk.org/api/patches/132189/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230929081309.464565-13-amitprakashs@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230929081309.464565-13-amitprakashs@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230929081309.464565-13-amitprakashs@marvell.com",
    "date": "2023-09-29T08:13:09",
    "name": "[v7,12/12] app/test: add event DMA adapter auto-test",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "fc33c5208ff12e8729f350d769a756234c08cd34",
    "submitter": {
        "id": 2699,
        "url": "http://patches.dpdk.org/api/people/2699/?format=api",
        "name": "Amit Prakash Shukla",
        "email": "amitprakashs@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230929081309.464565-13-amitprakashs@marvell.com/mbox/",
    "series": [
        {
            "id": 29691,
            "url": "http://patches.dpdk.org/api/series/29691/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29691",
            "date": "2023-09-29T08:12:57",
            "name": "event DMA adapter library support",
            "version": 7,
            "mbox": "http://patches.dpdk.org/series/29691/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/132189/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/132189/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 219DE42671;\n\tFri, 29 Sep 2023 10:15:09 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 571324069F;\n\tFri, 29 Sep 2023 10:15:05 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id D396440277\n for <dev@dpdk.org>; Fri, 29 Sep 2023 10:15:03 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 38T273cw031730; Fri, 29 Sep 2023 01:15:03 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3td7y6ve3u-2\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Fri, 29 Sep 2023 01:15:02 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Fri, 29 Sep 2023 01:14:56 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Fri, 29 Sep 2023 01:14:56 -0700",
            "from localhost.localdomain (unknown [10.28.36.157])\n by maili.marvell.com (Postfix) with ESMTP id 532623F706F;\n Fri, 29 Sep 2023 01:14:52 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=bdIoxwP76CdLKEwbzrVaBBxJUkNnLwzX7vMtDys7jZ0=;\n b=TObRbPj9SntvqR6c7hSce5mWLuxZQV/lyWbQytXCVXDYPSia996eYxEGsw9Y1UivjhZw\n vY2obMfhIR+CQK/u8U4imPJzH+odcyy+dH2oiwZuZos/GAvdHvTpCUpFfQ+blZXFYuF9\n Nx0UW/4JYqWX3/uTMFbBVVesp9W/3xpQUcAPY5wB5cwJAXuzH3UKAayE2ccSAh4iCl4p\n oAwlVHDWpBWJ2pya3NYecm/IT4z7VPxYbABS7XHC+sKFknV/7u0hWDdkhmJhhApB0f+B\n W2OdImu/L4GHiGNFJ0GF5Fs8tC15bqfYrqN8iFmsXkqKDukR7WfLhvLoNiQ2gHm5MrjP Kw==",
        "From": "Amit Prakash Shukla <amitprakashs@marvell.com>",
        "To": "Thomas Monjalon <thomas@monjalon.net>, Amit Prakash Shukla\n <amitprakashs@marvell.com>",
        "CC": "<dev@dpdk.org>, <jerinj@marvell.com>, <fengchengwen@huawei.com>,\n <kevin.laatz@intel.com>, <bruce.richardson@intel.com>,\n <conor.walsh@intel.com>, <vattunuru@marvell.com>, <g.singh@nxp.com>,\n <sachin.saxena@oss.nxp.com>, <hemant.agrawal@nxp.com>,\n <cheng1.jiang@intel.com>, <ndabilpuram@marvell.com>,\n <anoobj@marvell.com>, <mb@smartsharesystems.com>",
        "Subject": "[PATCH v7 12/12] app/test: add event DMA adapter auto-test",
        "Date": "Fri, 29 Sep 2023 13:43:09 +0530",
        "Message-ID": "<20230929081309.464565-13-amitprakashs@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230929081309.464565-1-amitprakashs@marvell.com>",
        "References": "<20230928164959.340575-1-amitprakashs@marvell.com>\n <20230929081309.464565-1-amitprakashs@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "qhZ33cnKx6eyWaSz8VQpgxvjY6pG-PKY",
        "X-Proofpoint-GUID": "qhZ33cnKx6eyWaSz8VQpgxvjY6pG-PKY",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.267,Aquarius:18.0.980,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-09-29_06,2023-09-28_03,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Added testsuite to test the dma adapter functionality.\nThe testsuite detects event and DMA device capability\nand accordingly dma adapter is configured and modes are\ntested. Test command:\n\nsudo <build dir>/app/test/dpdk-test --vdev=dma_skeleton \\\nevent_dma_adapter_autotest\n\nSigned-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>\n---\n MAINTAINERS                       |   1 +\n app/test/meson.build              |   1 +\n app/test/test_event_dma_adapter.c | 805 ++++++++++++++++++++++++++++++\n 3 files changed, 807 insertions(+)\n create mode 100644 app/test/test_event_dma_adapter.c",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 4ebbbe8bb3..92c0b47618 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -544,6 +544,7 @@ Eventdev DMA Adapter API\n M: Amit Prakash Shukla <amitprakashs@marvell.com>\n T: git://dpdk.org/next/dpdk-next-eventdev\n F: lib/eventdev/*dma_adapter*\n+F: app/test/test_event_dma_adapter.c\n F: doc/guides/prog_guide/event_dma_adapter.rst\n \n Raw device API\ndiff --git a/app/test/meson.build b/app/test/meson.build\nindex 05bae9216d..7caf5ae5fc 100644\n--- a/app/test/meson.build\n+++ b/app/test/meson.build\n@@ -66,6 +66,7 @@ source_file_deps = {\n     'test_errno.c': [],\n     'test_ethdev_link.c': ['ethdev'],\n     'test_event_crypto_adapter.c': ['cryptodev', 'eventdev', 'bus_vdev'],\n+    'test_event_dma_adapter.c': ['dmadev', 'eventdev', 'bus_vdev'],\n     'test_event_eth_rx_adapter.c': ['ethdev', 'eventdev', 'bus_vdev'],\n     'test_event_eth_tx_adapter.c': ['bus_vdev', 'ethdev', 'net_ring', 'eventdev'],\n     'test_event_ring.c': ['eventdev'],\ndiff --git a/app/test/test_event_dma_adapter.c b/app/test/test_event_dma_adapter.c\nnew file mode 100644\nindex 0000000000..1e193f4b52\n--- /dev/null\n+++ b/app/test/test_event_dma_adapter.c\n@@ -0,0 +1,805 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2023 Marvell.\n+ */\n+\n+#include \"test.h\"\n+#include <string.h>\n+#include <rte_common.h>\n+#include <rte_malloc.h>\n+#include <rte_mempool.h>\n+#include <rte_mbuf.h>\n+#include <rte_random.h>\n+\n+#ifdef RTE_EXEC_ENV_WINDOWS\n+static int\n+test_event_dma_adapter(void)\n+{\n+\tprintf(\"event_dma_adapter not supported on Windows, skipping test\\n\");\n+\treturn TEST_SKIPPED;\n+}\n+\n+#else\n+\n+#include <rte_bus_vdev.h>\n+#include <rte_dmadev.h>\n+#include <rte_eventdev.h>\n+#include <rte_event_dma_adapter.h>\n+#include <rte_service.h>\n+\n+#define NUM_MBUFS                 (8191)\n+#define MBUF_CACHE_SIZE           (256)\n+#define TEST_APP_PORT_ID           0\n+#define TEST_APP_EV_QUEUE_ID       
0\n+#define TEST_APP_EV_PRIORITY       0\n+#define TEST_APP_EV_FLOWID         0xAABB\n+#define TEST_DMA_EV_QUEUE_ID       1\n+#define TEST_ADAPTER_ID            0\n+#define TEST_DMA_DEV_ID            0\n+#define TEST_DMA_VCHAN_ID          0\n+#define PACKET_LENGTH              1024\n+#define NB_TEST_PORTS              1\n+#define NB_TEST_QUEUES             2\n+#define NUM_CORES                  2\n+#define DMA_OP_POOL_SIZE           128\n+#define TEST_MAX_OP                32\n+#define TEST_RINGSIZE              512\n+\n+#define MBUF_SIZE                  (RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)\n+\n+/* Handle log statements in same manner as test macros */\n+#define LOG_DBG(...)    RTE_LOG(DEBUG, EAL, __VA_ARGS__)\n+\n+struct event_dma_adapter_test_params {\n+\tstruct rte_mempool *src_mbuf_pool;\n+\tstruct rte_mempool *dst_mbuf_pool;\n+\tstruct rte_mempool *op_mpool;\n+\tuint8_t dma_event_port_id;\n+\tuint8_t internal_port_op_fwd;\n+};\n+\n+struct rte_event dma_response_info = {\n+\t.queue_id = TEST_APP_EV_QUEUE_ID,\n+\t.sched_type = RTE_SCHED_TYPE_ATOMIC,\n+\t.flow_id = TEST_APP_EV_FLOWID,\n+\t.priority = TEST_APP_EV_PRIORITY\n+};\n+\n+static struct event_dma_adapter_test_params params;\n+static uint8_t dma_adapter_setup_done;\n+static uint32_t slcore_id;\n+static int evdev;\n+\n+static int\n+send_recv_ev(struct rte_event *ev)\n+{\n+\tstruct rte_event recv_ev[TEST_MAX_OP];\n+\tuint16_t nb_enqueued = 0;\n+\tint i = 0;\n+\n+\tif (params.internal_port_op_fwd) {\n+\t\tnb_enqueued = rte_event_dma_adapter_enqueue(evdev, TEST_APP_PORT_ID, ev,\n+\t\t\t\t\t\t\t    TEST_MAX_OP);\n+\t} else {\n+\t\twhile (nb_enqueued < TEST_MAX_OP) {\n+\t\t\tnb_enqueued += rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID,\n+\t\t\t\t\t\t\t       &ev[nb_enqueued], TEST_MAX_OP -\n+\t\t\t\t\t\t\t       nb_enqueued);\n+\t\t}\n+\t}\n+\n+\tTEST_ASSERT_EQUAL(nb_enqueued, TEST_MAX_OP, \"Failed to send event to dma adapter\\n\");\n+\n+\twhile (i < TEST_MAX_OP) {\n+\t\tif (rte_event_dequeue_burst(evdev, 
TEST_APP_PORT_ID, &recv_ev[i], 1, 0) != 1)\n+\t\t\tcontinue;\n+\t\ti++;\n+\t}\n+\n+\tTEST_ASSERT_EQUAL(i, TEST_MAX_OP, \"Test failed. Failed to dequeue events.\\n\");\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_dma_adapter_stats(void)\n+{\n+\tstruct rte_event_dma_adapter_stats stats;\n+\n+\trte_event_dma_adapter_stats_get(TEST_ADAPTER_ID, &stats);\n+\tprintf(\" +------------------------------------------------------+\\n\");\n+\tprintf(\" + DMA adapter stats for instance %u:\\n\", TEST_ADAPTER_ID);\n+\tprintf(\" + Event port poll count         0x%\" PRIx64 \"\\n\",\n+\t\tstats.event_poll_count);\n+\tprintf(\" + Event dequeue count           0x%\" PRIx64 \"\\n\",\n+\t\tstats.event_deq_count);\n+\tprintf(\" + DMA dev enqueue count         0x%\" PRIx64 \"\\n\",\n+\t\tstats.dma_enq_count);\n+\tprintf(\" + DMA dev enqueue failed count  0x%\" PRIx64 \"\\n\",\n+\t\tstats.dma_enq_fail_count);\n+\tprintf(\" + DMA dev dequeue count         0x%\" PRIx64 \"\\n\",\n+\t\tstats.dma_deq_count);\n+\tprintf(\" + Event enqueue count           0x%\" PRIx64 \"\\n\",\n+\t\tstats.event_enq_count);\n+\tprintf(\" + Event enqueue retry count     0x%\" PRIx64 \"\\n\",\n+\t\tstats.event_enq_retry_count);\n+\tprintf(\" + Event enqueue fail count      0x%\" PRIx64 \"\\n\",\n+\t\tstats.event_enq_fail_count);\n+\tprintf(\" +------------------------------------------------------+\\n\");\n+\n+\trte_event_dma_adapter_stats_reset(TEST_ADAPTER_ID);\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_dma_adapter_params(void)\n+{\n+\tstruct rte_event_dma_adapter_runtime_params out_params;\n+\tstruct rte_event_dma_adapter_runtime_params in_params;\n+\tstruct rte_event event;\n+\tuint32_t cap;\n+\tint err, rc;\n+\n+\terr = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);\n+\tTEST_ASSERT_SUCCESS(err, \"Failed to get adapter capabilities\\n\");\n+\n+\tif (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {\n+\t\terr = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, 
TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t\t    TEST_DMA_VCHAN_ID, &event);\n+\t} else\n+\t\terr = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t\t    TEST_DMA_VCHAN_ID, NULL);\n+\n+\tTEST_ASSERT_SUCCESS(err, \"Failed to add vchan\\n\");\n+\n+\terr = rte_event_dma_adapter_runtime_params_init(&in_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\terr = rte_event_dma_adapter_runtime_params_init(&out_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\n+\t/* Case 1: Get the default value of mbufs processed by adapter */\n+\terr = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);\n+\tif (err == -ENOTSUP) {\n+\t\trc = TEST_SKIPPED;\n+\t\tgoto vchan_del;\n+\t}\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\n+\t/* Case 2: Set max_nb = 32 (=BATCH_SEIZE) */\n+\tin_params.max_nb = 32;\n+\n+\terr = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\n+\terr = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\tTEST_ASSERT(in_params.max_nb == out_params.max_nb, \"Expected %u got %u\",\n+\t\t    in_params.max_nb, out_params.max_nb);\n+\n+\t/* Case 3: Set max_nb = 192 */\n+\tin_params.max_nb = 192;\n+\n+\terr = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\n+\terr = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\tTEST_ASSERT(in_params.max_nb == out_params.max_nb, \"Expected %u got %u\",\n+\t\t    in_params.max_nb, out_params.max_nb);\n+\n+\t/* Case 4: Set max_nb = 256 */\n+\tin_params.max_nb = 256;\n+\n+\terr = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\n+\terr = 
rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\tTEST_ASSERT(in_params.max_nb == out_params.max_nb, \"Expected %u got %u\",\n+\t\t    in_params.max_nb, out_params.max_nb);\n+\n+\t/* Case 5: Set max_nb = 30(<BATCH_SIZE) */\n+\tin_params.max_nb = 30;\n+\n+\terr = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\n+\terr = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\tTEST_ASSERT(in_params.max_nb == out_params.max_nb, \"Expected %u got %u\",\n+\t\t    in_params.max_nb, out_params.max_nb);\n+\n+\t/* Case 6: Set max_nb = 512 */\n+\tin_params.max_nb = 512;\n+\n+\terr = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\n+\terr = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);\n+\tTEST_ASSERT(err == 0, \"Expected 0 got %d\", err);\n+\tTEST_ASSERT(in_params.max_nb == out_params.max_nb, \"Expected %u got %u\",\n+\t\t    in_params.max_nb, out_params.max_nb);\n+\n+\trc = TEST_SUCCESS;\n+vchan_del:\n+\terr = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t    TEST_DMA_VCHAN_ID);\n+\tTEST_ASSERT_SUCCESS(err, \"Failed to delete vchan\\n\");\n+\n+\treturn rc;\n+}\n+\n+static int\n+test_op_forward_mode(void)\n+{\n+\tstruct rte_mbuf *src_mbuf[TEST_MAX_OP];\n+\tstruct rte_mbuf *dst_mbuf[TEST_MAX_OP];\n+\tstruct rte_event_dma_adapter_op *op;\n+\tstruct rte_event ev[TEST_MAX_OP];\n+\tstruct rte_event response_info;\n+\tint ret, i;\n+\n+\tret = rte_pktmbuf_alloc_bulk(params.src_mbuf_pool, src_mbuf, TEST_MAX_OP);\n+\tTEST_ASSERT_SUCCESS(ret, \"alloc src mbufs failed.\\n\");\n+\n+\tret = rte_pktmbuf_alloc_bulk(params.dst_mbuf_pool, dst_mbuf, TEST_MAX_OP);\n+\tTEST_ASSERT_SUCCESS(ret, \"alloc dst mbufs 
failed.\\n\");\n+\n+\tfor (i = 0; i < TEST_MAX_OP; i++) {\n+\t\tmemset(rte_pktmbuf_mtod(src_mbuf[i], void *), rte_rand(), PACKET_LENGTH);\n+\t\tmemset(rte_pktmbuf_mtod(dst_mbuf[i], void *), 0, PACKET_LENGTH);\n+\t}\n+\n+\tfor (i = 0; i < TEST_MAX_OP; i++) {\n+\t\trte_mempool_get(params.op_mpool, (void **)&op);\n+\t\tTEST_ASSERT_NOT_NULL(op, \"Failed to allocate dma operation struct\\n\");\n+\n+\t\top->src_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);\n+\t\top->dst_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);\n+\n+\t\t/* Update Op */\n+\t\top->src_seg->addr = rte_pktmbuf_iova(src_mbuf[i]);\n+\t\top->dst_seg->addr = rte_pktmbuf_iova(dst_mbuf[i]);\n+\t\top->src_seg->length = PACKET_LENGTH;\n+\t\top->dst_seg->length = PACKET_LENGTH;\n+\t\top->nb_src = 1;\n+\t\top->nb_dst = 1;\n+\t\top->flags = RTE_DMA_OP_FLAG_SUBMIT;\n+\t\top->op_mp = params.op_mpool;\n+\t\top->dma_dev_id = TEST_DMA_DEV_ID;\n+\t\top->vchan = TEST_DMA_VCHAN_ID;\n+\n+\t\tresponse_info.event = dma_response_info.event;\n+\t\trte_memcpy((uint8_t *)op + sizeof(struct rte_event_dma_adapter_op), &response_info,\n+\t\t\t   sizeof(struct rte_event));\n+\n+\t\t/* Fill in event info and update event_ptr with rte_event_dma_adapter_op */\n+\t\tmemset(&ev[i], 0, sizeof(struct rte_event));\n+\t\tev[i].event = 0;\n+\t\tev[i].event_type = RTE_EVENT_TYPE_DMADEV;\n+\t\tev[i].queue_id = TEST_DMA_EV_QUEUE_ID;\n+\t\tev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;\n+\t\tev[i].flow_id = 0xAABB;\n+\t\tev[i].event_ptr = op;\n+\t}\n+\n+\tret = send_recv_ev(ev);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to send/receive event to dma adapter\\n\");\n+\n+\ttest_dma_adapter_stats();\n+\n+\tfor (i = 0; i < TEST_MAX_OP; i++) {\n+\t\top = ev[i].event_ptr;\n+\t\tret = memcmp(rte_pktmbuf_mtod(src_mbuf[i], void *),\n+\t\t\t     rte_pktmbuf_mtod(dst_mbuf[i], void *), PACKET_LENGTH);\n+\n+\t\tTEST_ASSERT_EQUAL(ret, 0, \"Data mismatch for dma 
adapter\\n\");\n+\n+\t\trte_free(op->src_seg);\n+\t\trte_free(op->dst_seg);\n+\t\trte_mempool_put(op->op_mp, op);\n+\t}\n+\n+\trte_pktmbuf_free_bulk(src_mbuf, TEST_MAX_OP);\n+\trte_pktmbuf_free_bulk(dst_mbuf, TEST_MAX_OP);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+map_adapter_service_core(void)\n+{\n+\tuint32_t adapter_service_id;\n+\tint ret;\n+\n+\tif (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, &adapter_service_id) == 0) {\n+\t\tuint32_t core_list[NUM_CORES];\n+\n+\t\tret = rte_service_lcore_list(core_list, NUM_CORES);\n+\t\tTEST_ASSERT(ret >= 0, \"Failed to get service core list!\");\n+\n+\t\tif (core_list[0] != slcore_id) {\n+\t\t\tTEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),\n+\t\t\t\t\t\t\"Failed to add service core\");\n+\t\t\tTEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),\n+\t\t\t\t\t\t\"Failed to start service core\");\n+\t\t}\n+\n+\t\tTEST_ASSERT_SUCCESS(rte_service_map_lcore_set(\n+\t\t\t\t\tadapter_service_id, slcore_id, 1),\n+\t\t\t\t\t\"Failed to map adapter service\");\n+\t}\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_with_op_forward_mode(void)\n+{\n+\tuint32_t cap;\n+\tint ret;\n+\n+\tret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to get adapter capabilities\\n\");\n+\n+\tif (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&\n+\t\t\t!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))\n+\t\tmap_adapter_service_core();\n+\telse {\n+\t\tif (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))\n+\t\t\treturn TEST_SKIPPED;\n+\t}\n+\n+\tTEST_ASSERT_SUCCESS(rte_event_dma_adapter_start(TEST_ADAPTER_ID),\n+\t\t\t\"Failed to start event dma adapter\");\n+\n+\tret = test_op_forward_mode();\n+\tTEST_ASSERT_SUCCESS(ret, \"DMA - FORWARD mode test failed\\n\");\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+configure_dmadev(void)\n+{\n+\tconst struct rte_dma_conf conf = { .nb_vchans = 1};\n+\tconst struct rte_dma_vchan_conf qconf = 
{\n+\t\t.direction = RTE_DMA_DIR_MEM_TO_MEM,\n+\t\t.nb_desc = TEST_RINGSIZE,\n+\t};\n+\tstruct rte_dma_info info;\n+\tunsigned int elt_size;\n+\tint ret;\n+\n+\tret = rte_dma_count_avail();\n+\tRTE_TEST_ASSERT_FAIL(ret, \"No dma devices found!\\n\");\n+\n+\tret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);\n+\tTEST_ASSERT_SUCCESS(ret, \"Error with rte_dma_info_get()\\n\");\n+\n+\tif (info.max_vchans < 1)\n+\t\tRTE_LOG(ERR, USER1, \"Error, no channels available on device id %u\\n\",\n+\t\t\t\tTEST_DMA_DEV_ID);\n+\n+\tif (rte_dma_configure(TEST_DMA_DEV_ID, &conf) != 0)\n+\t\tRTE_LOG(ERR, USER1, \"Error with rte_dma_configure()\\n\");\n+\n+\tif (rte_dma_vchan_setup(TEST_DMA_DEV_ID, TEST_DMA_VCHAN_ID, &qconf) < 0)\n+\t\tRTE_LOG(ERR, USER1, \"Error with vchan configuration\\n\");\n+\n+\tret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);\n+\tif (ret != 0 || info.nb_vchans != 1)\n+\t\tRTE_LOG(ERR, USER1, \"Error, no configured vhcan reported on device id %u\\n\",\n+\t\t\t\tTEST_DMA_DEV_ID);\n+\n+\tparams.src_mbuf_pool = rte_pktmbuf_pool_create(\"DMA_ADAPTER_SRC_MBUFPOOL\", NUM_MBUFS,\n+\t\t\t\t\t\t       MBUF_CACHE_SIZE, 0, MBUF_SIZE,\n+\t\t\t\t\t\t       rte_socket_id());\n+\tRTE_TEST_ASSERT_NOT_NULL(params.src_mbuf_pool, \"Can't create DMA_SRC_MBUFPOOL\\n\");\n+\n+\tparams.dst_mbuf_pool = rte_pktmbuf_pool_create(\"DMA_ADAPTER_DST_MBUFPOOL\", NUM_MBUFS,\n+\t\t\t\t\t\t       MBUF_CACHE_SIZE, 0, MBUF_SIZE,\n+\t\t\t\t\t\t       rte_socket_id());\n+\tRTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, \"Can't create DMA_DST_MBUFPOOL\\n\");\n+\n+\telt_size = sizeof(struct rte_event_dma_adapter_op) + sizeof(struct rte_event);\n+\tparams.op_mpool = rte_mempool_create(\"EVENT_DMA_OP_POOL\", DMA_OP_POOL_SIZE, elt_size, 0,\n+\t\t\t\t\t     0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);\n+\tRTE_TEST_ASSERT_NOT_NULL(params.op_mpool, \"Can't create DMA_OP_POOL\\n\");\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static inline void\n+evdev_set_conf_values(struct rte_event_dev_config *dev_conf, struct 
rte_event_dev_info *info)\n+{\n+\tmemset(dev_conf, 0, sizeof(struct rte_event_dev_config));\n+\tdev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;\n+\tdev_conf->nb_event_ports = NB_TEST_PORTS;\n+\tdev_conf->nb_event_queues = NB_TEST_QUEUES;\n+\tdev_conf->nb_event_queue_flows = info->max_event_queue_flows;\n+\tdev_conf->nb_event_port_dequeue_depth =\n+\t\t\tinfo->max_event_port_dequeue_depth;\n+\tdev_conf->nb_event_port_enqueue_depth =\n+\t\t\tinfo->max_event_port_enqueue_depth;\n+\tdev_conf->nb_event_port_enqueue_depth =\n+\t\t\tinfo->max_event_port_enqueue_depth;\n+\tdev_conf->nb_events_limit =\n+\t\t\tinfo->max_num_events;\n+}\n+\n+static int\n+configure_eventdev(void)\n+{\n+\tstruct rte_event_queue_conf queue_conf;\n+\tstruct rte_event_dev_config devconf;\n+\tstruct rte_event_dev_info info;\n+\tuint32_t queue_count;\n+\tuint32_t port_count;\n+\tuint8_t qid;\n+\tint ret;\n+\n+\tif (!rte_event_dev_count()) {\n+\t\t/* If there is no hardware eventdev, or no software vdev was\n+\t\t * specified on the command line, create an instance of\n+\t\t * event_sw.\n+\t\t */\n+\t\tLOG_DBG(\"Failed to find a valid event device... 
\"\n+\t\t\t\t\"testing with event_sw device\\n\");\n+\t\tTEST_ASSERT_SUCCESS(rte_vdev_init(\"event_sw0\", NULL),\n+\t\t\t\t\"Error creating eventdev\");\n+\t\tevdev = rte_event_dev_get_dev_id(\"event_sw0\");\n+\t}\n+\n+\tret = rte_event_dev_info_get(evdev, &info);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to get event dev info\\n\");\n+\n+\tevdev_set_conf_values(&devconf, &info);\n+\n+\tret = rte_event_dev_configure(evdev, &devconf);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to configure eventdev\\n\");\n+\n+\t/* Set up event queue */\n+\tret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count);\n+\tTEST_ASSERT_SUCCESS(ret, \"Queue count get failed\\n\");\n+\tTEST_ASSERT_EQUAL(queue_count, 2, \"Unexpected queue count\\n\");\n+\n+\tqid = TEST_APP_EV_QUEUE_ID;\n+\tret = rte_event_queue_setup(evdev, qid, NULL);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to setup queue=%d\\n\", qid);\n+\n+\tqueue_conf.nb_atomic_flows = info.max_event_queue_flows;\n+\tqueue_conf.nb_atomic_order_sequences = 32;\n+\tqueue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;\n+\tqueue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;\n+\tqueue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;\n+\n+\tqid = TEST_DMA_EV_QUEUE_ID;\n+\tret = rte_event_queue_setup(evdev, qid, &queue_conf);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to setup queue=%u\\n\", qid);\n+\n+\t/* Set up event port */\n+\tret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t&port_count);\n+\tTEST_ASSERT_SUCCESS(ret, \"Port count get failed\\n\");\n+\tTEST_ASSERT_EQUAL(port_count, 1, \"Unexpected port count\\n\");\n+\n+\tret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to setup port=%d\\n\",\n+\t\t\tTEST_APP_PORT_ID);\n+\n+\tqid = TEST_APP_EV_QUEUE_ID;\n+\tret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);\n+\tTEST_ASSERT(ret >= 0, \"Failed to link queue port=%d\\n\",\n+\t\t\tTEST_APP_PORT_ID);\n+\n+\treturn 
TEST_SUCCESS;\n+}\n+\n+static void\n+test_dma_adapter_free(void)\n+{\n+\trte_event_dma_adapter_free(TEST_ADAPTER_ID);\n+}\n+\n+static int\n+test_dma_adapter_create(void)\n+{\n+\tstruct rte_event_dev_info evdev_info = {0};\n+\tstruct rte_event_port_conf conf = {0};\n+\tint ret;\n+\n+\tret = rte_event_dev_info_get(evdev, &evdev_info);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to create event dma adapter\\n\");\n+\n+\tconf.new_event_threshold = evdev_info.max_num_events;\n+\tconf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;\n+\tconf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;\n+\n+\t/* Create adapter with default port creation callback */\n+\tret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, 0);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to create event dma adapter\\n\");\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_dma_adapter_vchan_add_del(void)\n+{\n+\tstruct rte_event event;\n+\tuint32_t cap;\n+\tint ret;\n+\n+\tret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to get adapter capabilities\\n\");\n+\n+\tif (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {\n+\t\tret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t\t    TEST_DMA_VCHAN_ID, &event);\n+\t} else\n+\t\tret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t\t    TEST_DMA_VCHAN_ID, NULL);\n+\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to create add vchan\\n\");\n+\n+\tret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t    TEST_DMA_VCHAN_ID);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to delete vchan\\n\");\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+configure_event_dma_adapter(enum rte_event_dma_adapter_mode mode)\n+{\n+\tstruct rte_event_dev_info evdev_info = {0};\n+\tstruct rte_event_port_conf conf = {0};\n+\tstruct rte_event event;\n+\tuint32_t cap;\n+\tint ret;\n+\n+\tret = 
rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to get adapter capabilities\\n\");\n+\n+\t/* Skip mode and capability mismatch check for SW eventdev */\n+\tif (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&\n+\t\t\t!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&\n+\t\t\t!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))\n+\t\tgoto adapter_create;\n+\n+\tif (mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) {\n+\t\tif (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)\n+\t\t\tparams.internal_port_op_fwd = 1;\n+\t\telse\n+\t\t\treturn -ENOTSUP;\n+\t}\n+\n+adapter_create:\n+\tret = rte_event_dev_info_get(evdev, &evdev_info);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to create event dma adapter\\n\");\n+\n+\tconf.new_event_threshold = evdev_info.max_num_events;\n+\tconf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;\n+\tconf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;\n+\n+\t/* Create adapter with default port creation callback */\n+\tret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, mode);\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to create event dma adapter\\n\");\n+\n+\tif (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {\n+\t\tret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t\t    TEST_DMA_VCHAN_ID, &event);\n+\t} else\n+\t\tret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t\t    TEST_DMA_VCHAN_ID, NULL);\n+\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to add vchan\\n\");\n+\n+\tif (!params.internal_port_op_fwd) {\n+\t\tret = rte_event_dma_adapter_event_port_get(TEST_ADAPTER_ID,\n+\t\t\t\t\t\t\t   &params.dma_event_port_id);\n+\t\tTEST_ASSERT_SUCCESS(ret, \"Failed to get event port\\n\");\n+\t}\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static void\n+test_dma_adapter_stop(void)\n+{\n+\tuint32_t evdev_service_id, adapter_service_id;\n+\n+\t/* retrieve service ids & stop services 
*/\n+\tif (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID,\n+\t\t\t\t&adapter_service_id) == 0) {\n+\t\trte_service_runstate_set(adapter_service_id, 0);\n+\t\trte_service_lcore_stop(slcore_id);\n+\t\trte_service_lcore_del(slcore_id);\n+\t\trte_event_dma_adapter_stop(TEST_ADAPTER_ID);\n+\t}\n+\n+\tif (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {\n+\t\trte_service_runstate_set(evdev_service_id, 0);\n+\t\trte_service_lcore_stop(slcore_id);\n+\t\trte_service_lcore_del(slcore_id);\n+\t\trte_dma_stop(TEST_DMA_DEV_ID);\n+\t\trte_event_dev_stop(evdev);\n+\t} else {\n+\t\trte_dma_stop(TEST_DMA_DEV_ID);\n+\t\trte_event_dev_stop(evdev);\n+\t}\n+}\n+\n+static int\n+test_dma_adapter_conf(enum rte_event_dma_adapter_mode mode)\n+{\n+\tuint32_t evdev_service_id;\n+\tuint8_t qid;\n+\tint ret;\n+\n+\tif (!dma_adapter_setup_done) {\n+\t\tret = configure_event_dma_adapter(mode);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tif (!params.internal_port_op_fwd) {\n+\t\t\tqid = TEST_DMA_EV_QUEUE_ID;\n+\t\t\tret = rte_event_port_link(evdev,\n+\t\t\t\t\tparams.dma_event_port_id, &qid, NULL, 1);\n+\t\t\tTEST_ASSERT(ret >= 0, \"Failed to link queue %d \"\n+\t\t\t\t\t\"port=%u\\n\", qid,\n+\t\t\t\t\tparams.dma_event_port_id);\n+\t\t}\n+\t\tdma_adapter_setup_done = 1;\n+\t}\n+\n+\t/* retrieve service ids */\n+\tif (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {\n+\t\t/* add a service core and start it */\n+\t\tTEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),\n+\t\t\t\t\"Failed to add service core\");\n+\t\tTEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),\n+\t\t\t\t\"Failed to start service core\");\n+\n+\t\t/* map services to it */\n+\t\tTEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,\n+\t\t\t\t\tslcore_id, 1), \"Failed to map evdev service\");\n+\n+\t\t/* set services to running */\n+\t\tTEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,\n+\t\t\t\t\t1), \"Failed to start evdev service\");\n+\t}\n+\n+\t/* start the 
eventdev */\n+\tTEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),\n+\t\t\t\"Failed to start event device\");\n+\n+\t/* start the dma dev */\n+\tTEST_ASSERT_SUCCESS(rte_dma_start(TEST_DMA_DEV_ID),\n+\t\t\t\"Failed to start dma device\");\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_dma_adapter_conf_op_forward_mode(void)\n+{\n+\tenum rte_event_dma_adapter_mode mode;\n+\n+\tmode = RTE_EVENT_DMA_ADAPTER_OP_FORWARD;\n+\n+\treturn test_dma_adapter_conf(mode);\n+}\n+\n+static int\n+testsuite_setup(void)\n+{\n+\tint ret;\n+\n+\tslcore_id = rte_get_next_lcore(-1, 1, 0);\n+\tTEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, \"At least 2 lcores \"\n+\t\t\t\"are required to run this autotest\\n\");\n+\n+\t/* Setup and start event device. */\n+\tret = configure_eventdev();\n+\tTEST_ASSERT_SUCCESS(ret, \"Failed to setup eventdev\\n\");\n+\n+\t/* Setup and start dma device. */\n+\tret = configure_dmadev();\n+\tTEST_ASSERT_SUCCESS(ret, \"dmadev initialization failed\\n\");\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static void\n+dma_adapter_teardown(void)\n+{\n+\tint ret;\n+\n+\tret = rte_event_dma_adapter_stop(TEST_ADAPTER_ID);\n+\tif (ret < 0)\n+\t\tRTE_LOG(ERR, USER1, \"Failed to stop adapter!\");\n+\n+\tret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,\n+\t\t\t\t\t\t    TEST_DMA_VCHAN_ID);\n+\tif (ret < 0)\n+\t\tRTE_LOG(ERR, USER1, \"Failed to delete vchan!\");\n+\n+\tret = rte_event_dma_adapter_free(TEST_ADAPTER_ID);\n+\tif (ret < 0)\n+\t\tRTE_LOG(ERR, USER1, \"Failed to free adapter!\");\n+\n+\tdma_adapter_setup_done = 0;\n+}\n+\n+static void\n+dma_teardown(void)\n+{\n+\t/* Free mbuf mempool */\n+\tif (params.src_mbuf_pool != NULL) {\n+\t\tRTE_LOG(DEBUG, USER1, \"DMA_ADAPTER_SRC_MBUFPOOL count %u\\n\",\n+\t\t\t\trte_mempool_avail_count(params.src_mbuf_pool));\n+\t\trte_mempool_free(params.src_mbuf_pool);\n+\t\tparams.src_mbuf_pool = NULL;\n+\t}\n+\n+\tif (params.dst_mbuf_pool != NULL) {\n+\t\tRTE_LOG(DEBUG, USER1, \"DMA_ADAPTER_DST_MBUFPOOL count 
%u\\n\",\n+\t\t\t\trte_mempool_avail_count(params.dst_mbuf_pool));\n+\t\trte_mempool_free(params.dst_mbuf_pool);\n+\t\tparams.dst_mbuf_pool = NULL;\n+\t}\n+\n+\t/* Free ops mempool */\n+\tif (params.op_mpool != NULL) {\n+\t\tRTE_LOG(DEBUG, USER1, \"EVENT_DMA_OP_POOL count %u\\n\",\n+\t\t\t\trte_mempool_avail_count(params.op_mpool));\n+\t\trte_mempool_free(params.op_mpool);\n+\t\tparams.op_mpool = NULL;\n+\t}\n+}\n+\n+static void\n+eventdev_teardown(void)\n+{\n+\trte_event_dev_stop(evdev);\n+}\n+\n+static void\n+testsuite_teardown(void)\n+{\n+\tdma_adapter_teardown();\n+\tdma_teardown();\n+\teventdev_teardown();\n+}\n+\n+static struct unit_test_suite functional_testsuite = {\n+\t.suite_name = \"Event dma adapter test suite\",\n+\t.setup = testsuite_setup,\n+\t.teardown = testsuite_teardown,\n+\t.unit_test_cases = {\n+\n+\t\tTEST_CASE_ST(NULL, test_dma_adapter_free, test_dma_adapter_create),\n+\n+\t\tTEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,\n+\t\t\t     test_dma_adapter_vchan_add_del),\n+\n+\t\tTEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,\n+\t\t\t     test_dma_adapter_stats),\n+\n+\t\tTEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,\n+\t\t\t     test_dma_adapter_params),\n+\n+\t\tTEST_CASE_ST(test_dma_adapter_conf_op_forward_mode, test_dma_adapter_stop,\n+\t\t\t     test_with_op_forward_mode),\n+\n+\t\tTEST_CASES_END() /**< NULL terminate unit test array */\n+\t}\n+};\n+\n+static int\n+test_event_dma_adapter(void)\n+{\n+\treturn unit_test_suite_runner(&functional_testsuite);\n+}\n+\n+#endif /* !RTE_EXEC_ENV_WINDOWS */\n+\n+REGISTER_TEST_COMMAND(event_dma_adapter_autotest, test_event_dma_adapter);\n",
    "prefixes": [
        "v7",
        "12/12"
    ]
}