get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
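
As an illustration of the "get" method, a minimal sketch using Python's requests library to fetch this same resource as JSON (the URL is taken from the response below; the assumption here is that read access needs no authentication on a public Patchwork instance such as patches.dpdk.org):

    import requests

    # Fetch the patch detail shown below; ask explicitly for JSON.
    resp = requests.get(
        "http://patches.dpdk.org/api/patches/55562/",
        headers={"Accept": "application/json"},
    )
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields present in the response body below.
    print(patch["name"], patch["state"], patch["check"])

The raw request and response that follow show the same call made through the browsable API (?format=api).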

GET /api/patches/55562/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 55562,
    "url": "http://patches.dpdk.org/api/patches/55562/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190628075024.404-26-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190628075024.404-26-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190628075024.404-26-pbhagavatula@marvell.com",
    "date": "2019-06-28T07:50:04",
    "name": "[v2,25/44] event/octeontx2: add SSO selftest",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ddc7a5fde3356c94ae90e62c7915234e7f135df1",
    "submitter": {
        "id": 1183,
        "url": "http://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190628075024.404-26-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 5217,
            "url": "http://patches.dpdk.org/api/series/5217/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=5217",
            "date": "2019-06-28T07:49:39",
            "name": "OCTEONTX2 event device driver",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/5217/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/55562/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/55562/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7C7AD1B9A6;\n\tFri, 28 Jun 2019 09:52:04 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n\t[67.231.148.174]) by dpdk.org (Postfix) with ESMTP id 3B619378E\n\tfor <dev@dpdk.org>; Fri, 28 Jun 2019 09:51:22 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n\tby mx0a-0016f401.pphosted.com (8.16.0.27/8.16.0.27) with SMTP id\n\tx5S7oNMp001550 for <dev@dpdk.org>; Fri, 28 Jun 2019 00:51:21 -0700",
            "from sc-exch01.marvell.com ([199.233.58.181])\n\tby mx0a-0016f401.pphosted.com with ESMTP id 2tdd778asp-1\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n\tfor <dev@dpdk.org>; Fri, 28 Jun 2019 00:51:21 -0700",
            "from SC-EXCH01.marvell.com (10.93.176.81) by SC-EXCH01.marvell.com\n\t(10.93.176.81) with Microsoft SMTP Server (TLS) id 15.0.1367.3;\n\tFri, 28 Jun 2019 00:51:19 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH01.marvell.com\n\t(10.93.176.81) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n\tTransport; Fri, 28 Jun 2019 00:51:19 -0700",
            "from BG-LT7430.marvell.com (bg-lt7430.marvell.com [10.28.10.255])\n\tby maili.marvell.com (Postfix) with ESMTP id 0130D3F7044;\n\tFri, 28 Jun 2019 00:51:17 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n\th=from : to : cc :\n\tsubject : date : message-id : in-reply-to : references : mime-version\n\t: content-transfer-encoding : content-type; s=pfpt0818;\n\tbh=cDaCTUqe/d+xFcju1U+rLBTeJLsFxmjz8r5T+Ze6qgg=;\n\tb=rDCZtxBhuBOP6zHw8Iedz0UFLAbQPo3IxXg/KMtNxIj3EFBMydih4GS/mZgi4Nsrjhgw\n\tVKGRege1E3Wgea1xdqCpyTlNTYcDoji4fLgO2HoRg4tVW6C3R92pMFSsaFBgyoVsb/jg\n\tkn99J/RMBE6agisnphOdUTcJm+iSXoNMgh9Ki6eT0VV0sdPEA04P4NP5COaIsWf+1yHZ\n\tj8yyi+5CXWrZkdYa8L2VUr6WBl4PLAm8vNiM9jeJF/0FIX2QJavef4+zAiuJzDomreSt\n\tDmpFXq2sR166XxXeSom4Az+o/7BtPd50nC2RQX0VmDU+IJpZdCd6kzVP3G/7Srld3NZ+\n\tPA== ",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>",
        "CC": "<dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>",
        "Date": "Fri, 28 Jun 2019 13:20:04 +0530",
        "Message-ID": "<20190628075024.404-26-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190628075024.404-1-pbhagavatula@marvell.com>",
        "References": "<20190628075024.404-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:, ,\n\tdefinitions=2019-06-28_02:, , signatures=0",
        "Subject": "[dpdk-dev]  [PATCH v2 25/44] event/octeontx2: add SSO selftest",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nAdd selftest to verify sanity of SSO.\nCan be run by passing devargs to SSO PF as follows:\n\n\t--dev \"0002:0e:00.0,selftest=1\"\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\nSigned-off-by: Jerin Jacob <jerinj@marvell.com>\n---\n app/test/test_eventdev.c                      |    8 +\n drivers/event/octeontx2/Makefile              |    1 +\n drivers/event/octeontx2/meson.build           |    1 +\n drivers/event/octeontx2/otx2_evdev.c          |   11 +-\n drivers/event/octeontx2/otx2_evdev.h          |    3 +\n drivers/event/octeontx2/otx2_evdev_selftest.c | 1511 +++++++++++++++++\n 6 files changed, 1534 insertions(+), 1 deletion(-)\n create mode 100644 drivers/event/octeontx2/otx2_evdev_selftest.c",
    "diff": "diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c\nindex c745e997e..783140dfe 100644\n--- a/app/test/test_eventdev.c\n+++ b/app/test/test_eventdev.c\n@@ -1014,7 +1014,15 @@ test_eventdev_selftest_octeontx(void)\n \treturn test_eventdev_selftest_impl(\"event_octeontx\", \"\");\n }\n \n+static int\n+test_eventdev_selftest_octeontx2(void)\n+{\n+\treturn test_eventdev_selftest_impl(\"otx2_eventdev\", \"\");\n+}\n+\n REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);\n REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);\n REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,\n \t\ttest_eventdev_selftest_octeontx);\n+REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,\n+\t\ttest_eventdev_selftest_octeontx2);\ndiff --git a/drivers/event/octeontx2/Makefile b/drivers/event/octeontx2/Makefile\nindex dfecda599..d6cffc1f6 100644\n--- a/drivers/event/octeontx2/Makefile\n+++ b/drivers/event/octeontx2/Makefile\n@@ -33,6 +33,7 @@ LIBABIVER := 1\n SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_worker_dual.c\n SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_worker.c\n SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_selftest.c\n SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_irq.c\n \n LDLIBS += -lrte_eal -lrte_bus_pci -lrte_pci -lrte_kvargs\ndiff --git a/drivers/event/octeontx2/meson.build b/drivers/event/octeontx2/meson.build\nindex c2a5f3e3d..470564b08 100644\n--- a/drivers/event/octeontx2/meson.build\n+++ b/drivers/event/octeontx2/meson.build\n@@ -6,6 +6,7 @@ sources = files('otx2_worker.c',\n \t\t'otx2_worker_dual.c',\n \t\t'otx2_evdev.c',\n \t\t'otx2_evdev_irq.c',\n+\t\t'otx2_evdev_selftest.c',\n \t\t)\n \n allow_experimental_apis = true\ndiff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c\nindex 5004fe2de..c5a150954 100644\n--- a/drivers/event/octeontx2/otx2_evdev.c\n+++ b/drivers/event/octeontx2/otx2_evdev.c\n@@ -1099,11 +1099,13 @@ static struct rte_eventdev_ops otx2_sso_ops = {\n \t.dev_start        = otx2_sso_start,\n \t.dev_stop         = otx2_sso_stop,\n \t.dev_close        = otx2_sso_close,\n+\t.dev_selftest     = otx2_sso_selftest,\n };\n \n #define OTX2_SSO_XAE_CNT\t\"xae_cnt\"\n #define OTX2_SSO_SINGLE_WS\t\"single_ws\"\n #define OTX2_SSO_GGRP_QOS\t\"qos\"\n+#define OTX2_SSO_SELFTEST\t\"selftest\"\n \n static void\n parse_queue_param(char *value, void *opaque)\n@@ -1186,6 +1188,8 @@ sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)\n \tif (kvlist == NULL)\n \t\treturn;\n \n+\trte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag,\n+\t\t\t   &dev->selftest);\n \trte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,\n \t\t\t   &dev->xae_cnt);\n \trte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,\n@@ -1301,6 +1305,10 @@ otx2_sso_init(struct rte_eventdev *event_dev)\n \totx2_sso_dbg(\"Initializing %s max_queues=%d max_ports=%d\",\n \t\t     event_dev->data->name, dev->max_event_queues,\n \t\t     dev->max_event_ports);\n+\tif (dev->selftest) {\n+\t\tevent_dev->dev->driver = &pci_sso.driver;\n+\t\tevent_dev->dev_ops->dev_selftest();\n+\t}\n \n \n \treturn 0;\n@@ -1347,4 +1355,5 @@ RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);\n RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, \"vfio-pci\");\n RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT \"=<int>\"\n \t\t\t      OTX2_SSO_SINGLE_WS \"=1\"\n-\t\t\t 
     OTX2_SSO_GGRP_QOS \"=<string>\");\n+\t\t\t      OTX2_SSO_GGRP_QOS \"=<string>\"\n+\t\t\t      OTX2_SSO_SELFTEST \"=1\");\ndiff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h\nindex 2aa742184..fc8dde416 100644\n--- a/drivers/event/octeontx2/otx2_evdev.h\n+++ b/drivers/event/octeontx2/otx2_evdev.h\n@@ -130,6 +130,7 @@ struct otx2_sso_evdev {\n \tstruct rte_mempool *xaq_pool;\n \t/* Dev args */\n \tuint8_t dual_ws;\n+\tuint8_t selftest;\n \tuint32_t xae_cnt;\n \tuint8_t qos_queue_cnt;\n \tstruct otx2_sso_qos *qos_parse_data;\n@@ -247,6 +248,8 @@ typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);\n void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,\n \t\t\t uintptr_t base, otx2_handle_event_t fn, void *arg);\n void ssogws_reset(struct otx2_ssogws *ws);\n+/* Selftest */\n+int otx2_sso_selftest(void);\n /* Init and Fini API's */\n int otx2_sso_init(struct rte_eventdev *event_dev);\n int otx2_sso_fini(struct rte_eventdev *event_dev);\ndiff --git a/drivers/event/octeontx2/otx2_evdev_selftest.c b/drivers/event/octeontx2/otx2_evdev_selftest.c\nnew file mode 100644\nindex 000000000..8440a50aa\n--- /dev/null\n+++ b/drivers/event/octeontx2/otx2_evdev_selftest.c\n@@ -0,0 +1,1511 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(C) 2019 Marvell International Ltd.\n+ */\n+\n+#include <rte_atomic.h>\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+#include <rte_debug.h>\n+#include <rte_eal.h>\n+#include <rte_ethdev.h>\n+#include <rte_eventdev.h>\n+#include <rte_hexdump.h>\n+#include <rte_launch.h>\n+#include <rte_lcore.h>\n+#include <rte_mbuf.h>\n+#include <rte_malloc.h>\n+#include <rte_memcpy.h>\n+#include <rte_per_lcore.h>\n+#include <rte_random.h>\n+#include <rte_test.h>\n+\n+#include \"otx2_evdev.h\"\n+\n+#define NUM_PACKETS (1024)\n+#define MAX_EVENTS  (1024)\n+\n+#define OCTEONTX2_TEST_RUN(setup, teardown, test) \\\n+\tocteontx_test_run(setup, teardown, test, #test)\n+\n+static int total;\n+static int passed;\n+static int failed;\n+static int unsupported;\n+\n+static int evdev;\n+static struct rte_mempool *eventdev_test_mempool;\n+\n+struct event_attr {\n+\tuint32_t flow_id;\n+\tuint8_t event_type;\n+\tuint8_t sub_event_type;\n+\tuint8_t sched_type;\n+\tuint8_t queue;\n+\tuint8_t port;\n+};\n+\n+static uint32_t seqn_list_index;\n+static int seqn_list[NUM_PACKETS];\n+\n+static inline void\n+seqn_list_init(void)\n+{\n+\tRTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);\n+\tmemset(seqn_list, 0, sizeof(seqn_list));\n+\tseqn_list_index = 0;\n+}\n+\n+static inline int\n+seqn_list_update(int val)\n+{\n+\tif (seqn_list_index >= NUM_PACKETS)\n+\t\treturn -1;\n+\n+\tseqn_list[seqn_list_index++] = val;\n+\trte_smp_wmb();\n+\treturn 0;\n+}\n+\n+static inline int\n+seqn_list_check(int limit)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < limit; i++) {\n+\t\tif (seqn_list[i] != i) {\n+\t\t\totx2_err(\"Seqn mismatch %d %d\", seqn_list[i], i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+struct test_core_param {\n+\trte_atomic32_t *total_events;\n+\tuint64_t dequeue_tmo_ticks;\n+\tuint8_t port;\n+\tuint8_t sched_type;\n+};\n+\n+static int\n+testsuite_setup(void)\n+{\n+\tconst char *eventdev_name = \"event_octeontx2\";\n+\n+\tevdev = rte_event_dev_get_dev_id(eventdev_name);\n+\tif (evdev < 0) {\n+\t\totx2_err(\"%d: Eventdev %s not found\", __LINE__, eventdev_name);\n+\t\treturn -1;\n+\t}\n+\treturn 0;\n+}\n+\n+static void\n+testsuite_teardown(void)\n+{\n+\trte_event_dev_close(evdev);\n+}\n+\n+static inline 
void\n+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,\n+\t\t\t\tstruct rte_event_dev_info *info)\n+{\n+\tmemset(dev_conf, 0, sizeof(struct rte_event_dev_config));\n+\tdev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;\n+\tdev_conf->nb_event_ports = info->max_event_ports;\n+\tdev_conf->nb_event_queues = info->max_event_queues;\n+\tdev_conf->nb_event_queue_flows = info->max_event_queue_flows;\n+\tdev_conf->nb_event_port_dequeue_depth =\n+\t\t\tinfo->max_event_port_dequeue_depth;\n+\tdev_conf->nb_event_port_enqueue_depth =\n+\t\t\tinfo->max_event_port_enqueue_depth;\n+\tdev_conf->nb_event_port_enqueue_depth =\n+\t\t\tinfo->max_event_port_enqueue_depth;\n+\tdev_conf->nb_events_limit =\n+\t\t\tinfo->max_num_events;\n+}\n+\n+enum {\n+\tTEST_EVENTDEV_SETUP_DEFAULT,\n+\tTEST_EVENTDEV_SETUP_PRIORITY,\n+\tTEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,\n+};\n+\n+static inline int\n+_eventdev_setup(int mode)\n+{\n+\tconst char *pool_name = \"evdev_octeontx_test_pool\";\n+\tstruct rte_event_dev_config dev_conf;\n+\tstruct rte_event_dev_info info;\n+\tint i, ret;\n+\n+\t/* Create and destrory pool for each test case to make it standalone */\n+\teventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,\n+\t\t\t\t\t\t\t0, 0, 512,\n+\t\t\t\t\t\t\trte_socket_id());\n+\tif (!eventdev_test_mempool) {\n+\t\totx2_err(\"ERROR creating mempool\");\n+\t\treturn -1;\n+\t}\n+\n+\tret = rte_event_dev_info_get(evdev, &info);\n+\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to get event dev info\");\n+\n+\tdevconf_set_default_sane_values(&dev_conf, &info);\n+\tif (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)\n+\t\tdev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;\n+\n+\tret = rte_event_dev_configure(evdev, &dev_conf);\n+\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to configure eventdev\");\n+\n+\tuint32_t queue_count;\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),\n+\t\t\t\t\"Queue count get failed\");\n+\n+\tif (mode == TEST_EVENTDEV_SETUP_PRIORITY) {\n+\t\tif (queue_count > 8)\n+\t\t\tqueue_count = 8;\n+\n+\t\t/* Configure event queues(0 to n) with\n+\t\t * RTE_EVENT_DEV_PRIORITY_HIGHEST to\n+\t\t * RTE_EVENT_DEV_PRIORITY_LOWEST\n+\t\t */\n+\t\tuint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /\n+\t\t\t\tqueue_count;\n+\t\tfor (i = 0; i < (int)queue_count; i++) {\n+\t\t\tstruct rte_event_queue_conf queue_conf;\n+\n+\t\t\tret = rte_event_queue_default_conf_get(evdev, i,\n+\t\t\t\t\t\t\t       &queue_conf);\n+\t\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to get def_conf%d\",\n+\t\t\t\t\t\ti);\n+\t\t\tqueue_conf.priority = i * step;\n+\t\t\tret = rte_event_queue_setup(evdev, i, &queue_conf);\n+\t\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to setup queue=%d\",\n+\t\t\t\t\t\ti);\n+\t\t}\n+\n+\t} else {\n+\t\t/* Configure event queues with default priority */\n+\t\tfor (i = 0; i < (int)queue_count; i++) {\n+\t\t\tret = rte_event_queue_setup(evdev, i, NULL);\n+\t\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to setup queue=%d\",\n+\t\t\t\t\t\ti);\n+\t\t}\n+\t}\n+\t/* Configure event ports */\n+\tuint32_t port_count;\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),\n+\t\t\t\t\"Port count get failed\");\n+\tfor (i = 0; i < (int)port_count; i++) {\n+\t\tret = rte_event_port_setup(evdev, i, NULL);\n+\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to setup port=%d\", i);\n+\t\tret = rte_event_port_link(evdev, i, NULL, NULL, 0);\n+\t\tRTE_TEST_ASSERT(ret >= 0, \"Failed to 
link all queues port=%d\",\n+\t\t\t\ti);\n+\t}\n+\n+\tret = rte_event_dev_start(evdev);\n+\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to start device\");\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+eventdev_setup(void)\n+{\n+\treturn _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);\n+}\n+\n+static inline int\n+eventdev_setup_priority(void)\n+{\n+\treturn _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);\n+}\n+\n+static inline int\n+eventdev_setup_dequeue_timeout(void)\n+{\n+\treturn _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);\n+}\n+\n+static inline void\n+eventdev_teardown(void)\n+{\n+\trte_event_dev_stop(evdev);\n+\trte_mempool_free(eventdev_test_mempool);\n+}\n+\n+static inline void\n+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,\n+\t\t\t\t uint32_t flow_id, uint8_t event_type,\n+\t\t\t\t uint8_t sub_event_type, uint8_t sched_type,\n+\t\t\t\t uint8_t queue, uint8_t port)\n+{\n+\tstruct event_attr *attr;\n+\n+\t/* Store the event attributes in mbuf for future reference */\n+\tattr = rte_pktmbuf_mtod(m, struct event_attr *);\n+\tattr->flow_id = flow_id;\n+\tattr->event_type = event_type;\n+\tattr->sub_event_type = sub_event_type;\n+\tattr->sched_type = sched_type;\n+\tattr->queue = queue;\n+\tattr->port = port;\n+\n+\tev->flow_id = flow_id;\n+\tev->sub_event_type = sub_event_type;\n+\tev->event_type = event_type;\n+\t/* Inject the new event */\n+\tev->op = RTE_EVENT_OP_NEW;\n+\tev->sched_type = sched_type;\n+\tev->queue_id = queue;\n+\tev->mbuf = m;\n+}\n+\n+static inline int\n+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,\n+\t      uint8_t sched_type, uint8_t queue, uint8_t port,\n+\t      unsigned int events)\n+{\n+\tstruct rte_mbuf *m;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < events; i++) {\n+\t\tstruct rte_event ev = {.event = 0, .u64 = 0};\n+\n+\t\tm = rte_pktmbuf_alloc(eventdev_test_mempool);\n+\t\tRTE_TEST_ASSERT_NOT_NULL(m, \"mempool alloc failed\");\n+\n+\t\tm->seqn = i;\n+\t\tupdate_event_and_validation_attr(m, &ev, flow_id, event_type,\n+\t\t\t\t\t\t sub_event_type, sched_type,\n+\t\t\t\t\t\t queue, port);\n+\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t}\n+\treturn 0;\n+}\n+\n+static inline int\n+check_excess_events(uint8_t port)\n+{\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\tint i;\n+\n+\t/* Check for excess events, try for a few times and exit */\n+\tfor (i = 0; i < 32; i++) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\n+\t\tRTE_TEST_ASSERT_SUCCESS(valid_event,\n+\t\t\t\t\t\"Unexpected valid event=%d\",\n+\t\t\t\t\tev.mbuf->seqn);\n+\t}\n+\treturn 0;\n+}\n+\n+static inline int\n+generate_random_events(const unsigned int total_events)\n+{\n+\tstruct rte_event_dev_info info;\n+\tuint32_t queue_count;\n+\tunsigned int i;\n+\tint ret;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),\n+\t\t\t\t\"Queue count get failed\");\n+\n+\tret = rte_event_dev_info_get(evdev, &info);\n+\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to get event dev info\");\n+\tfor (i = 0; i < total_events; i++) {\n+\t\tret = inject_events(\n+\t\t\trte_rand() % info.max_event_queue_flows /*flow_id */,\n+\t\t\tRTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\trte_rand() % 256 /* sub_event_type */,\n+\t\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),\n+\t\t\trte_rand() % queue_count /* queue */,\n+\t\t\t0 /* port */,\n+\t\t\t1 /* events */);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\t}\n+\treturn ret;\n+}\n+\n+\n+static inline 
int\n+validate_event(struct rte_event *ev)\n+{\n+\tstruct event_attr *attr;\n+\n+\tattr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);\n+\tRTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,\n+\t\t\t      \"flow_id mismatch enq=%d deq =%d\",\n+\t\t\t      attr->flow_id, ev->flow_id);\n+\tRTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,\n+\t\t\t      \"event_type mismatch enq=%d deq =%d\",\n+\t\t\t      attr->event_type, ev->event_type);\n+\tRTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,\n+\t\t\t      \"sub_event_type mismatch enq=%d deq =%d\",\n+\t\t\t      attr->sub_event_type, ev->sub_event_type);\n+\tRTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,\n+\t\t\t      \"sched_type mismatch enq=%d deq =%d\",\n+\t\t\t      attr->sched_type, ev->sched_type);\n+\tRTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,\n+\t\t\t      \"queue mismatch enq=%d deq =%d\",\n+\t\t\t      attr->queue, ev->queue_id);\n+\treturn 0;\n+}\n+\n+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,\n+\t\t\t\t struct rte_event *ev);\n+\n+static inline int\n+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)\n+{\n+\tuint32_t events = 0, forward_progress_cnt = 0, index = 0;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\tint ret;\n+\n+\twhile (1) {\n+\t\tif (++forward_progress_cnt > UINT16_MAX) {\n+\t\t\totx2_err(\"Detected deadlock\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tforward_progress_cnt = 0;\n+\t\tret = validate_event(&ev);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\n+\t\tif (fn != NULL) {\n+\t\t\tret = fn(index, port, &ev);\n+\t\t\tRTE_TEST_ASSERT_SUCCESS(ret,\n+\t\t\t\t\"Failed to validate test specific event\");\n+\t\t}\n+\n+\t\t++index;\n+\n+\t\trte_pktmbuf_free(ev.mbuf);\n+\t\tif (++events >= total_events)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn check_excess_events(port);\n+}\n+\n+static int\n+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)\n+{\n+\tRTE_SET_USED(port);\n+\tRTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, \"index=%d != seqn=%d\",\n+\t\t\t      index, ev->mbuf->seqn);\n+\treturn 0;\n+}\n+\n+static inline int\n+test_simple_enqdeq(uint8_t sched_type)\n+{\n+\tint ret;\n+\n+\tret = inject_events(0 /*flow_id */,\n+\t\t\t    RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t    0 /* sub_event_type */,\n+\t\t\t    sched_type,\n+\t\t\t    0 /* queue */,\n+\t\t\t    0 /* port */,\n+\t\t\t    MAX_EVENTS);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\treturn consume_events(0 /* port */, MAX_EVENTS,\tvalidate_simple_enqdeq);\n+}\n+\n+static int\n+test_simple_enqdeq_ordered(void)\n+{\n+\treturn test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_simple_enqdeq_atomic(void)\n+{\n+\treturn test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_simple_enqdeq_parallel(void)\n+{\n+\treturn test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+/*\n+ * Generate a prescribed number of events and spread them across available\n+ * queues. 
On dequeue, using single event port(port 0) verify the enqueued\n+ * event attributes\n+ */\n+static int\n+test_multi_queue_enq_single_port_deq(void)\n+{\n+\tint ret;\n+\n+\tret = generate_random_events(MAX_EVENTS);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\treturn consume_events(0 /* port */, MAX_EVENTS, NULL);\n+}\n+\n+/*\n+ * Inject 0..MAX_EVENTS events over 0..queue_count with modulus\n+ * operation\n+ *\n+ * For example, Inject 32 events over 0..7 queues\n+ * enqueue events 0, 8, 16, 24 in queue 0\n+ * enqueue events 1, 9, 17, 25 in queue 1\n+ * ..\n+ * ..\n+ * enqueue events 7, 15, 23, 31 in queue 7\n+ *\n+ * On dequeue, Validate the events comes in 0,8,16,24,1,9,17,25..,7,15,23,31\n+ * order from queue0(highest priority) to queue7(lowest_priority)\n+ */\n+static int\n+validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)\n+{\n+\tuint32_t queue_count;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),\n+\t\t\t\t\"Queue count get failed\");\n+\tif (queue_count > 8)\n+\t\tqueue_count = 8;\n+\tuint32_t range = MAX_EVENTS / queue_count;\n+\tuint32_t expected_val = (index % range) * queue_count;\n+\n+\texpected_val += ev->queue_id;\n+\tRTE_SET_USED(port);\n+\tRTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,\n+\t\"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d\",\n+\t\t\t      ev->mbuf->seqn, index, expected_val, range,\n+\t\t\t      queue_count, MAX_EVENTS);\n+\treturn 0;\n+}\n+\n+static int\n+test_multi_queue_priority(void)\n+{\n+\tint i, max_evts_roundoff;\n+\t/* See validate_queue_priority() comments for priority validate logic */\n+\tuint32_t queue_count;\n+\tstruct rte_mbuf *m;\n+\tuint8_t queue;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),\n+\t\t\t\t\"Queue count get failed\");\n+\tif (queue_count > 8)\n+\t\tqueue_count = 8;\n+\tmax_evts_roundoff  = MAX_EVENTS / queue_count;\n+\tmax_evts_roundoff *= queue_count;\n+\n+\tfor (i = 0; i < max_evts_roundoff; i++) {\n+\t\tstruct rte_event ev = {.event = 0, .u64 = 0};\n+\n+\t\tm = rte_pktmbuf_alloc(eventdev_test_mempool);\n+\t\tRTE_TEST_ASSERT_NOT_NULL(m, \"mempool alloc failed\");\n+\n+\t\tm->seqn = i;\n+\t\tqueue = i % queue_count;\n+\t\tupdate_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,\n+\t\t\t\t\t\t 0, RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t queue, 0);\n+\t\trte_event_enqueue_burst(evdev, 0, &ev, 1);\n+\t}\n+\n+\treturn consume_events(0, max_evts_roundoff, validate_queue_priority);\n+}\n+\n+static int\n+worker_multi_port_fn(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\trte_atomic32_t *total_events = param->total_events;\n+\tuint8_t port = param->port;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\tint ret;\n+\n+\twhile (rte_atomic32_read(total_events) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tret = validate_event(&ev);\n+\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to validate event\");\n+\t\trte_pktmbuf_free(ev.mbuf);\n+\t\trte_atomic32_sub(total_events, 1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+wait_workers_to_join(const rte_atomic32_t *count)\n+{\n+\tuint64_t cycles, print_cycles;\n+\n+\tcycles = rte_get_timer_cycles();\n+\tprint_cycles = cycles;\n+\twhile (rte_atomic32_read(count)) {\n+\t\tuint64_t new_cycles = rte_get_timer_cycles();\n+\n+\t\tif (new_cycles - print_cycles > rte_get_timer_hz()) {\n+\t\t\totx2_err(\"Events %d\", 
rte_atomic32_read(count));\n+\t\t\tprint_cycles = new_cycles;\n+\t\t}\n+\t\tif (new_cycles - cycles > rte_get_timer_hz() * 10000000000) {\n+\t\t\totx2_err(\"No schedules for seconds, deadlock (%d)\",\n+\t\t\t\t rte_atomic32_read(count));\n+\t\t\trte_event_dev_dump(evdev, stdout);\n+\t\t\tcycles = new_cycles;\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\trte_eal_mp_wait_lcore();\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+launch_workers_and_wait(int (*master_worker)(void *),\n+\t\t\tint (*slave_workers)(void *), uint32_t total_events,\n+\t\t\tuint8_t nb_workers, uint8_t sched_type)\n+{\n+\trte_atomic32_t atomic_total_events;\n+\tstruct test_core_param *param;\n+\tuint64_t dequeue_tmo_ticks;\n+\tuint8_t port = 0;\n+\tint w_lcore;\n+\tint ret;\n+\n+\tif (!nb_workers)\n+\t\treturn 0;\n+\n+\trte_atomic32_set(&atomic_total_events, total_events);\n+\tseqn_list_init();\n+\n+\tparam = malloc(sizeof(struct test_core_param) * nb_workers);\n+\tif (!param)\n+\t\treturn -1;\n+\n+\tret = rte_event_dequeue_timeout_ticks(evdev,\n+\t\t\t\t\t      rte_rand() % 10000000/* 10ms */,\n+\t\t\t\t\t      &dequeue_tmo_ticks);\n+\tif (ret) {\n+\t\tfree(param);\n+\t\treturn -1;\n+\t}\n+\n+\tparam[0].total_events = &atomic_total_events;\n+\tparam[0].sched_type = sched_type;\n+\tparam[0].port = 0;\n+\tparam[0].dequeue_tmo_ticks = dequeue_tmo_ticks;\n+\trte_wmb();\n+\n+\tw_lcore = rte_get_next_lcore(\n+\t\t\t/* start core */ -1,\n+\t\t\t/* skip master */ 1,\n+\t\t\t/* wrap */ 0);\n+\trte_eal_remote_launch(master_worker, &param[0], w_lcore);\n+\n+\tfor (port = 1; port < nb_workers; port++) {\n+\t\tparam[port].total_events = &atomic_total_events;\n+\t\tparam[port].sched_type = sched_type;\n+\t\tparam[port].port = port;\n+\t\tparam[port].dequeue_tmo_ticks = dequeue_tmo_ticks;\n+\t\trte_smp_wmb();\n+\t\tw_lcore = rte_get_next_lcore(w_lcore, 1, 0);\n+\t\trte_eal_remote_launch(slave_workers, &param[port], w_lcore);\n+\t}\n+\n+\trte_smp_wmb();\n+\tret = wait_workers_to_join(&atomic_total_events);\n+\tfree(param);\n+\n+\treturn ret;\n+}\n+\n+/*\n+ * Generate a prescribed number of events and spread them across available\n+ * queues. 
Dequeue the events through multiple ports and verify the enqueued\n+ * event attributes\n+ */\n+static int\n+test_multi_queue_enq_multi_port_deq(void)\n+{\n+\tconst unsigned int total_events = MAX_EVENTS;\n+\tuint32_t nr_ports;\n+\tint ret;\n+\n+\tret = generate_random_events(total_events);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),\n+\t\t\t\t\"Port count get failed\");\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tif (!nr_ports) {\n+\t\totx2_err(\"Not enough ports=%d or workers=%d\", nr_ports,\n+\t\t\t rte_lcore_count() - 1);\n+\t\treturn 0;\n+\t}\n+\n+\treturn launch_workers_and_wait(worker_multi_port_fn,\n+\t\t\t\t       worker_multi_port_fn, total_events,\n+\t\t\t\t       nr_ports, 0xff /* invalid */);\n+}\n+\n+static\n+void flush(uint8_t dev_id, struct rte_event event, void *arg)\n+{\n+\tunsigned int *count = arg;\n+\n+\tRTE_SET_USED(dev_id);\n+\tif (event.event_type == RTE_EVENT_TYPE_CPU)\n+\t\t*count = *count + 1;\n+}\n+\n+static int\n+test_dev_stop_flush(void)\n+{\n+\tunsigned int total_events = MAX_EVENTS, count = 0;\n+\tint ret;\n+\n+\tret = generate_random_events(total_events);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);\n+\tif (ret)\n+\t\treturn -2;\n+\trte_event_dev_stop(evdev);\n+\tret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);\n+\tif (ret)\n+\t\treturn -3;\n+\tRTE_TEST_ASSERT_EQUAL(total_events, count,\n+\t\t\t      \"count mismatch total_events=%d count=%d\",\n+\t\t\t      total_events, count);\n+\n+\treturn 0;\n+}\n+\n+static int\n+validate_queue_to_port_single_link(uint32_t index, uint8_t port,\n+\t\t\t\t   struct rte_event *ev)\n+{\n+\tRTE_SET_USED(index);\n+\tRTE_TEST_ASSERT_EQUAL(port, ev->queue_id,\n+\t\t\t      \"queue mismatch enq=%d deq =%d\",\n+\t\t\t      port, ev->queue_id);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Link queue x to port x and check correctness of link by checking\n+ * queue_id == x on dequeue on the specific port x\n+ */\n+static int\n+test_queue_to_port_single_link(void)\n+{\n+\tint i, nr_links, ret;\n+\tuint32_t queue_count;\n+\tuint32_t port_count;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),\n+\t\t\t\t\"Port count get failed\");\n+\n+\t/* Unlink all connections that created in eventdev_setup */\n+\tfor (i = 0; i < (int)port_count; i++) {\n+\t\tret = rte_event_port_unlink(evdev, i, NULL, 0);\n+\t\tRTE_TEST_ASSERT(ret >= 0,\n+\t\t\t\t\"Failed to unlink all queues port=%d\", i);\n+\t}\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),\n+\t\t\t\t\"Queue count get failed\");\n+\n+\tnr_links = RTE_MIN(port_count, queue_count);\n+\tconst unsigned int total_events = MAX_EVENTS / nr_links;\n+\n+\t/* Link queue x to port x and inject events to queue x through port x */\n+\tfor (i = 0; i < nr_links; i++) {\n+\t\tuint8_t queue = (uint8_t)i;\n+\n+\t\tret = rte_event_port_link(evdev, i, &queue, NULL, 1);\n+\t\tRTE_TEST_ASSERT(ret == 1, \"Failed to link queue to port %d\", i);\n+\n+\t\tret = inject_events(0x100 /*flow_id */,\n+\t\t\t\t    RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t\t    rte_rand() % 256 /* sub_event_type */,\n+\t\t\t\t    rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),\n+\t\t\t\t    queue /* queue */, i /* port */,\n+\t\t\t\t    total_events /* events */);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\t}\n+\n+\t/* Verify 
the events generated from correct queue */\n+\tfor (i = 0; i < nr_links; i++) {\n+\t\tret = consume_events(i /* port */, total_events,\n+\t\t\t\t     validate_queue_to_port_single_link);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,\n+\t\t\t\t  struct rte_event *ev)\n+{\n+\tRTE_SET_USED(index);\n+\tRTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),\n+\t\t\t      \"queue mismatch enq=%d deq =%d\",\n+\t\t\t      port, ev->queue_id);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Link all even number of queues to port 0 and all odd number of queues to\n+ * port 1 and verify the link connection on dequeue\n+ */\n+static int\n+test_queue_to_port_multi_link(void)\n+{\n+\tint ret, port0_events = 0, port1_events = 0;\n+\tuint32_t nr_queues = 0;\n+\tuint32_t nr_ports = 0;\n+\tuint8_t queue, port;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nr_queues),\n+\t\t\t\t\"Queue count get failed\");\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nr_queues),\n+\t\t\t\t\"Queue count get failed\");\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),\n+\t\t\t\t\"Port count get failed\");\n+\n+\tif (nr_ports < 2) {\n+\t\totx2_err(\"Not enough ports to test ports=%d\", nr_ports);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Unlink all connections that created in eventdev_setup */\n+\tfor (port = 0; port < nr_ports; port++) {\n+\t\tret = rte_event_port_unlink(evdev, port, NULL, 0);\n+\t\tRTE_TEST_ASSERT(ret >= 0, \"Failed to unlink all queues port=%d\",\n+\t\t\t\tport);\n+\t}\n+\n+\tconst unsigned int total_events = MAX_EVENTS / nr_queues;\n+\n+\t/* Link all even number of queues to port0 and odd numbers to port 1*/\n+\tfor (queue = 0; queue < nr_queues; queue++) {\n+\t\tport = queue & 0x1;\n+\t\tret = rte_event_port_link(evdev, port, &queue, NULL, 1);\n+\t\tRTE_TEST_ASSERT(ret == 1, \"Failed to link queue=%d to port=%d\",\n+\t\t\t\tqueue, port);\n+\n+\t\tret = inject_events(0x100 /*flow_id */,\n+\t\t\t\t    RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t\t    rte_rand() % 256 /* sub_event_type */,\n+\t\t\t\t    rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),\n+\t\t\t\t    queue /* queue */, port /* port */,\n+\t\t\t\t    total_events /* events */);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\n+\t\tif (port == 0)\n+\t\t\tport0_events += total_events;\n+\t\telse\n+\t\t\tport1_events += total_events;\n+\t}\n+\n+\tret = consume_events(0 /* port */, port0_events,\n+\t\t\t     validate_queue_to_port_multi_link);\n+\tif (ret)\n+\t\treturn -1;\n+\tret = consume_events(1 /* port */, port1_events,\n+\t\t\t     validate_queue_to_port_multi_link);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+static int\n+worker_flow_based_pipeline(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;\n+\trte_atomic32_t *total_events = param->total_events;\n+\tuint8_t new_sched_type = param->sched_type;\n+\tuint8_t port = param->port;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\twhile (rte_atomic32_read(total_events) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,\n+\t\t\t\t\t\t      dequeue_tmo_ticks);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\t/* Events from stage 0 */\n+\t\tif (ev.sub_event_type == 0) {\n+\t\t\t/* Move to atomic flow to maintain the ordering */\n+\t\t\tev.flow_id = 
0x2;\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.sub_event_type = 1; /* stage 1 */\n+\t\t\tev.sched_type = new_sched_type;\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t} else if (ev.sub_event_type == 1) { /* Events from stage 1*/\n+\t\t\tif (seqn_list_update(ev.mbuf->seqn) == 0) {\n+\t\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\t\trte_atomic32_sub(total_events, 1);\n+\t\t\t} else {\n+\t\t\t\totx2_err(\"Failed to update seqn_list\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t} else {\n+\t\t\totx2_err(\"Invalid ev.sub_event_type = %d\",\n+\t\t\t\t ev.sub_event_type);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+test_multiport_flow_sched_type_test(uint8_t in_sched_type,\n+\t\t\t\t    uint8_t out_sched_type)\n+{\n+\tconst unsigned int total_events = MAX_EVENTS;\n+\tuint32_t nr_ports;\n+\tint ret;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),\n+\t\t\t\t\"Port count get failed\");\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tif (!nr_ports) {\n+\t\totx2_err(\"Not enough ports=%d or workers=%d\", nr_ports,\n+\t\t\t rte_lcore_count() - 1);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Injects events with m->seqn=0 to total_events */\n+\tret = inject_events(0x1 /*flow_id */,\n+\t\t\t    RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t    0 /* sub_event_type (stage 0) */,\n+\t\t\t    in_sched_type,\n+\t\t\t    0 /* queue */,\n+\t\t\t    0 /* port */,\n+\t\t\t    total_events /* events */);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\trte_mb();\n+\tret = launch_workers_and_wait(worker_flow_based_pipeline,\n+\t\t\t\t      worker_flow_based_pipeline, total_events,\n+\t\t\t\t      nr_ports, out_sched_type);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tif (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&\n+\t    out_sched_type == RTE_SCHED_TYPE_ATOMIC) {\n+\t\t/* Check the events order maintained or not */\n+\t\treturn seqn_list_check(total_events);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Multi port ordered to atomic transaction */\n+static int\n+test_multi_port_flow_ordered_to_atomic(void)\n+{\n+\t/* Ingress event order test */\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_flow_ordered_to_ordered(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_flow_ordered_to_parallel(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+test_multi_port_flow_atomic_to_atomic(void)\n+{\n+\t/* Ingress event order test */\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_flow_atomic_to_ordered(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_flow_atomic_to_parallel(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+test_multi_port_flow_parallel_to_atomic(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_flow_parallel_to_ordered(void)\n+{\n+\treturn 
test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_flow_parallel_to_parallel(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+worker_group_based_pipeline(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;\n+\trte_atomic32_t *total_events = param->total_events;\n+\tuint8_t new_sched_type = param->sched_type;\n+\tuint8_t port = param->port;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\twhile (rte_atomic32_read(total_events) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,\n+\t\t\t\t\t\t      dequeue_tmo_ticks);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\t/* Events from stage 0(group 0) */\n+\t\tif (ev.queue_id == 0) {\n+\t\t\t/* Move to atomic flow to maintain the ordering */\n+\t\t\tev.flow_id = 0x2;\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.sched_type = new_sched_type;\n+\t\t\tev.queue_id = 1; /* Stage 1*/\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/\n+\t\t\tif (seqn_list_update(ev.mbuf->seqn) == 0) {\n+\t\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\t\trte_atomic32_sub(total_events, 1);\n+\t\t\t} else {\n+\t\t\t\totx2_err(\"Failed to update seqn_list\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t} else {\n+\t\t\totx2_err(\"Invalid ev.queue_id = %d\", ev.queue_id);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+test_multiport_queue_sched_type_test(uint8_t in_sched_type,\n+\t\t\t\t     uint8_t out_sched_type)\n+{\n+\tconst unsigned int total_events = MAX_EVENTS;\n+\tuint32_t queue_count;\n+\tuint32_t nr_ports;\n+\tint ret;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),\n+\t\t\t\t\"Port count get failed\");\n+\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),\n+\t\t\t\t\"Queue count get failed\");\n+\tif (queue_count < 2 ||  !nr_ports) {\n+\t\totx2_err(\"Not enough queues=%d ports=%d or workers=%d\",\n+\t\t\t queue_count, nr_ports,\n+\t\t\t rte_lcore_count() - 1);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Injects events with m->seqn=0 to total_events */\n+\tret = inject_events(0x1 /*flow_id */,\n+\t\t\t    RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t    0 /* sub_event_type (stage 0) */,\n+\t\t\t    in_sched_type,\n+\t\t\t    0 /* queue */,\n+\t\t\t    0 /* port */,\n+\t\t\t    total_events /* events */);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tret = launch_workers_and_wait(worker_group_based_pipeline,\n+\t\t\t\t      worker_group_based_pipeline, total_events,\n+\t\t\t\t      nr_ports, out_sched_type);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tif (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&\n+\t    out_sched_type == RTE_SCHED_TYPE_ATOMIC) {\n+\t\t/* Check the events order maintained or not */\n+\t\treturn seqn_list_check(total_events);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+test_multi_port_queue_ordered_to_atomic(void)\n+{\n+\t/* Ingress event order test */\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_queue_ordered_to_ordered(void)\n+{\n+\treturn 
test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_queue_ordered_to_parallel(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+test_multi_port_queue_atomic_to_atomic(void)\n+{\n+\t/* Ingress event order test */\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_queue_atomic_to_ordered(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_queue_atomic_to_parallel(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+test_multi_port_queue_parallel_to_atomic(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_queue_parallel_to_ordered(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_queue_parallel_to_parallel(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\trte_atomic32_t *total_events = param->total_events;\n+\tuint8_t port = param->port;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\twhile (rte_atomic32_read(total_events) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tif (ev.sub_event_type == 255) { /* last stage */\n+\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\trte_atomic32_sub(total_events, 1);\n+\t\t} else {\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.sub_event_type++;\n+\t\t\tev.sched_type =\n+\t\t\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))\n+{\n+\tuint32_t nr_ports;\n+\tint ret;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),\n+\t\t\t\t\"Port count get failed\");\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tif (!nr_ports) {\n+\t\totx2_err(\"Not enough ports=%d or workers=%d\",\n+\t\t\t nr_ports, rte_lcore_count() - 1);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Injects events with m->seqn=0 to total_events */\n+\tret = inject_events(0x1 /*flow_id */,\n+\t\t\t    RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t    0 /* sub_event_type (stage 0) */,\n+\t\t\t    rte_rand() %\n+\t\t\t\t(RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,\n+\t\t\t    0 /* queue */,\n+\t\t\t    0 /* port */,\n+\t\t\t    MAX_EVENTS /* events */);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\treturn launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,\n+\t\t\t\t       0xff /* invalid */);\n+}\n+\n+/* Flow based pipeline with maximum stages with random sched type */\n+static int\n+test_multi_port_flow_max_stages_random_sched_type(void)\n+{\n+\treturn 
launch_multi_port_max_stages_random_sched_type(\n+\t\tworker_flow_based_pipeline_max_stages_rand_sched_type);\n+}\n+\n+static int\n+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint8_t port = param->port;\n+\tuint32_t queue_count;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),\n+\t\t\t\t\"Queue count get failed\");\n+\tuint8_t nr_queues = queue_count;\n+\trte_atomic32_t *total_events = param->total_events;\n+\n+\twhile (rte_atomic32_read(total_events) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tif (ev.queue_id == nr_queues - 1) { /* last stage */\n+\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\trte_atomic32_sub(total_events, 1);\n+\t\t} else {\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.queue_id++;\n+\t\t\tev.sched_type =\n+\t\t\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Queue based pipeline with maximum stages with random sched type */\n+static int\n+test_multi_port_queue_max_stages_random_sched_type(void)\n+{\n+\treturn launch_multi_port_max_stages_random_sched_type(\n+\t\tworker_queue_based_pipeline_max_stages_rand_sched_type);\n+}\n+\n+static int\n+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint8_t port = param->port;\n+\tuint32_t queue_count;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),\n+\t\t\t\t\"Queue count get failed\");\n+\tuint8_t nr_queues = queue_count;\n+\trte_atomic32_t *total_events = param->total_events;\n+\n+\twhile (rte_atomic32_read(total_events) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tif (ev.queue_id == nr_queues - 1) { /* Last stage */\n+\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\trte_atomic32_sub(total_events, 1);\n+\t\t} else {\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.queue_id++;\n+\t\t\tev.sub_event_type = rte_rand() % 256;\n+\t\t\tev.sched_type =\n+\t\t\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Queue and flow based pipeline with maximum stages with random sched type */\n+static int\n+test_multi_port_mixed_max_stages_random_sched_type(void)\n+{\n+\treturn launch_multi_port_max_stages_random_sched_type(\n+\t\tworker_mixed_pipeline_max_stages_rand_sched_type);\n+}\n+\n+static int\n+worker_ordered_flow_producer(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint8_t port = param->port;\n+\tstruct rte_mbuf *m;\n+\tint counter = 0;\n+\n+\twhile (counter < NUM_PACKETS) {\n+\t\tm = rte_pktmbuf_alloc(eventdev_test_mempool);\n+\t\tif (m == NULL)\n+\t\t\tcontinue;\n+\n+\t\tm->seqn = counter++;\n+\n+\t\tstruct rte_event ev = {.event = 0, .u64 = 0};\n+\n+\t\tev.flow_id = 0x1; /* Generate a fat flow */\n+\t\tev.sub_event_type = 0;\n+\t\t/* Inject the new event */\n+\t\tev.op = RTE_EVENT_OP_NEW;\n+\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\tev.sched_type = RTE_SCHED_TYPE_ORDERED;\n+\t\tev.queue_id = 0;\n+\t\tev.mbuf = 
m;\n+\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+test_producer_consumer_ingress_order_test(int (*fn)(void *))\n+{\n+\tuint32_t nr_ports;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,\n+\t\t\t\tRTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),\n+\t\t\t\t\"Port count get failed\");\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tif (rte_lcore_count() < 3 || nr_ports < 2) {\n+\t\totx2_err(\"### Not enough cores for test.\");\n+\t\treturn 0;\n+\t}\n+\n+\tlaunch_workers_and_wait(worker_ordered_flow_producer, fn,\n+\t\t\t\tNUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);\n+\t/* Check the events order maintained or not */\n+\treturn seqn_list_check(NUM_PACKETS);\n+}\n+\n+/* Flow based producer consumer ingress order test */\n+static int\n+test_flow_producer_consumer_ingress_order_test(void)\n+{\n+\treturn test_producer_consumer_ingress_order_test(\n+\t\t\t\tworker_flow_based_pipeline);\n+}\n+\n+/* Queue based producer consumer ingress order test */\n+static int\n+test_queue_producer_consumer_ingress_order_test(void)\n+{\n+\treturn test_producer_consumer_ingress_order_test(\n+\t\t\t\tworker_group_based_pipeline);\n+}\n+\n+static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),\n+\t\t\t      int (*test)(void), const char *name)\n+{\n+\tif (setup() < 0) {\n+\t\tprintf(\"Error setting up test %s\", name);\n+\t\tunsupported++;\n+\t} else {\n+\t\tif (test() < 0) {\n+\t\t\tfailed++;\n+\t\t\tprintf(\"+ TestCase [%2d] : %s failed\\n\", total, name);\n+\t\t} else {\n+\t\t\tpassed++;\n+\t\t\tprintf(\"+ TestCase [%2d] : %s succeeded\\n\", total,\n+\t\t\t       name);\n+\t\t}\n+\t}\n+\n+\ttotal++;\n+\ttdown();\n+}\n+\n+int\n+otx2_sso_selftest(void)\n+{\n+\ttestsuite_setup();\n+\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_simple_enqdeq_ordered);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_simple_enqdeq_atomic);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_simple_enqdeq_parallel);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_queue_enq_single_port_deq);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_dev_stop_flush);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_queue_enq_multi_port_deq);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_queue_to_port_single_link);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_queue_to_port_multi_link);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_ordered_to_atomic);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_ordered_to_ordered);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_ordered_to_parallel);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_atomic_to_atomic);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_atomic_to_ordered);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_atomic_to_parallel);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_parallel_to_atomic);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_parallel_to_ordered);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   
test_multi_port_flow_parallel_to_parallel);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_ordered_to_atomic);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_ordered_to_ordered);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_ordered_to_parallel);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_atomic_to_atomic);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_atomic_to_ordered);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_atomic_to_parallel);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_parallel_to_atomic);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_parallel_to_ordered);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_parallel_to_parallel);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_max_stages_random_sched_type);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_max_stages_random_sched_type);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_multi_port_mixed_max_stages_random_sched_type);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_flow_producer_consumer_ingress_order_test);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t\t   test_queue_producer_consumer_ingress_order_test);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup_priority, eventdev_teardown,\n+\t\t\t   test_multi_queue_priority);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,\n+\t\t\t   test_multi_port_flow_ordered_to_atomic);\n+\tOCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,\n+\t\t\t   test_multi_port_queue_ordered_to_atomic);\n+\tprintf(\"Total tests   : %d\\n\", total);\n+\tprintf(\"Passed        : %d\\n\", passed);\n+\tprintf(\"Failed        : %d\\n\", failed);\n+\tprintf(\"Not supported : %d\\n\", unsupported);\n+\n+\ttestsuite_teardown();\n+\n+\tif (failed)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n",
    "prefixes": [
        "v2",
        "25/44"
    ]
}
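
The "patch" and "put" methods listed at the top modify the same resource; fields such as "state", "archived" and "delegate" are writable for project maintainers. A hedged sketch of a partial update with token authentication follows — the token and the new state value are placeholders, not taken from this response:

    import requests

    # Hypothetical partial update (the "patch" method): requires a
    # maintainer API token for the project; values below are examples only.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/55562/",
        headers={"Authorization": "Token <your-api-token>",
                 "Accept": "application/json"},
        json={"state": "accepted", "archived": False},
    )
    resp.raise_for_status()
    print(resp.json()["state"])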