get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
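
Programmatic access (illustrative sketch): the same resource can be retrieved with any HTTP client. The snippet below assumes the third-party Python "requests" package, which is not part of this API, and uses the patch ID and base URL from the response shown underneath.

import requests

# Fetch the JSON representation of the patch shown below.
# The ID 88660 and the base URL are taken from the response that follows.
resp = requests.get("http://patches.dpdk.org/api/patches/88660/", timeout=30)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["submitter"]["email"])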

GET /api/patches/88660/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 88660,
    "url": "http://patches.dpdk.org/api/patches/88660/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210306162942.6845-19-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210306162942.6845-19-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210306162942.6845-19-pbhagavatula@marvell.com",
    "date": "2021-03-06T16:29:23",
    "name": "[18/36] event/cnxk: add SSO selftest and dump",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8122805d7c3f854ff4abae25d0497c9f51aed70b",
    "submitter": {
        "id": 1183,
        "url": "http://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210306162942.6845-19-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 15516,
            "url": "http://patches.dpdk.org/api/series/15516/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=15516",
            "date": "2021-03-06T16:29:05",
            "name": "Marvell CNXK Event device Driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/15516/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/88660/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/88660/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D2F55A0548;\n\tSat,  6 Mar 2021 17:33:20 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C029F22A4CB;\n\tSat,  6 Mar 2021 17:31:14 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id E3C2C22A4CB\n for <dev@dpdk.org>; Sat,  6 Mar 2021 17:31:12 +0100 (CET)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 126GR2YL027797 for <dev@dpdk.org>; Sat, 6 Mar 2021 08:31:12 -0800",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com with ESMTP id 3747yurexb-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Sat, 06 Mar 2021 08:31:11 -0800",
            "from SC-EXCH02.marvell.com (10.93.176.82) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2;\n Sat, 6 Mar 2021 08:31:10 -0800",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by SC-EXCH02.marvell.com\n (10.93.176.82) with Microsoft SMTP Server (TLS) id 15.0.1497.2;\n Sat, 6 Mar 2021 08:31:09 -0800",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Sat, 6 Mar 2021 08:31:09 -0800",
            "from BG-LT7430.marvell.com (unknown [10.193.68.121])\n by maili.marvell.com (Postfix) with ESMTP id E75E43F703F;\n Sat,  6 Mar 2021 08:31:07 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=U+HT5WXdPQvK/tGrr5ZCH8bLKsiaNAhH+pRX8m7KbY8=;\n b=RsewXPKMRKYN1CkedlpVd6dr84FbVSdd2nJz47lyZlpWyag5dqBKaugRcpXKKufAkC9n\n CNgqMTxFsuGOjzVjYewaYwtTcwti8Y1lS6NFNSaa+3ESii+LSYejXZHkQZT+UWJD/FvB\n Tw+xFOSWdRRDtbTe652XHlGz8xEJRsNFo9m7bYpWKJTRFNdMECpU78vGSAeFAW0JvrT+\n mJL1x2lDgOEZu0GgwqXn1ToTDMa23Vt2f1xhwZ1ZysAN6bkwZzXbbjJH5gJrEIqNJZ8Z\n qhcVC9QzhjbHAA5tBSry3o3MydG+CD20jDMljBEGTPEUj3hVyvmF4vJ3SMTu/uxroUWF 3A==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>, \"Shijith\n Thotton\" <sthotton@marvell.com>",
        "CC": "<ndabilpuram@marvell.com>, <dev@dpdk.org>",
        "Date": "Sat, 6 Mar 2021 21:59:23 +0530",
        "Message-ID": "<20210306162942.6845-19-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210306162942.6845-1-pbhagavatula@marvell.com>",
        "References": "<20210306162942.6845-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.369, 18.0.761\n definitions=2021-03-06_08:2021-03-03,\n 2021-03-06 signatures=0",
        "Subject": "[dpdk-dev] [PATCH 18/36] event/cnxk: add SSO selftest and dump",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nAdd selftest to verify sanity of SSO and also add function to\ndump internal state of SSO.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\n---\n app/test/test_eventdev.c               |   14 +\n drivers/event/cnxk/cn10k_eventdev.c    |    8 +\n drivers/event/cnxk/cn9k_eventdev.c     |   10 +-\n drivers/event/cnxk/cnxk_eventdev.c     |    8 +\n drivers/event/cnxk/cnxk_eventdev.h     |    5 +\n drivers/event/cnxk/cnxk_sso_selftest.c | 1570 ++++++++++++++++++++++++\n drivers/event/cnxk/meson.build         |    3 +-\n 7 files changed, 1616 insertions(+), 2 deletions(-)\n create mode 100644 drivers/event/cnxk/cnxk_sso_selftest.c",
    "diff": "diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c\nindex 27ca5a649..107003f0b 100644\n--- a/app/test/test_eventdev.c\n+++ b/app/test/test_eventdev.c\n@@ -1042,6 +1042,18 @@ test_eventdev_selftest_dlb2(void)\n \treturn test_eventdev_selftest_impl(\"dlb2_event\", \"\");\n }\n \n+static int\n+test_eventdev_selftest_cn9k(void)\n+{\n+\treturn test_eventdev_selftest_impl(\"event_cn9k\", \"\");\n+}\n+\n+static int\n+test_eventdev_selftest_cn10k(void)\n+{\n+\treturn test_eventdev_selftest_impl(\"event_cn10k\", \"\");\n+}\n+\n REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);\n REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);\n REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,\n@@ -1051,3 +1063,5 @@ REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,\n REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);\n REGISTER_TEST_COMMAND(eventdev_selftest_dlb, test_eventdev_selftest_dlb);\n REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);\n+REGISTER_TEST_COMMAND(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k);\n+REGISTER_TEST_COMMAND(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k);\ndiff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c\nindex 6a0b9bcd9..74070e005 100644\n--- a/drivers/event/cnxk/cn10k_eventdev.c\n+++ b/drivers/event/cnxk/cn10k_eventdev.c\n@@ -401,6 +401,12 @@ cn10k_sso_close(struct rte_eventdev *event_dev)\n \treturn cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);\n }\n \n+static int\n+cn10k_sso_selftest(void)\n+{\n+\treturn cnxk_sso_selftest(RTE_STR(event_cn10k));\n+}\n+\n static struct rte_eventdev_ops cn10k_sso_dev_ops = {\n \t.dev_infos_get = cn10k_sso_info_get,\n \t.dev_configure = cn10k_sso_dev_configure,\n@@ -414,9 +420,11 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {\n \t.port_unlink = cn10k_sso_port_unlink,\n \t.timeout_ticks = cnxk_sso_timeout_ticks,\n \n+\t.dump = cnxk_sso_dump,\n \t.dev_start = cn10k_sso_start,\n \t.dev_stop = cn10k_sso_stop,\n \t.dev_close = cn10k_sso_close,\n+\t.dev_selftest = cn10k_sso_selftest,\n };\n \n static int\ndiff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c\nindex 195ed49d8..4fb0f1ccc 100644\n--- a/drivers/event/cnxk/cn9k_eventdev.c\n+++ b/drivers/event/cnxk/cn9k_eventdev.c\n@@ -222,7 +222,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)\n \t}\n }\n \n-static void\n+void\n cn9k_sso_set_rsrc(void *arg)\n {\n \tstruct cnxk_sso_evdev *dev = arg;\n@@ -475,6 +475,12 @@ cn9k_sso_close(struct rte_eventdev *event_dev)\n \treturn cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);\n }\n \n+static int\n+cn9k_sso_selftest(void)\n+{\n+\treturn cnxk_sso_selftest(RTE_STR(event_cn9k));\n+}\n+\n static struct rte_eventdev_ops cn9k_sso_dev_ops = {\n \t.dev_infos_get = cn9k_sso_info_get,\n \t.dev_configure = cn9k_sso_dev_configure,\n@@ -488,9 +494,11 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {\n \t.port_unlink = cn9k_sso_port_unlink,\n \t.timeout_ticks = cnxk_sso_timeout_ticks,\n \n+\t.dump = cnxk_sso_dump,\n \t.dev_start = cn9k_sso_start,\n \t.dev_stop = cn9k_sso_stop,\n \t.dev_close = cn9k_sso_close,\n+\t.dev_selftest = cn9k_sso_selftest,\n };\n \n static int\ndiff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c\nindex 01685633d..dbd35ca5d 100644\n--- a/drivers/event/cnxk/cnxk_eventdev.c\n+++ b/drivers/event/cnxk/cnxk_eventdev.c\n@@ -326,6 +326,14 @@ cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,\n 
\treturn 0;\n }\n \n+void\n+cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)\n+{\n+\tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n+\n+\troc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);\n+}\n+\n static void\n cnxk_handle_event(void *arg, struct rte_event event)\n {\ndiff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h\nindex 1030d5840..ee7dce5f5 100644\n--- a/drivers/event/cnxk/cnxk_eventdev.h\n+++ b/drivers/event/cnxk/cnxk_eventdev.h\n@@ -211,5 +211,10 @@ void cnxk_sso_stop(struct rte_eventdev *event_dev,\n \t\t   cnxk_sso_hws_reset_t reset_fn,\n \t\t   cnxk_sso_hws_flush_t flush_fn);\n int cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn);\n+int cnxk_sso_selftest(const char *dev_name);\n+void cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f);\n+\n+/* CN9K */\n+void cn9k_sso_set_rsrc(void *arg);\n \n #endif /* __CNXK_EVENTDEV_H__ */\ndiff --git a/drivers/event/cnxk/cnxk_sso_selftest.c b/drivers/event/cnxk/cnxk_sso_selftest.c\nnew file mode 100644\nindex 000000000..c99a81327\n--- /dev/null\n+++ b/drivers/event/cnxk/cnxk_sso_selftest.c\n@@ -0,0 +1,1570 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(C) 2019 Marvell International Ltd.\n+ */\n+\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+#include <rte_debug.h>\n+#include <rte_eal.h>\n+#include <rte_ethdev.h>\n+#include <rte_eventdev.h>\n+#include <rte_hexdump.h>\n+#include <rte_launch.h>\n+#include <rte_lcore.h>\n+#include <rte_malloc.h>\n+#include <rte_mbuf.h>\n+#include <rte_memcpy.h>\n+#include <rte_per_lcore.h>\n+#include <rte_random.h>\n+#include <rte_test.h>\n+\n+#include \"cnxk_eventdev.h\"\n+\n+#define NUM_PACKETS (1024)\n+#define MAX_EVENTS  (1024)\n+#define MAX_STAGES  (255)\n+\n+#define CNXK_TEST_RUN(setup, teardown, test)                                   \\\n+\tcnxk_test_run(setup, teardown, test, #test)\n+\n+static int total;\n+static int passed;\n+static int failed;\n+static int unsupported;\n+\n+static int evdev;\n+static struct rte_mempool *eventdev_test_mempool;\n+\n+struct event_attr {\n+\tuint32_t flow_id;\n+\tuint8_t event_type;\n+\tuint8_t sub_event_type;\n+\tuint8_t sched_type;\n+\tuint8_t queue;\n+\tuint8_t port;\n+};\n+\n+static uint32_t seqn_list_index;\n+static int seqn_list[NUM_PACKETS];\n+\n+static inline void\n+seqn_list_init(void)\n+{\n+\tRTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);\n+\tmemset(seqn_list, 0, sizeof(seqn_list));\n+\tseqn_list_index = 0;\n+}\n+\n+static inline int\n+seqn_list_update(int val)\n+{\n+\tif (seqn_list_index >= NUM_PACKETS)\n+\t\treturn -1;\n+\n+\tseqn_list[seqn_list_index++] = val;\n+\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\treturn 0;\n+}\n+\n+static inline int\n+seqn_list_check(int limit)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < limit; i++) {\n+\t\tif (seqn_list[i] != i) {\n+\t\t\tplt_err(\"Seqn mismatch %d %d\", seqn_list[i], i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+struct test_core_param {\n+\tuint32_t *total_events;\n+\tuint64_t dequeue_tmo_ticks;\n+\tuint8_t port;\n+\tuint8_t sched_type;\n+};\n+\n+static int\n+testsuite_setup(const char *eventdev_name)\n+{\n+\tevdev = rte_event_dev_get_dev_id(eventdev_name);\n+\tif (evdev < 0) {\n+\t\tplt_err(\"%d: Eventdev %s not found\", __LINE__, eventdev_name);\n+\t\treturn -1;\n+\t}\n+\treturn 0;\n+}\n+\n+static void\n+testsuite_teardown(void)\n+{\n+\trte_event_dev_close(evdev);\n+\ttotal = 0;\n+\tpassed = 0;\n+\tfailed = 0;\n+\tunsupported = 0;\n+}\n+\n+static inline 
void\n+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,\n+\t\t\t\tstruct rte_event_dev_info *info)\n+{\n+\tmemset(dev_conf, 0, sizeof(struct rte_event_dev_config));\n+\tdev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;\n+\tdev_conf->nb_event_ports = info->max_event_ports;\n+\tdev_conf->nb_event_queues = info->max_event_queues;\n+\tdev_conf->nb_event_queue_flows = info->max_event_queue_flows;\n+\tdev_conf->nb_event_port_dequeue_depth =\n+\t\tinfo->max_event_port_dequeue_depth;\n+\tdev_conf->nb_event_port_enqueue_depth =\n+\t\tinfo->max_event_port_enqueue_depth;\n+\tdev_conf->nb_event_port_enqueue_depth =\n+\t\tinfo->max_event_port_enqueue_depth;\n+\tdev_conf->nb_events_limit = info->max_num_events;\n+}\n+\n+enum {\n+\tTEST_EVENTDEV_SETUP_DEFAULT,\n+\tTEST_EVENTDEV_SETUP_PRIORITY,\n+\tTEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,\n+};\n+\n+static inline int\n+_eventdev_setup(int mode)\n+{\n+\tconst char *pool_name = \"evdev_cnxk_test_pool\";\n+\tstruct rte_event_dev_config dev_conf;\n+\tstruct rte_event_dev_info info;\n+\tint i, ret;\n+\n+\t/* Create and destrory pool for each test case to make it standalone */\n+\teventdev_test_mempool = rte_pktmbuf_pool_create(\n+\t\tpool_name, MAX_EVENTS, 0, 0, 512, rte_socket_id());\n+\tif (!eventdev_test_mempool) {\n+\t\tplt_err(\"ERROR creating mempool\");\n+\t\treturn -1;\n+\t}\n+\n+\tret = rte_event_dev_info_get(evdev, &info);\n+\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to get event dev info\");\n+\n+\tdevconf_set_default_sane_values(&dev_conf, &info);\n+\tif (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)\n+\t\tdev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;\n+\n+\tret = rte_event_dev_configure(evdev, &dev_conf);\n+\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to configure eventdev\");\n+\n+\tuint32_t queue_count;\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &queue_count),\n+\t\t\"Queue count get failed\");\n+\n+\tif (mode == TEST_EVENTDEV_SETUP_PRIORITY) {\n+\t\tif (queue_count > 8)\n+\t\t\tqueue_count = 8;\n+\n+\t\t/* Configure event queues(0 to n) with\n+\t\t * RTE_EVENT_DEV_PRIORITY_HIGHEST to\n+\t\t * RTE_EVENT_DEV_PRIORITY_LOWEST\n+\t\t */\n+\t\tuint8_t step =\n+\t\t\t(RTE_EVENT_DEV_PRIORITY_LOWEST + 1) / queue_count;\n+\t\tfor (i = 0; i < (int)queue_count; i++) {\n+\t\t\tstruct rte_event_queue_conf queue_conf;\n+\n+\t\t\tret = rte_event_queue_default_conf_get(evdev, i,\n+\t\t\t\t\t\t\t       &queue_conf);\n+\t\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to get def_conf%d\",\n+\t\t\t\t\t\ti);\n+\t\t\tqueue_conf.priority = i * step;\n+\t\t\tret = rte_event_queue_setup(evdev, i, &queue_conf);\n+\t\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to setup queue=%d\",\n+\t\t\t\t\t\ti);\n+\t\t}\n+\n+\t} else {\n+\t\t/* Configure event queues with default priority */\n+\t\tfor (i = 0; i < (int)queue_count; i++) {\n+\t\t\tret = rte_event_queue_setup(evdev, i, NULL);\n+\t\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to setup queue=%d\",\n+\t\t\t\t\t\ti);\n+\t\t}\n+\t}\n+\t/* Configure event ports */\n+\tuint32_t port_count;\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t\t       &port_count),\n+\t\t\"Port count get failed\");\n+\tfor (i = 0; i < (int)port_count; i++) {\n+\t\tret = rte_event_port_setup(evdev, i, NULL);\n+\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to setup port=%d\", i);\n+\t\tret = rte_event_port_link(evdev, i, NULL, NULL, 0);\n+\t\tRTE_TEST_ASSERT(ret >= 0, \"Failed to link all queues 
port=%d\",\n+\t\t\t\ti);\n+\t}\n+\n+\tret = rte_event_dev_start(evdev);\n+\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to start device\");\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+eventdev_setup(void)\n+{\n+\treturn _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);\n+}\n+\n+static inline int\n+eventdev_setup_priority(void)\n+{\n+\treturn _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);\n+}\n+\n+static inline int\n+eventdev_setup_dequeue_timeout(void)\n+{\n+\treturn _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);\n+}\n+\n+static inline void\n+eventdev_teardown(void)\n+{\n+\trte_event_dev_stop(evdev);\n+\trte_mempool_free(eventdev_test_mempool);\n+}\n+\n+static inline void\n+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,\n+\t\t\t\t uint32_t flow_id, uint8_t event_type,\n+\t\t\t\t uint8_t sub_event_type, uint8_t sched_type,\n+\t\t\t\t uint8_t queue, uint8_t port)\n+{\n+\tstruct event_attr *attr;\n+\n+\t/* Store the event attributes in mbuf for future reference */\n+\tattr = rte_pktmbuf_mtod(m, struct event_attr *);\n+\tattr->flow_id = flow_id;\n+\tattr->event_type = event_type;\n+\tattr->sub_event_type = sub_event_type;\n+\tattr->sched_type = sched_type;\n+\tattr->queue = queue;\n+\tattr->port = port;\n+\n+\tev->flow_id = flow_id;\n+\tev->sub_event_type = sub_event_type;\n+\tev->event_type = event_type;\n+\t/* Inject the new event */\n+\tev->op = RTE_EVENT_OP_NEW;\n+\tev->sched_type = sched_type;\n+\tev->queue_id = queue;\n+\tev->mbuf = m;\n+}\n+\n+static inline int\n+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,\n+\t      uint8_t sched_type, uint8_t queue, uint8_t port,\n+\t      unsigned int events)\n+{\n+\tstruct rte_mbuf *m;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < events; i++) {\n+\t\tstruct rte_event ev = {.event = 0, .u64 = 0};\n+\n+\t\tm = rte_pktmbuf_alloc(eventdev_test_mempool);\n+\t\tRTE_TEST_ASSERT_NOT_NULL(m, \"mempool alloc failed\");\n+\n+\t\t*rte_event_pmd_selftest_seqn(m) = i;\n+\t\tupdate_event_and_validation_attr(m, &ev, flow_id, event_type,\n+\t\t\t\t\t\t sub_event_type, sched_type,\n+\t\t\t\t\t\t queue, port);\n+\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t}\n+\treturn 0;\n+}\n+\n+static inline int\n+check_excess_events(uint8_t port)\n+{\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\tint i;\n+\n+\t/* Check for excess events, try for a few times and exit */\n+\tfor (i = 0; i < 32; i++) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\n+\t\tRTE_TEST_ASSERT_SUCCESS(valid_event,\n+\t\t\t\t\t\"Unexpected valid event=%d\",\n+\t\t\t\t\t*rte_event_pmd_selftest_seqn(ev.mbuf));\n+\t}\n+\treturn 0;\n+}\n+\n+static inline int\n+generate_random_events(const unsigned int total_events)\n+{\n+\tstruct rte_event_dev_info info;\n+\tuint32_t queue_count;\n+\tunsigned int i;\n+\tint ret;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &queue_count),\n+\t\t\"Queue count get failed\");\n+\n+\tret = rte_event_dev_info_get(evdev, &info);\n+\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to get event dev info\");\n+\tfor (i = 0; i < total_events; i++) {\n+\t\tret = inject_events(\n+\t\t\trte_rand() % info.max_event_queue_flows /*flow_id */,\n+\t\t\tRTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\trte_rand() % 256 /* sub_event_type */,\n+\t\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),\n+\t\t\trte_rand() % queue_count /* queue */, 0 /* port */,\n+\t\t\t1 /* events */);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\t}\n+\treturn ret;\n+}\n+\n+static 
inline int\n+validate_event(struct rte_event *ev)\n+{\n+\tstruct event_attr *attr;\n+\n+\tattr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);\n+\tRTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,\n+\t\t\t      \"flow_id mismatch enq=%d deq =%d\", attr->flow_id,\n+\t\t\t      ev->flow_id);\n+\tRTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,\n+\t\t\t      \"event_type mismatch enq=%d deq =%d\",\n+\t\t\t      attr->event_type, ev->event_type);\n+\tRTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,\n+\t\t\t      \"sub_event_type mismatch enq=%d deq =%d\",\n+\t\t\t      attr->sub_event_type, ev->sub_event_type);\n+\tRTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,\n+\t\t\t      \"sched_type mismatch enq=%d deq =%d\",\n+\t\t\t      attr->sched_type, ev->sched_type);\n+\tRTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,\n+\t\t\t      \"queue mismatch enq=%d deq =%d\", attr->queue,\n+\t\t\t      ev->queue_id);\n+\treturn 0;\n+}\n+\n+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,\n+\t\t\t\t struct rte_event *ev);\n+\n+static inline int\n+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)\n+{\n+\tuint32_t events = 0, forward_progress_cnt = 0, index = 0;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\tint ret;\n+\n+\twhile (1) {\n+\t\tif (++forward_progress_cnt > UINT16_MAX) {\n+\t\t\tplt_err(\"Detected deadlock\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tforward_progress_cnt = 0;\n+\t\tret = validate_event(&ev);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\n+\t\tif (fn != NULL) {\n+\t\t\tret = fn(index, port, &ev);\n+\t\t\tRTE_TEST_ASSERT_SUCCESS(\n+\t\t\t\tret, \"Failed to validate test specific event\");\n+\t\t}\n+\n+\t\t++index;\n+\n+\t\trte_pktmbuf_free(ev.mbuf);\n+\t\tif (++events >= total_events)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn check_excess_events(port);\n+}\n+\n+static int\n+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)\n+{\n+\tRTE_SET_USED(port);\n+\tRTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),\n+\t\t\t      \"index=%d != seqn=%d\", index,\n+\t\t\t      *rte_event_pmd_selftest_seqn(ev->mbuf));\n+\treturn 0;\n+}\n+\n+static inline int\n+test_simple_enqdeq(uint8_t sched_type)\n+{\n+\tint ret;\n+\n+\tret = inject_events(0 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t    0 /* sub_event_type */, sched_type, 0 /* queue */,\n+\t\t\t    0 /* port */, MAX_EVENTS);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\treturn consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);\n+}\n+\n+static int\n+test_simple_enqdeq_ordered(void)\n+{\n+\treturn test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_simple_enqdeq_atomic(void)\n+{\n+\treturn test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_simple_enqdeq_parallel(void)\n+{\n+\treturn test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+/*\n+ * Generate a prescribed number of events and spread them across available\n+ * queues. 
On dequeue, using single event port(port 0) verify the enqueued\n+ * event attributes\n+ */\n+static int\n+test_multi_queue_enq_single_port_deq(void)\n+{\n+\tint ret;\n+\n+\tret = generate_random_events(MAX_EVENTS);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\treturn consume_events(0 /* port */, MAX_EVENTS, NULL);\n+}\n+\n+/*\n+ * Inject 0..MAX_EVENTS events over 0..queue_count with modulus\n+ * operation\n+ *\n+ * For example, Inject 32 events over 0..7 queues\n+ * enqueue events 0, 8, 16, 24 in queue 0\n+ * enqueue events 1, 9, 17, 25 in queue 1\n+ * ..\n+ * ..\n+ * enqueue events 7, 15, 23, 31 in queue 7\n+ *\n+ * On dequeue, Validate the events comes in 0,8,16,24,1,9,17,25..,7,15,23,31\n+ * order from queue0(highest priority) to queue7(lowest_priority)\n+ */\n+static int\n+validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)\n+{\n+\tuint32_t queue_count;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &queue_count),\n+\t\t\"Queue count get failed\");\n+\tif (queue_count > 8)\n+\t\tqueue_count = 8;\n+\tuint32_t range = MAX_EVENTS / queue_count;\n+\tuint32_t expected_val = (index % range) * queue_count;\n+\n+\texpected_val += ev->queue_id;\n+\tRTE_SET_USED(port);\n+\tRTE_TEST_ASSERT_EQUAL(\n+\t\t*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,\n+\t\t\"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d\",\n+\t\t*rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val,\n+\t\trange, queue_count, MAX_EVENTS);\n+\treturn 0;\n+}\n+\n+static int\n+test_multi_queue_priority(void)\n+{\n+\tint i, max_evts_roundoff;\n+\t/* See validate_queue_priority() comments for priority validate logic */\n+\tuint32_t queue_count;\n+\tstruct rte_mbuf *m;\n+\tuint8_t queue;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &queue_count),\n+\t\t\"Queue count get failed\");\n+\tif (queue_count > 8)\n+\t\tqueue_count = 8;\n+\tmax_evts_roundoff = MAX_EVENTS / queue_count;\n+\tmax_evts_roundoff *= queue_count;\n+\n+\tfor (i = 0; i < max_evts_roundoff; i++) {\n+\t\tstruct rte_event ev = {.event = 0, .u64 = 0};\n+\n+\t\tm = rte_pktmbuf_alloc(eventdev_test_mempool);\n+\t\tRTE_TEST_ASSERT_NOT_NULL(m, \"mempool alloc failed\");\n+\n+\t\t*rte_event_pmd_selftest_seqn(m) = i;\n+\t\tqueue = i % queue_count;\n+\t\tupdate_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,\n+\t\t\t\t\t\t 0, RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t queue, 0);\n+\t\trte_event_enqueue_burst(evdev, 0, &ev, 1);\n+\t}\n+\n+\treturn consume_events(0, max_evts_roundoff, validate_queue_priority);\n+}\n+\n+static int\n+worker_multi_port_fn(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint32_t *total_events = param->total_events;\n+\tuint8_t port = param->port;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\tint ret;\n+\n+\twhile (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tret = validate_event(&ev);\n+\t\tRTE_TEST_ASSERT_SUCCESS(ret, \"Failed to validate event\");\n+\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+wait_workers_to_join(const uint32_t *count)\n+{\n+\tuint64_t cycles, print_cycles;\n+\n+\tcycles = rte_get_timer_cycles();\n+\tprint_cycles = cycles;\n+\twhile (__atomic_load_n(count, __ATOMIC_RELAXED)) {\n+\t\tuint64_t 
new_cycles = rte_get_timer_cycles();\n+\n+\t\tif (new_cycles - print_cycles > rte_get_timer_hz()) {\n+\t\t\tplt_info(\"Events %d\",\n+\t\t\t\t __atomic_load_n(count, __ATOMIC_RELAXED));\n+\t\t\tprint_cycles = new_cycles;\n+\t\t}\n+\t\tif (new_cycles - cycles > rte_get_timer_hz() * 10000000000) {\n+\t\t\tplt_err(\"No schedules for seconds, deadlock (%d)\",\n+\t\t\t\t__atomic_load_n(count, __ATOMIC_RELAXED));\n+\t\t\trte_event_dev_dump(evdev, stdout);\n+\t\t\tcycles = new_cycles;\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\trte_eal_mp_wait_lcore();\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+launch_workers_and_wait(int (*main_thread)(void *),\n+\t\t\tint (*worker_thread)(void *), uint32_t total_events,\n+\t\t\tuint8_t nb_workers, uint8_t sched_type)\n+{\n+\tuint32_t atomic_total_events;\n+\tstruct test_core_param *param;\n+\tuint64_t dequeue_tmo_ticks;\n+\tuint8_t port = 0;\n+\tint w_lcore;\n+\tint ret;\n+\n+\tif (!nb_workers)\n+\t\treturn 0;\n+\n+\t__atomic_store_n(&atomic_total_events, total_events, __ATOMIC_RELAXED);\n+\tseqn_list_init();\n+\n+\tparam = malloc(sizeof(struct test_core_param) * nb_workers);\n+\tif (!param)\n+\t\treturn -1;\n+\n+\tret = rte_event_dequeue_timeout_ticks(\n+\t\tevdev, rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);\n+\tif (ret) {\n+\t\tfree(param);\n+\t\treturn -1;\n+\t}\n+\n+\tparam[0].total_events = &atomic_total_events;\n+\tparam[0].sched_type = sched_type;\n+\tparam[0].port = 0;\n+\tparam[0].dequeue_tmo_ticks = dequeue_tmo_ticks;\n+\trte_wmb();\n+\n+\tw_lcore = rte_get_next_lcore(\n+\t\t/* start core */ -1,\n+\t\t/* skip main */ 1,\n+\t\t/* wrap */ 0);\n+\trte_eal_remote_launch(main_thread, &param[0], w_lcore);\n+\n+\tfor (port = 1; port < nb_workers; port++) {\n+\t\tparam[port].total_events = &atomic_total_events;\n+\t\tparam[port].sched_type = sched_type;\n+\t\tparam[port].port = port;\n+\t\tparam[port].dequeue_tmo_ticks = dequeue_tmo_ticks;\n+\t\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\t\tw_lcore = rte_get_next_lcore(w_lcore, 1, 0);\n+\t\trte_eal_remote_launch(worker_thread, &param[port], w_lcore);\n+\t}\n+\n+\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\tret = wait_workers_to_join(&atomic_total_events);\n+\tfree(param);\n+\n+\treturn ret;\n+}\n+\n+/*\n+ * Generate a prescribed number of events and spread them across available\n+ * queues. 
Dequeue the events through multiple ports and verify the enqueued\n+ * event attributes\n+ */\n+static int\n+test_multi_queue_enq_multi_port_deq(void)\n+{\n+\tconst unsigned int total_events = MAX_EVENTS;\n+\tuint32_t nr_ports;\n+\tint ret;\n+\n+\tret = generate_random_events(total_events);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t\t       &nr_ports),\n+\t\t\"Port count get failed\");\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tif (!nr_ports) {\n+\t\tplt_err(\"Not enough ports=%d or workers=%d\", nr_ports,\n+\t\t\trte_lcore_count() - 1);\n+\t\treturn 0;\n+\t}\n+\n+\treturn launch_workers_and_wait(worker_multi_port_fn,\n+\t\t\t\t       worker_multi_port_fn, total_events,\n+\t\t\t\t       nr_ports, 0xff /* invalid */);\n+}\n+\n+static void\n+flush(uint8_t dev_id, struct rte_event event, void *arg)\n+{\n+\tunsigned int *count = arg;\n+\n+\tRTE_SET_USED(dev_id);\n+\tif (event.event_type == RTE_EVENT_TYPE_CPU)\n+\t\t*count = *count + 1;\n+}\n+\n+static int\n+test_dev_stop_flush(void)\n+{\n+\tunsigned int total_events = MAX_EVENTS, count = 0;\n+\tint ret;\n+\n+\tret = generate_random_events(total_events);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);\n+\tif (ret)\n+\t\treturn -2;\n+\trte_event_dev_stop(evdev);\n+\tret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);\n+\tif (ret)\n+\t\treturn -3;\n+\tRTE_TEST_ASSERT_EQUAL(total_events, count,\n+\t\t\t      \"count mismatch total_events=%d count=%d\",\n+\t\t\t      total_events, count);\n+\n+\treturn 0;\n+}\n+\n+static int\n+validate_queue_to_port_single_link(uint32_t index, uint8_t port,\n+\t\t\t\t   struct rte_event *ev)\n+{\n+\tRTE_SET_USED(index);\n+\tRTE_TEST_ASSERT_EQUAL(port, ev->queue_id,\n+\t\t\t      \"queue mismatch enq=%d deq =%d\", port,\n+\t\t\t      ev->queue_id);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Link queue x to port x and check correctness of link by checking\n+ * queue_id == x on dequeue on the specific port x\n+ */\n+static int\n+test_queue_to_port_single_link(void)\n+{\n+\tint i, nr_links, ret;\n+\tuint32_t queue_count;\n+\tuint32_t port_count;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t\t       &port_count),\n+\t\t\"Port count get failed\");\n+\n+\t/* Unlink all connections that created in eventdev_setup */\n+\tfor (i = 0; i < (int)port_count; i++) {\n+\t\tret = rte_event_port_unlink(evdev, i, NULL, 0);\n+\t\tRTE_TEST_ASSERT(ret >= 0, \"Failed to unlink all queues port=%d\",\n+\t\t\t\ti);\n+\t}\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &queue_count),\n+\t\t\"Queue count get failed\");\n+\n+\tnr_links = RTE_MIN(port_count, queue_count);\n+\tconst unsigned int total_events = MAX_EVENTS / nr_links;\n+\n+\t/* Link queue x to port x and inject events to queue x through port x */\n+\tfor (i = 0; i < nr_links; i++) {\n+\t\tuint8_t queue = (uint8_t)i;\n+\n+\t\tret = rte_event_port_link(evdev, i, &queue, NULL, 1);\n+\t\tRTE_TEST_ASSERT(ret == 1, \"Failed to link queue to port %d\", i);\n+\n+\t\tret = inject_events(0x100 /*flow_id */,\n+\t\t\t\t    RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t\t    rte_rand() % 256 /* sub_event_type */,\n+\t\t\t\t    rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),\n+\t\t\t\t    queue /* queue */, i /* port */,\n+\t\t\t\t    total_events /* events */);\n+\t\tif 
(ret)\n+\t\t\treturn -1;\n+\t}\n+\n+\t/* Verify the events generated from correct queue */\n+\tfor (i = 0; i < nr_links; i++) {\n+\t\tret = consume_events(i /* port */, total_events,\n+\t\t\t\t     validate_queue_to_port_single_link);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,\n+\t\t\t\t  struct rte_event *ev)\n+{\n+\tRTE_SET_USED(index);\n+\tRTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),\n+\t\t\t      \"queue mismatch enq=%d deq =%d\", port,\n+\t\t\t      ev->queue_id);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Link all even number of queues to port 0 and all odd number of queues to\n+ * port 1 and verify the link connection on dequeue\n+ */\n+static int\n+test_queue_to_port_multi_link(void)\n+{\n+\tint ret, port0_events = 0, port1_events = 0;\n+\tuint32_t nr_queues = 0;\n+\tuint32_t nr_ports = 0;\n+\tuint8_t queue, port;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &nr_queues),\n+\t\t\"Queue count get failed\");\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &nr_queues),\n+\t\t\"Queue count get failed\");\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t\t       &nr_ports),\n+\t\t\"Port count get failed\");\n+\n+\tif (nr_ports < 2) {\n+\t\tplt_err(\"Not enough ports to test ports=%d\", nr_ports);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Unlink all connections that created in eventdev_setup */\n+\tfor (port = 0; port < nr_ports; port++) {\n+\t\tret = rte_event_port_unlink(evdev, port, NULL, 0);\n+\t\tRTE_TEST_ASSERT(ret >= 0, \"Failed to unlink all queues port=%d\",\n+\t\t\t\tport);\n+\t}\n+\n+\tunsigned int total_events = MAX_EVENTS / nr_queues;\n+\tif (!total_events) {\n+\t\tnr_queues = MAX_EVENTS;\n+\t\ttotal_events = MAX_EVENTS / nr_queues;\n+\t}\n+\n+\t/* Link all even number of queues to port0 and odd numbers to port 1*/\n+\tfor (queue = 0; queue < nr_queues; queue++) {\n+\t\tport = queue & 0x1;\n+\t\tret = rte_event_port_link(evdev, port, &queue, NULL, 1);\n+\t\tRTE_TEST_ASSERT(ret == 1, \"Failed to link queue=%d to port=%d\",\n+\t\t\t\tqueue, port);\n+\n+\t\tret = inject_events(0x100 /*flow_id */,\n+\t\t\t\t    RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t\t\t    rte_rand() % 256 /* sub_event_type */,\n+\t\t\t\t    rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),\n+\t\t\t\t    queue /* queue */, port /* port */,\n+\t\t\t\t    total_events /* events */);\n+\t\tif (ret)\n+\t\t\treturn -1;\n+\n+\t\tif (port == 0)\n+\t\t\tport0_events += total_events;\n+\t\telse\n+\t\t\tport1_events += total_events;\n+\t}\n+\n+\tret = consume_events(0 /* port */, port0_events,\n+\t\t\t     validate_queue_to_port_multi_link);\n+\tif (ret)\n+\t\treturn -1;\n+\tret = consume_events(1 /* port */, port1_events,\n+\t\t\t     validate_queue_to_port_multi_link);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+static int\n+worker_flow_based_pipeline(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;\n+\tuint32_t *total_events = param->total_events;\n+\tuint8_t new_sched_type = param->sched_type;\n+\tuint8_t port = param->port;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\twhile (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,\n+\t\t\t\t\t\t      dequeue_tmo_ticks);\n+\t\tif 
(!valid_event)\n+\t\t\tcontinue;\n+\n+\t\t/* Events from stage 0 */\n+\t\tif (ev.sub_event_type == 0) {\n+\t\t\t/* Move to atomic flow to maintain the ordering */\n+\t\t\tev.flow_id = 0x2;\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.sub_event_type = 1; /* stage 1 */\n+\t\t\tev.sched_type = new_sched_type;\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t} else if (ev.sub_event_type == 1) { /* Events from stage 1*/\n+\t\t\tuint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);\n+\n+\t\t\tif (seqn_list_update(seqn) == 0) {\n+\t\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\t\t__atomic_sub_fetch(total_events, 1,\n+\t\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\t} else {\n+\t\t\t\tplt_err(\"Failed to update seqn_list\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tplt_err(\"Invalid ev.sub_event_type = %d\",\n+\t\t\t\tev.sub_event_type);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+test_multiport_flow_sched_type_test(uint8_t in_sched_type,\n+\t\t\t\t    uint8_t out_sched_type)\n+{\n+\tconst unsigned int total_events = MAX_EVENTS;\n+\tuint32_t nr_ports;\n+\tint ret;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t\t       &nr_ports),\n+\t\t\"Port count get failed\");\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tif (!nr_ports) {\n+\t\tplt_err(\"Not enough ports=%d or workers=%d\", nr_ports,\n+\t\t\trte_lcore_count() - 1);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Injects events with a 0 sequence number to total_events */\n+\tret = inject_events(\n+\t\t0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t0 /* sub_event_type (stage 0) */, in_sched_type, 0 /* queue */,\n+\t\t0 /* port */, total_events /* events */);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\trte_mb();\n+\tret = launch_workers_and_wait(worker_flow_based_pipeline,\n+\t\t\t\t      worker_flow_based_pipeline, total_events,\n+\t\t\t\t      nr_ports, out_sched_type);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tif (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&\n+\t    out_sched_type == RTE_SCHED_TYPE_ATOMIC) {\n+\t\t/* Check the events order maintained or not */\n+\t\treturn seqn_list_check(total_events);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Multi port ordered to atomic transaction */\n+static int\n+test_multi_port_flow_ordered_to_atomic(void)\n+{\n+\t/* Ingress event order test */\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_flow_ordered_to_ordered(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_flow_ordered_to_parallel(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+test_multi_port_flow_atomic_to_atomic(void)\n+{\n+\t/* Ingress event order test */\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_flow_atomic_to_ordered(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_flow_atomic_to_parallel(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static 
int\n+test_multi_port_flow_parallel_to_atomic(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_flow_parallel_to_ordered(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_flow_parallel_to_parallel(void)\n+{\n+\treturn test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t   RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+worker_group_based_pipeline(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;\n+\tuint32_t *total_events = param->total_events;\n+\tuint8_t new_sched_type = param->sched_type;\n+\tuint8_t port = param->port;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\twhile (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,\n+\t\t\t\t\t\t      dequeue_tmo_ticks);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\t/* Events from stage 0(group 0) */\n+\t\tif (ev.queue_id == 0) {\n+\t\t\t/* Move to atomic flow to maintain the ordering */\n+\t\t\tev.flow_id = 0x2;\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.sched_type = new_sched_type;\n+\t\t\tev.queue_id = 1; /* Stage 1*/\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/\n+\t\t\tuint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);\n+\n+\t\t\tif (seqn_list_update(seqn) == 0) {\n+\t\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\t\t__atomic_sub_fetch(total_events, 1,\n+\t\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\t} else {\n+\t\t\t\tplt_err(\"Failed to update seqn_list\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tplt_err(\"Invalid ev.queue_id = %d\", ev.queue_id);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+test_multiport_queue_sched_type_test(uint8_t in_sched_type,\n+\t\t\t\t     uint8_t out_sched_type)\n+{\n+\tconst unsigned int total_events = MAX_EVENTS;\n+\tuint32_t queue_count;\n+\tuint32_t nr_ports;\n+\tint ret;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t\t       &nr_ports),\n+\t\t\"Port count get failed\");\n+\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &queue_count),\n+\t\t\"Queue count get failed\");\n+\tif (queue_count < 2 || !nr_ports) {\n+\t\tplt_err(\"Not enough queues=%d ports=%d or workers=%d\",\n+\t\t\tqueue_count, nr_ports, rte_lcore_count() - 1);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Injects events with a 0 sequence number to total_events */\n+\tret = inject_events(\n+\t\t0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t0 /* sub_event_type (stage 0) */, in_sched_type, 0 /* queue */,\n+\t\t0 /* port */, total_events /* events */);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tret = launch_workers_and_wait(worker_group_based_pipeline,\n+\t\t\t\t      worker_group_based_pipeline, total_events,\n+\t\t\t\t      nr_ports, out_sched_type);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\tif (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&\n+\t    out_sched_type == RTE_SCHED_TYPE_ATOMIC) {\n+\t\t/* Check the events order maintained or not */\n+\t\treturn seqn_list_check(total_events);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static 
int\n+test_multi_port_queue_ordered_to_atomic(void)\n+{\n+\t/* Ingress event order test */\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_queue_ordered_to_ordered(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_queue_ordered_to_parallel(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+test_multi_port_queue_atomic_to_atomic(void)\n+{\n+\t/* Ingress event order test */\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_queue_atomic_to_ordered(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_queue_atomic_to_parallel(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+test_multi_port_queue_parallel_to_atomic(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ATOMIC);\n+}\n+\n+static int\n+test_multi_port_queue_parallel_to_ordered(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_ORDERED);\n+}\n+\n+static int\n+test_multi_port_queue_parallel_to_parallel(void)\n+{\n+\treturn test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t\t\t\t    RTE_SCHED_TYPE_PARALLEL);\n+}\n+\n+static int\n+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint32_t *total_events = param->total_events;\n+\tuint8_t port = param->port;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\twhile (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tif (ev.sub_event_type == MAX_STAGES) { /* last stage */\n+\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\t__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);\n+\t\t} else {\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.sub_event_type++;\n+\t\t\tev.sched_type =\n+\t\t\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))\n+{\n+\tuint32_t nr_ports;\n+\tint ret;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t\t       &nr_ports),\n+\t\t\"Port count get failed\");\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tif (!nr_ports) {\n+\t\tplt_err(\"Not enough ports=%d or workers=%d\", nr_ports,\n+\t\t\trte_lcore_count() - 1);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Injects events with a 0 sequence number to total_events */\n+\tret = inject_events(\n+\t\t0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,\n+\t\t0 /* sub_event_type (stage 0) */,\n+\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,\n+\t\t0 /* queue */, 0 /* port */, MAX_EVENTS /* events */);\n+\tif (ret)\n+\t\treturn -1;\n+\n+\treturn launch_workers_and_wait(fn, fn, MAX_EVENTS, 
nr_ports,\n+\t\t\t\t       0xff /* invalid */);\n+}\n+\n+/* Flow based pipeline with maximum stages with random sched type */\n+static int\n+test_multi_port_flow_max_stages_random_sched_type(void)\n+{\n+\treturn launch_multi_port_max_stages_random_sched_type(\n+\t\tworker_flow_based_pipeline_max_stages_rand_sched_type);\n+}\n+\n+static int\n+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint8_t port = param->port;\n+\tuint32_t queue_count;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &queue_count),\n+\t\t\"Queue count get failed\");\n+\tuint8_t nr_queues = queue_count;\n+\tuint32_t *total_events = param->total_events;\n+\n+\twhile (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tif (ev.queue_id == nr_queues - 1) { /* last stage */\n+\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\t__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);\n+\t\t} else {\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.queue_id++;\n+\t\t\tev.sched_type =\n+\t\t\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Queue based pipeline with maximum stages with random sched type */\n+static int\n+test_multi_port_queue_max_stages_random_sched_type(void)\n+{\n+\treturn launch_multi_port_max_stages_random_sched_type(\n+\t\tworker_queue_based_pipeline_max_stages_rand_sched_type);\n+}\n+\n+static int\n+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint8_t port = param->port;\n+\tuint32_t queue_count;\n+\tuint16_t valid_event;\n+\tstruct rte_event ev;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,\n+\t\t\t\t       &queue_count),\n+\t\t\"Queue count get failed\");\n+\tuint8_t nr_queues = queue_count;\n+\tuint32_t *total_events = param->total_events;\n+\n+\twhile (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {\n+\t\tvalid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);\n+\t\tif (!valid_event)\n+\t\t\tcontinue;\n+\n+\t\tif (ev.queue_id == nr_queues - 1) { /* Last stage */\n+\t\t\trte_pktmbuf_free(ev.mbuf);\n+\t\t\t__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);\n+\t\t} else {\n+\t\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\t\tev.queue_id++;\n+\t\t\tev.sub_event_type = rte_rand() % 256;\n+\t\t\tev.sched_type =\n+\t\t\t\trte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Queue and flow based pipeline with maximum stages with random sched type */\n+static int\n+test_multi_port_mixed_max_stages_random_sched_type(void)\n+{\n+\treturn launch_multi_port_max_stages_random_sched_type(\n+\t\tworker_mixed_pipeline_max_stages_rand_sched_type);\n+}\n+\n+static int\n+worker_ordered_flow_producer(void *arg)\n+{\n+\tstruct test_core_param *param = arg;\n+\tuint8_t port = param->port;\n+\tstruct rte_mbuf *m;\n+\tint counter = 0;\n+\n+\twhile (counter < NUM_PACKETS) {\n+\t\tm = rte_pktmbuf_alloc(eventdev_test_mempool);\n+\t\tif (m == NULL)\n+\t\t\tcontinue;\n+\n+\t\t*rte_event_pmd_selftest_seqn(m) = counter++;\n+\n+\t\tstruct rte_event ev 
= {.event = 0, .u64 = 0};\n+\n+\t\tev.flow_id = 0x1; /* Generate a fat flow */\n+\t\tev.sub_event_type = 0;\n+\t\t/* Inject the new event */\n+\t\tev.op = RTE_EVENT_OP_NEW;\n+\t\tev.event_type = RTE_EVENT_TYPE_CPU;\n+\t\tev.sched_type = RTE_SCHED_TYPE_ORDERED;\n+\t\tev.queue_id = 0;\n+\t\tev.mbuf = m;\n+\t\trte_event_enqueue_burst(evdev, port, &ev, 1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+test_producer_consumer_ingress_order_test(int (*fn)(void *))\n+{\n+\tuint32_t nr_ports;\n+\n+\tRTE_TEST_ASSERT_SUCCESS(\n+\t\trte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,\n+\t\t\t\t       &nr_ports),\n+\t\t\"Port count get failed\");\n+\tnr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);\n+\n+\tif (rte_lcore_count() < 3 || nr_ports < 2) {\n+\t\tplt_err(\"### Not enough cores for test.\");\n+\t\treturn 0;\n+\t}\n+\n+\tlaunch_workers_and_wait(worker_ordered_flow_producer, fn, NUM_PACKETS,\n+\t\t\t\tnr_ports, RTE_SCHED_TYPE_ATOMIC);\n+\t/* Check the events order maintained or not */\n+\treturn seqn_list_check(NUM_PACKETS);\n+}\n+\n+/* Flow based producer consumer ingress order test */\n+static int\n+test_flow_producer_consumer_ingress_order_test(void)\n+{\n+\treturn test_producer_consumer_ingress_order_test(\n+\t\tworker_flow_based_pipeline);\n+}\n+\n+/* Queue based producer consumer ingress order test */\n+static int\n+test_queue_producer_consumer_ingress_order_test(void)\n+{\n+\treturn test_producer_consumer_ingress_order_test(\n+\t\tworker_group_based_pipeline);\n+}\n+\n+static void\n+cnxk_test_run(int (*setup)(void), void (*tdown)(void), int (*test)(void),\n+\t      const char *name)\n+{\n+\tif (setup() < 0) {\n+\t\tprintf(\"Error setting up test %s\", name);\n+\t\tunsupported++;\n+\t} else {\n+\t\tif (test() < 0) {\n+\t\t\tfailed++;\n+\t\t\tprintf(\"+ TestCase [%2d] : %s failed\\n\", total, name);\n+\t\t} else {\n+\t\t\tpassed++;\n+\t\t\tprintf(\"+ TestCase [%2d] : %s succeeded\\n\", total,\n+\t\t\t       name);\n+\t\t}\n+\t}\n+\n+\ttotal++;\n+\ttdown();\n+}\n+\n+static int\n+cnxk_sso_testsuite_run(const char *dev_name)\n+{\n+\tint rc;\n+\n+\ttestsuite_setup(dev_name);\n+\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_simple_enqdeq_ordered);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_simple_enqdeq_atomic);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_simple_enqdeq_parallel);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_queue_enq_single_port_deq);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown, test_dev_stop_flush);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_queue_enq_multi_port_deq);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_queue_to_port_single_link);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_queue_to_port_multi_link);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_ordered_to_atomic);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_ordered_to_ordered);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_ordered_to_parallel);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_atomic_to_atomic);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_atomic_to_ordered);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      
test_multi_port_flow_atomic_to_parallel);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_parallel_to_atomic);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_parallel_to_ordered);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_parallel_to_parallel);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_ordered_to_atomic);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_ordered_to_ordered);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_ordered_to_parallel);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_atomic_to_atomic);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_atomic_to_ordered);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_atomic_to_parallel);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_parallel_to_atomic);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_parallel_to_ordered);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_parallel_to_parallel);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_flow_max_stages_random_sched_type);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_queue_max_stages_random_sched_type);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_multi_port_mixed_max_stages_random_sched_type);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_flow_producer_consumer_ingress_order_test);\n+\tCNXK_TEST_RUN(eventdev_setup, eventdev_teardown,\n+\t\t      test_queue_producer_consumer_ingress_order_test);\n+\tCNXK_TEST_RUN(eventdev_setup_priority, eventdev_teardown,\n+\t\t      test_multi_queue_priority);\n+\tCNXK_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,\n+\t\t      test_multi_port_flow_ordered_to_atomic);\n+\tCNXK_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,\n+\t\t      test_multi_port_queue_ordered_to_atomic);\n+\tprintf(\"Total tests   : %d\\n\", total);\n+\tprintf(\"Passed        : %d\\n\", passed);\n+\tprintf(\"Failed        : %d\\n\", failed);\n+\tprintf(\"Not supported : %d\\n\", unsupported);\n+\n+\trc = failed;\n+\ttestsuite_teardown();\n+\n+\treturn rc;\n+}\n+\n+int\n+cnxk_sso_selftest(const char *dev_name)\n+{\n+\tconst struct rte_memzone *mz;\n+\tstruct cnxk_sso_evdev *dev;\n+\tint rc = -1;\n+\n+\tmz = rte_memzone_lookup(CNXK_SSO_MZ_NAME);\n+\tif (mz == NULL)\n+\t\treturn rc;\n+\n+\tdev = (void *)*((uint64_t *)mz->addr);\n+\tif (roc_model_runtime_is_cn9k()) {\n+\t\t/* Verify single ws mode. */\n+\t\tprintf(\"Verifying CN9K Single workslot mode\\n\");\n+\t\tdev->dual_ws = 0;\n+\t\tcn9k_sso_set_rsrc(dev);\n+\t\tif (cnxk_sso_testsuite_run(dev_name))\n+\t\t\treturn rc;\n+\t\t/* Verift dual ws mode. 
*/\n+\t\tprintf(\"Verifying CN9K Dual workslot mode\\n\");\n+\t\tdev->dual_ws = 1;\n+\t\tcn9k_sso_set_rsrc(dev);\n+\t\tif (cnxk_sso_testsuite_run(dev_name))\n+\t\t\treturn rc;\n+\t}\n+\n+\tif (roc_model_runtime_is_cn10k()) {\n+\t\tprintf(\"Verifying CN10K workslot getwork mode none\\n\");\n+\t\tdev->gw_mode = CN10K_GW_MODE_NONE;\n+\t\tif (cnxk_sso_testsuite_run(dev_name))\n+\t\t\treturn rc;\n+\t\tprintf(\"Verifying CN10K workslot getwork mode prefetch\\n\");\n+\t\tdev->gw_mode = CN10K_GW_MODE_PREF;\n+\t\tif (cnxk_sso_testsuite_run(dev_name))\n+\t\t\treturn rc;\n+\t\tprintf(\"Verifying CN10K workslot getwork mode smart prefetch\\n\");\n+\t\tdev->gw_mode = CN10K_GW_MODE_PREF_WFE;\n+\t\tif (cnxk_sso_testsuite_run(dev_name))\n+\t\t\treturn rc;\n+\t}\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build\nindex 5a9cc9f57..8bac4b7f3 100644\n--- a/drivers/event/cnxk/meson.build\n+++ b/drivers/event/cnxk/meson.build\n@@ -12,6 +12,7 @@ sources = files('cn10k_worker.c',\n \t\t'cn10k_eventdev.c',\n \t\t'cn9k_worker.c',\n \t\t'cn9k_eventdev.c',\n-\t\t'cnxk_eventdev.c')\n+\t\t'cnxk_eventdev.c',\n+\t\t'cnxk_sso_selftest.c')\n \n deps += ['bus_pci', 'common_cnxk', 'net_cnxk']\n",
    "prefixes": [
        "18/36"
    ]
}
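
The "mbox" URL in the response above serves a raw mbox rendering of this patch. A minimal sketch of downloading it for local use follows, again assuming the Python "requests" package; the output filename is arbitrary.

import requests

# Save the patch in mbox form using the "mbox" URL from the JSON above.
MBOX_URL = ("http://patches.dpdk.org/project/dpdk/patch/"
            "20210306162942.6845-19-pbhagavatula@marvell.com/mbox/")
resp = requests.get(MBOX_URL, timeout=30)
resp.raise_for_status()
with open("cnxk-sso-selftest.mbox", "wb") as f:  # hypothetical filename
    f.write(resp.content)

The saved file can then be applied to a DPDK source tree with "git am".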