get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (a full update).
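
Write access uses this same endpoint; below is a minimal sketch of a partial update, assuming Python with the requests library and a placeholder Patchwork API token (PATCH and PUT require an authenticated account with maintainer rights on the project, while GET is anonymous):

import requests

# Placeholder token for illustration only; real tokens come from the
# Patchwork user profile and are sent via token authentication.
TOKEN = "0123456789abcdef0123456789abcdef"
URL = "http://patches.dpdk.org/api/patches/17051/"

# PATCH is a partial update: only the fields in the body are changed.
# "state" and "archived" are writable fields shown in the response below.
resp = requests.patch(
    URL,
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])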

GET /api/patches/17051/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 17051,
    "url": "http://patches.dpdk.org/api/patches/17051/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1479319207-130646-6-git-send-email-harry.van.haaren@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1479319207-130646-6-git-send-email-harry.van.haaren@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1479319207-130646-6-git-send-email-harry.van.haaren@intel.com",
    "date": "2016-11-16T18:00:05",
    "name": "[dpdk-dev,5/7] test/eventdev: unit and functional tests",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "0ee2c8056f68a12fb9ef5c7e49449db1f872e69a",
    "submitter": {
        "id": 317,
        "url": "http://patches.dpdk.org/api/people/317/?format=api",
        "name": "Van Haaren, Harry",
        "email": "harry.van.haaren@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1479319207-130646-6-git-send-email-harry.van.haaren@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/17051/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/17051/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 113D45A0A;\n\tWed, 16 Nov 2016 19:01:28 +0100 (CET)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n\tby dpdk.org (Postfix) with ESMTP id DA93B567E\n\tfor <dev@dpdk.org>; Wed, 16 Nov 2016 19:00:26 +0100 (CET)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby orsmga101.jf.intel.com with ESMTP; 16 Nov 2016 10:00:25 -0800",
            "from sie-lab-212-222.ir.intel.com (HELO\n\tsilpixa00398672.ir.intel.com) ([10.237.212.222])\n\tby fmsmga001.fm.intel.com with ESMTP; 16 Nov 2016 10:00:24 -0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos; i=\"5.31,649,1473145200\"; d=\"scan'208\";\n\ta=\"1069396489\"",
        "From": "Harry van Haaren <harry.van.haaren@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Harry van Haaren <harry.van.haaren@intel.com>,\n\tGage Eads <gage.eads@intel.com>, David Hunt <david.hunt@intel.com>",
        "Date": "Wed, 16 Nov 2016 18:00:05 +0000",
        "Message-Id": "<1479319207-130646-6-git-send-email-harry.van.haaren@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1479319207-130646-1-git-send-email-harry.van.haaren@intel.com>",
        "References": "<1479319207-130646-1-git-send-email-harry.van.haaren@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 5/7] test/eventdev: unit and functional tests",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This commit adds basic unit and functional tests for the eventdev\nAPI. The test code is added in this commit, but not yet enabled until\nthe next commit.\n\nSigned-off-by: Gage Eads <gage.eads@intel.com>\nSigned-off-by: David Hunt <david.hunt@intel.com>\nSigned-off-by: Harry van Haaren <harry.van.haaren@intel.com>\n---\n app/test/test_eventdev_func.c | 1268 +++++++++++++++++++++++++++++++++++++++++\n app/test/test_eventdev_unit.c |  557 ++++++++++++++++++\n 2 files changed, 1825 insertions(+)\n create mode 100644 app/test/test_eventdev_func.c\n create mode 100644 app/test/test_eventdev_unit.c",
    "diff": "diff --git a/app/test/test_eventdev_func.c b/app/test/test_eventdev_func.c\nnew file mode 100644\nindex 0000000..d7fe481\n--- /dev/null\n+++ b/app/test/test_eventdev_func.c\n@@ -0,0 +1,1268 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdio.h>\n+#include <string.h>\n+#include <stdint.h>\n+#include <errno.h>\n+#include <unistd.h>\n+#include <sys/queue.h>\n+\n+#include <rte_memory.h>\n+#include <rte_memzone.h>\n+#include <rte_launch.h>\n+#include <rte_eal.h>\n+#include <rte_per_lcore.h>\n+#include <rte_lcore.h>\n+#include <rte_debug.h>\n+#include <rte_ethdev.h>\n+#include <rte_cycles.h>\n+\n+#include <rte_eventdev.h>\n+#include \"test.h\"\n+\n+#define MAX_PORTS 16\n+#define MAX_QIDS 16\n+#define NUM_PACKETS (1<<18)\n+\n+struct test {\n+\tstruct rte_mempool *mbuf_pool;\n+\tint ev;\n+\tint port[MAX_PORTS];\n+\tint qid[MAX_QIDS];\n+\tint nb_qids;\n+};\n+\n+static inline struct rte_mbuf *\n+rte_gen_arp(int portid, struct rte_mempool *mp)\n+{\n+\t/*\n+\t* len = 14 + 46\n+\t* ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46\n+\t*/\n+\tstatic const uint8_t arp_request[] = {\n+\t\t/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,\n+\t\t0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,\n+\t\t/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,\n+\t\t0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,\n+\t\t/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,\n+\t\t0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t\t/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t\t0x00, 0x00, 0x00, 0x00\n+\t};\n+\tstruct rte_mbuf *m;\n+\tint pkt_len = sizeof(arp_request) - 1;\n+\n+\tm = rte_pktmbuf_alloc(mp);\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tmemcpy((void *)((uint64_t)m->buf_addr + m->data_off),\n+\t\tarp_request, pkt_len);\n+\trte_pktmbuf_pkt_len(m) = 
pkt_len;\n+\trte_pktmbuf_data_len(m) = pkt_len;\n+\n+\tRTE_SET_USED(portid);\n+\t/*\n+\t * Ignore MAC address for super-simple testing\n+\t * struct ether_addr mac_addr;\n+\t * rte_eth_macaddr_get(portid, &mac_addr);\n+\t * memcpy((void *)((uint64_t)m->buf_addr + m->data_off + 6),\n+\t * &mac_addr, 6);\n+\t */\n+\n+\treturn m;\n+}\n+\n+/* initialization and config */\n+static inline int\n+init(struct test *t, int nb_queues, int nb_ports)\n+{\n+\tstruct rte_event_dev_config config = {\n+\t\t\t.nb_event_queues = nb_queues,\n+\t\t\t.nb_event_ports = nb_ports,\n+\t};\n+\tint ret, nevdevs = rte_event_dev_count();\n+\n+\tvoid *temp = t->mbuf_pool; /* save and restore mbuf pool */\n+\n+\tmemset(t, 0, sizeof(*t));\n+\tt->mbuf_pool = temp;\n+\n+\tif (nevdevs < 1) {\n+\t\tprintf(\"%d: No Eventdev Devices Found\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tconst char *eventdev_name = \"evdev_sw0\";\n+\n+\tt->ev = rte_event_dev_get_dev_id(eventdev_name);\n+\tif (t->ev < 0) {\n+\t\tprintf(\"%d: Eventdev %s not found - quitting.\\n\", __LINE__, eventdev_name);\n+\t\treturn -1;\n+\t}\n+\n+\tret = rte_event_dev_configure(t->ev, &config);\n+\tif (ret < 0)\n+\t\tprintf(\"%d: Error configuring device\\n\", __LINE__);\n+\treturn ret;\n+};\n+\n+static inline int\n+create_ports(struct test *t, int num_ports)\n+{\n+\tint i;\n+\tstatic const struct rte_event_port_conf conf = {\n+\t\t\t.dequeue_queue_depth = 32,\n+\t\t\t.enqueue_queue_depth = 64,\n+\t};\n+\n+\tfor (i = 0; i < num_ports; i++) {\n+\t\tif (rte_event_port_setup(t->ev, i, &conf) < 0) {\n+\t\t\tprintf(\"Error setting up port %d\\n\", i);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tt->port[i] = i;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+create_atomic_qids(struct test *t, int num_qids)\n+{\n+\tint i;\n+\n+\t/* Q creation */\n+\tstatic const struct rte_event_queue_conf conf = {\n+\t\t\t.priority = RTE_EVENT_QUEUE_PRIORITY_NORMAL,\n+\t\t\t.nb_atomic_flows = 1024,\n+\t};\n+\n+\tfor (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {\n+\t\tif (rte_event_queue_setup(t->ev, i, &conf) < 0) {\n+\t\t\tprintf(\"%d: error creating qid %d\\n\", __LINE__, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tt->qid[i] = i;\n+\t}\n+\tt->nb_qids += num_qids;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+create_ordered_qids(struct test *t, int num_qids)\n+{\n+\tint i;\n+\n+\t/* Q creation */\n+\tstatic const struct rte_event_queue_conf conf = {\n+\t\t\t.priority = RTE_EVENT_QUEUE_PRIORITY_NORMAL,\n+\t\t\t.nb_atomic_order_sequences = 1024,\n+\t};\n+\n+\tfor (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {\n+\t\tif (rte_event_queue_setup(t->ev, i, &conf) < 0) {\n+\t\t\tprintf(\"%d: error creating qid %d\\n\", __LINE__, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tt->qid[i] = i;\n+\t}\n+\tt->nb_qids += num_qids;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+create_unordered_qids(struct test *t, int num_qids)\n+{\n+\tint i;\n+\n+\t/* Q creation */\n+\tstatic const struct rte_event_queue_conf conf = {\n+\t\t\t.priority = RTE_EVENT_QUEUE_PRIORITY_NORMAL,\n+\t};\n+\n+\tfor (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {\n+\t\tif (rte_event_queue_setup(t->ev, i, &conf) < 0) {\n+\t\t\tprintf(\"%d: error creating qid %d\\n\", __LINE__, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tt->qid[i] = i;\n+\t}\n+\tt->nb_qids += num_qids;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+create_directed_qids(struct test *t, int num_qids, int ports[])\n+{\n+\tint i;\n+\n+\t/* Q creation */\n+\tstatic const struct rte_event_queue_conf conf = {\n+\t\t\t.priority = RTE_EVENT_QUEUE_PRIORITY_NORMAL,\n+\t\t\t.event_queue_cfg = 
RTE_EVENT_QUEUE_CFG_SINGLE_CONSUMER,\n+\t};\n+\n+\tfor (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {\n+\t\tstruct rte_event_queue_link link;\n+\n+\t\tif (rte_event_queue_setup(t->ev, i, &conf) < 0) {\n+\t\t\tprintf(\"%d: error creating qid %d\\n\", __LINE__, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tt->qid[i] = i;\n+\n+\t\tlink = (struct rte_event_queue_link){\n+\t\t\tt->qid[i],\n+\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL\n+\t\t};\n+\t\tif (rte_event_port_link(t->ev, ports[i - t->nb_qids], &link, 1) != 1) {\n+\t\t\tprintf(\"%d: error creating link for qid %d\\n\",\n+\t\t\t\t\t__LINE__, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\tt->nb_qids += num_qids;\n+\n+\treturn 0;\n+}\n+\n+/* destruction */\n+static inline int\n+cleanup(struct test *t)\n+{\n+\trte_event_dev_stop(t->ev);\n+\trte_event_dev_close(t->ev);\n+\treturn 0;\n+};\n+\n+/* run_prio_packet_test\n+ * This performs a basic packet priority check on the test instance passed in.\n+ * It is factored out of the main priority tests as the same tests must be\n+ * performed to ensure prioritization of each type of QID.\n+ *\n+ * Requirements:\n+ *  - An initialized test structure, including mempool\n+ *  - t->port[0] is initialized for both Enq / Deq of packets to the QID\n+ *  - t->qid[0] is the QID to be tested\n+ *  - if LB QID, the CQ must be mapped to the QID.\n+ */\n+static int\n+run_prio_packet_test(struct test *t)\n+{\n+\tint err;\n+\tconst uint32_t MAGIC_SEQN[] = {4711, 1234};\n+\tconst uint32_t PRIORITY[] = {3, 0};\n+\tunsigned i;\n+\tfor(i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {\n+\t\t/* generate pkt and enqueue */\n+\t\tstruct rte_event ev;\n+\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\t\tif (!arp) {\n+\t\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tarp->seqn = MAGIC_SEQN[i];\n+\n+\t\tev = (struct rte_event){\n+\t\t\t.priority = PRIORITY[i],\n+\t\t\t.operation = RTE_EVENT_OP_NEW,\n+\t\t\t.queue_id = t->qid[0],\n+\t\t\t.mbuf = arp\n+\t\t};\n+\t\terr = rte_event_enqueue(t->ev, t->port[0], &ev, 0);\n+\t\tif (err < 0) {\n+\t\t\tprintf(\"%d: error failed to enqueue\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\trte_event_schedule(t->ev);\n+\n+\tstruct rte_event_dev_stats stats;\n+\terr = rte_event_dev_stats_get(t->ev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: error failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (stats.port_rx_pkts[t->port[0]] != 2) {\n+\t\tprintf(\"%d: error stats incorrect for directed port\\n\", __LINE__);\n+\t\trte_event_dev_dump(stdout, t->ev);\n+\t\treturn -1;\n+\t}\n+\n+\tstruct rte_event ev, ev2;\n+\tuint32_t deq_pkts;\n+\tdeq_pkts = rte_event_dequeue(t->ev, t->port[0], &ev, 0);\n+\tif (deq_pkts != 1) {\n+\t\tprintf(\"%d: error failed to deq\\n\", __LINE__);\n+\t\trte_event_dev_dump(stdout, t->ev);\n+\t\treturn -1;\n+\t}\n+\tif(ev.mbuf->seqn != MAGIC_SEQN[1]) {\n+\t\tprintf(\"%d: first packet out not highest priority\\n\", __LINE__);\n+\t\trte_event_dev_dump(stdout, t->ev);\n+\t\treturn -1;\n+\t}\n+\trte_pktmbuf_free(ev.mbuf);\n+\n+\tdeq_pkts = rte_event_dequeue(t->ev, t->port[0], &ev2, 0);\n+\tif (deq_pkts != 1) {\n+\t\tprintf(\"%d: error failed to deq\\n\", __LINE__);\n+\t\trte_event_dev_dump(stdout, t->ev);\n+\t\treturn -1;\n+\t}\n+\tif(ev2.mbuf->seqn != MAGIC_SEQN[0]) {\n+\t\tprintf(\"%d: second packet out not lower priority\\n\", __LINE__);\n+\t\trte_event_dev_dump(stdout, t->ev);\n+\t\treturn -1;\n+\t}\n+\trte_pktmbuf_free(ev2.mbuf);\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static 
int\n+test_single_directed_packet(struct test *t)\n+{\n+\tconst int rx_enq = 0;\n+\tconst int wrk_enq = 2;\n+\tint err;\n+\n+\t/* Create instance with 3 directed QIDs going to 3 ports */\n+\tif (init(t, 3, 3) < 0 ||\n+\t\t\tcreate_ports(t, 3) < 0 ||\n+\t\t\tcreate_directed_qids(t, 3, t->port) < 0)\n+\t\treturn -1;\n+\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/************** FORWARD ****************/\n+\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\tstruct rte_event ev = {\n+\t\t\t.operation = RTE_EVENT_OP_NEW,\n+\t\t\t.queue_id = wrk_enq,\n+\t\t\t.mbuf = arp,\n+\t};\n+\n+\tif (!arp) {\n+\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tconst uint32_t MAGIC_SEQN = 4711;\n+\tarp->seqn = MAGIC_SEQN;\n+\n+\t/* generate pkt and enqueue */\n+\terr = rte_event_enqueue(t->ev, rx_enq, &ev, 0);\n+\tif (err < 0) {\n+\t\tprintf(\"%d: error failed to enqueue\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Run schedule() as dir packets may need to be re-ordered */\n+\tif (rte_event_schedule(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with schedule call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tstruct rte_event_dev_stats stats;\n+\terr = rte_event_dev_stats_get(t->ev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: error failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (stats.port_rx_pkts[rx_enq] != 1) {\n+\t\tprintf(\"%d: error stats incorrect for directed port\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tuint32_t deq_pkts;\n+\tdeq_pkts = rte_event_dequeue(t->ev, wrk_enq, &ev, 1);\n+\tif (deq_pkts != 1) {\n+\t\tprintf(\"%d: error failed to deq\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\terr = rte_event_dev_stats_get(t->ev, &stats);\n+\tif (stats.port_rx_pkts[wrk_enq] != 0 &&\n+\t\t\tstats.port_rx_pkts[wrk_enq] != 1) {\n+\t\tprintf(\"%d: error directed stats post-dequeue\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (ev.mbuf->seqn != MAGIC_SEQN) {\n+\t\tprintf(\"%d: error magic sequence number not dequeued\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\trte_pktmbuf_free(ev.mbuf);\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+test_overload_trip(struct test *t)\n+{\n+\tint err;\n+\n+\t/* Create instance with 3 directed QIDs going to 3 ports */\n+\tif (init(t, 1, 1) < 0 ||\n+\t\t\tcreate_ports(t, 1) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 1) < 0)\n+\t\treturn -1;\n+\n+\tstruct rte_event_queue_link link = {t->qid[0],\n+\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL };\n+\tint ret = rte_event_port_link(t->ev, t->port[0], &link, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: error mapping lb qid0\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\tif (!arp) {\n+\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* 512 packets is threshold\n+\t * iters 0 - 511 is 512 packets, then overload will be flagged\n+\t * iter 512 (the 513th pkt) is the first refused NEW packet */\n+\tconst uint32_t THRES = (256+1);\n+\tuint32_t i;\n+\tfor (i = 0; i < THRES; i++) {\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.operation = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = t->qid[0],\n+\t\t\t\t.mbuf = arp,\n+\t\t};\n+\t\terr = rte_event_enqueue(t->ev, 0, &ev, 0);\n+\t\tif(i == THRES-1) {\n+\t\t\tif(err != -ENOSPC) {\n+\t\t\t\tprintf(\"%d: overload trip didn't cause NEW pkt enq 
fail\\n\", __LINE__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\telse {\n+\t\t\t\t//printf(\"iter %d -ENOSPC returned for new enq as expected.\\n\", i);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tif (err < 0) {\n+\t\t\t\tprintf(\"%d: error failed to enqueue\\n\", __LINE__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < THRES; i++) {\n+\t\tif (rte_event_schedule(t->ev) < 0) {\n+\t\t\tprintf(\"%d: Error with schedule call\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tuint32_t deq_pkts;\n+\t\tstruct rte_event ev;\n+\t\tdeq_pkts = rte_event_dequeue(t->ev, 0, &ev, 1);\n+\n+\t\t/* i == THRES-1 *should* fail to deq, due to NEW pkt rejection\n+\t\t * when enqueue is attempted in overload mode */\n+\t\tif (i == (THRES-1) && deq_pkts == 0)\n+\t\t\tbreak;\n+\n+\t\tif (deq_pkts != 1) {\n+\t\t\tprintf(\"%d: warning failed to deq event i = %d\\n\",\n+\t\t\t\t\t__LINE__, i);\n+\t\t\t//return -1;\n+\t\t}\n+\t}\n+\n+\trte_pktmbuf_free(arp);\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+test_directed_overload(struct test *t)\n+{\n+\tint err;\n+\n+\t/* Create instance with 3 directed QIDs going to 3 ports */\n+\tif (init(t, 1, 1) < 0 ||\n+\t\t\tcreate_ports(t, 1) < 0 ||\n+\t\t\tcreate_directed_qids(t, 1, t->port) < 0)\n+\t\treturn -1;\n+\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* 512 packets is threshold\n+\t * iters 0 - 511 is 512 packets, then overload will be flagged\n+\t * iter 512 (the 513th pkt) is the first refused NEW packet */\n+\tconst uint32_t THRES = (256+1);\n+\tuint32_t i;\n+\tfor (i = 0; i < THRES; i++) {\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.operation = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = t->qid[0],\n+\t\t\t\t.event = (uintptr_t)i,\n+\t\t};\n+\t\terr = rte_event_enqueue(t->ev, 0, &ev, 0);\n+\t\tif(i == THRES-1) {\n+\t\t\tif(err != -ENOSPC) {\n+\t\t\t\tprintf(\"%d: overload trip didn't cause NEW pkt enq fail\\n\", __LINE__);\n+\t\t\t\t//return -1;\n+\t\t\t}\n+\t\t\telse {\n+\t\t\t\t//printf(\"iter %d -ENOSPC returned for new enq as expected.\\n\", i);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tif (err < 0) {\n+\t\t\t\tprintf(\"%d: error failed to enqueue\\n\", __LINE__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (rte_event_schedule(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with schedule call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tuint32_t pkt_deq_cntr = 0;\n+\tfor (i = 0; i < THRES; i++) {\n+\t\tif (rte_event_schedule(t->ev) < 0) {\n+\t\t\tprintf(\"%d: Error with schedule call\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tint32_t deq_pkts;\n+\t\tstruct rte_event ev;\n+\t\tdeq_pkts = rte_event_dequeue(t->ev, 0, &ev, 1);\n+\n+\t\t/* i == THRES-1 *should* fail to deq, due to NEW pkt rejection\n+\t\t * when enqueue is attempted in overload mode */\n+\t\tif (i == (THRES-1) && deq_pkts == 0)\n+\t\t\tbreak;\n+\n+\t\tif (deq_pkts != 1) {\n+\t\t\tprintf(\"%d: warning failed to deq (iter = %d), ret %d. 
Dumping stats\\n\",\n+\t\t\t\t\t__LINE__, i, deq_pkts);\n+\t\t\trte_event_dev_dump(stdout, t->ev);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tpkt_deq_cntr += deq_pkts;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+\n+static int\n+test_priority_directed(struct test *t)\n+{\n+\tif (init(t, 1, 1) < 0 ||\n+\t\t\tcreate_ports(t, 1) < 0 ||\n+\t\t\tcreate_directed_qids(t, 1, t->port) < 0) {\n+\t\tprintf(\"%d: Error initialising device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\treturn run_prio_packet_test(t);\n+}\n+\n+static int\n+test_priority_atomic(struct test *t)\n+{\n+\tif (init(t, 1, 1) < 0 ||\n+\t\t\tcreate_ports(t, 1) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initialising device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* map the QID */\n+\tstruct rte_event_queue_link link = {t->qid[0],\n+\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL };\n+\tif (rte_event_port_link(t->ev, t->port[0], &link, 1) != 1) {\n+\t\tprintf(\"%d: error mapping qid to port\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\treturn run_prio_packet_test(t);\n+}\n+\n+static int\n+test_priority_ordered(struct test *t)\n+{\n+\tif (init(t, 1, 1) < 0 ||\n+\t\t\tcreate_ports(t, 1) < 0 ||\n+\t\t\tcreate_ordered_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initialising device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* map the QID */\n+\tstruct rte_event_queue_link link = {t->qid[0],\n+\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL };\n+\tif (rte_event_port_link(t->ev, t->port[0], &link, 1) != 1) {\n+\t\tprintf(\"%d: error mapping qid to port\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\treturn run_prio_packet_test(t);\n+}\n+\n+static int\n+test_priority_unordered(struct test *t)\n+{\n+\tif (init(t, 1, 1) < 0 ||\n+\t\t\tcreate_ports(t, 1) < 0 ||\n+\t\t\tcreate_unordered_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initialising device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* map the QID */\n+\tstruct rte_event_queue_link link = {t->qid[0],\n+\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL };\n+\tif (rte_event_port_link(t->ev, t->port[0], &link, 1) != 1) {\n+\t\tprintf(\"%d: error mapping qid to port\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\treturn run_prio_packet_test(t);\n+}\n+\n+static int\n+burst_packets(struct test *t)\n+{\n+\t/************** CONFIG ****************/\n+\tuint32_t i;\n+\tint err;\n+\tint ret;\n+\n+\t/* Create instance with 4 ports and 2 queues */\n+\tif (init(t, 2, 2) < 0 ||\n+\t\t\tcreate_ports(t, 2) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 2) < 0) {\n+\t\tprintf(\"%d: Error initialising device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* CQ mapping to QID */\n+\tstruct rte_event_queue_link link = {t->qid[0],\n+\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL };\n+\tret = rte_event_port_link(t->ev, t->port[0], &link, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: error mapping lb qid0\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\tlink.queue_id = t->qid[1];\n+\tret = rte_event_port_link(t->ev, t->port[1], &link, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: error mapping lb qid1\\n\", 
__LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/************** FORWARD ****************/\n+\tconst uint32_t rx_port = 0;\n+\tconst uint32_t NUM_PKTS = 2;\n+\n+\tfor (i = 0; i < NUM_PKTS; i++) {\n+\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\t\tif (!arp) {\n+\t\t\tprintf(\"%d: error generating pkt\\n\" , __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.operation = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = i % 2,\n+\t\t\t\t.flow_id = i % 3,\n+\t\t\t\t.mbuf = arp,\n+\t\t};\n+\t\t/* generate pkt and enqueue */\n+\t\terr = rte_event_enqueue(t->ev, t->port[rx_port], &ev, 0);\n+\t\tif (err < 1) {\n+\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\tint16_t pkts = rte_event_schedule(t->ev);\n+\n+\tRTE_SET_USED(pkts);\n+\n+\t/* Check stats for all NUM_PKTS arrived to sched core */\n+\tstruct rte_event_dev_stats stats;\n+\n+\terr = rte_event_dev_stats_get(t->ev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\tif (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {\n+\t\tprintf(\"%d: Sched core didn't receive all %d pkts\\n\", __LINE__, NUM_PKTS);\n+\t\trte_event_dev_dump(stdout, t->ev);\n+\t\treturn -1;\n+\t}\n+\n+\tuint32_t deq_pkts;\n+\tint p;\n+\n+\tdeq_pkts = 0;\n+\t/******** DEQ QID 1 *******/\n+\tdo {\n+\t\tstruct rte_event ev;\n+\t\tp = rte_event_dequeue(t->ev, t->port[0], &ev, 0);\n+\t\tdeq_pkts += p;\n+\t\trte_pktmbuf_free(ev.mbuf);\n+\t} while (p);\n+\n+\tif (deq_pkts != NUM_PKTS/2) {\n+\t\tprintf(\"%d: Half of NUM_PKTS didn't arrive at port 1\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/******** DEQ QID 2 *******/\n+\tdeq_pkts = 0;\n+\tdo {\n+\t\tstruct rte_event ev;\n+\t\tp = rte_event_dequeue(t->ev, t->port[1], &ev, 0);\n+\t\tdeq_pkts += p;\n+\t\trte_pktmbuf_free(ev.mbuf);\n+\t} while (p);\n+\tif (deq_pkts != NUM_PKTS/2) {\n+\t\tprintf(\"%d: Half of NUM_PKTS didn't arrive at port 2\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+load_balancing(struct test *t)\n+{\n+\tconst int rx_enq = 0;\n+\tint err;\n+\tuint32_t i;\n+\n+\tif (init(t, 1, 4) < 0 ||\n+\t\t\tcreate_ports(t, 4) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initialising device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tstruct rte_event_queue_link link = {t->qid[0],\n+\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL };\n+\tfor (i = 0; i < 3; i++) {\n+\t\t/* map port 1 - 3 inclusive */\n+\t\tif (rte_event_port_link(t->ev, t->port[i+1], &link, 1) != 1) {\n+\t\t\tprintf(\"%d: error mapping qid to port %d\\n\", __LINE__, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/************** FORWARD ****************/\n+\t/*\n+\t * Create a set of flows that test the load-balancing operation of the\n+\t * implementation. 
Fill CQ 0 and 1 with flows 0 and 1, and test\n+\t * with a new flow, which should be sent to the 3rd mapped CQ\n+\t */\n+\tstatic uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};\n+#define PKT_NUM (sizeof(flows) / sizeof(flows[0]))\n+\tfor (i = 0; i < PKT_NUM; i++) {\n+\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\t\tif (!arp) {\n+\t\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.operation = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = t->qid[0],\n+\t\t\t\t.flow_id = flows[i],\n+\t\t\t\t.mbuf = arp,\n+\t\t};\n+\t\t/* generate pkt and enqueue */\n+\t\terr = rte_event_enqueue(t->ev, t->port[rx_enq], &ev, 0);\n+\t\tif (err < 1) {\n+\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\trte_event_schedule(t->ev);\n+\n+\tstruct rte_event_dev_stats stats;\n+\terr = rte_event_dev_stats_get(t->ev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (stats.port_inflight[1] != 4) {\n+\t\tprintf(\"%d:%s: port 1 inflight not correct\\n\", __LINE__, __func__);\n+\t\treturn -1;\n+\t}\n+\tif (stats.port_inflight[2] != 2) {\n+\t\tprintf(\"%d:%s: port 2 inflight not correct\\n\", __LINE__, __func__);\n+\t\treturn -1;\n+\t}\n+\tif (stats.port_inflight[3] != 3) {\n+\t\tprintf(\"%d:%s: port 3 inflight not correct\\n\", __LINE__, __func__);\n+\t\treturn -1;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+invalid_qid(struct test *t)\n+{\n+\tstruct rte_event_dev_stats stats;\n+\tconst int rx_enq = 0;\n+\tint err;\n+\tuint32_t i;\n+\n+\tif (init(t, 1, 4) < 0 ||\n+\t\t\tcreate_ports(t, 4) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initialising device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* CQ mapping to QID */\n+\tfor(i = 0; i < 4; i++) {\n+\t\tstruct rte_event_queue_link link = {t->qid[0],\n+\t\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL };\n+\t\terr = rte_event_port_link(t->ev, t->port[i], &link, 1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: error mapping port 1 qid\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/*\n+\t * Send in a packet with an invalid qid to the scheduler.\n+\t * We should see the packed enqueued OK, but the inflights for\n+\t * that packet should not be incremented, and the rx_dropped\n+\t * should be incremented.\n+\t */\n+\tstatic uint32_t flows1[] = {20};\n+\n+#define PKT_NUM1 (sizeof(flows1) / sizeof(flows1[0]))\n+\n+\tfor (i = 0; i < PKT_NUM1; i++) {\n+\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\t\tif (!arp) {\n+\t\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.operation = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = t->qid[0] + flows1[i],\n+\t\t\t\t.flow_id = i,\n+\t\t\t\t.mbuf = arp,\n+\t\t};\n+\t\t/* generate pkt and enqueue */\n+\t\terr = rte_event_enqueue(t->ev, t->port[rx_enq], &ev, 0);\n+\t\tif (err < 1) {\n+\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\t/* call the scheduler */\n+\tint16_t pkts = rte_event_schedule(t->ev);\n+\tRTE_SET_USED(pkts);\n+\n+\terr = rte_event_dev_stats_get(t->ev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/*\n+\t * Now check the resulting inflights on the port, and the rx_dropped.\n+\t 
*/\n+\tif (stats.port_inflight[0] != 0) {\n+\t\tprintf(\"%d:%s: port 1 inflight count not correct\\n\", __LINE__, __func__);\n+\t\trte_event_dev_dump(stdout, 0);\n+\t\treturn -1;\n+\t}\n+\tif (stats.port_rx_dropped[0] != 1) {\n+\t\tprintf(\"%d:%s: port 1 drops\\n\", __LINE__, __func__);\n+\t\trte_event_dev_dump(stdout, 0);\n+\t\treturn -1;\n+\t}\n+\t/* each packet drop should only be counted in one place - port or dev */\n+\tif (stats.rx_dropped != 0) {\n+\t\tprintf(\"%d:%s: port 1 dropped count not correct\\n\", __LINE__, __func__);\n+\t\trte_event_dev_dump(stdout, 0);\n+\t\treturn -1;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+worker_loopback_worker_fn(void *arg)\n+{\n+\tstruct test *t = arg;\n+\tuint8_t port = t->port[1];\n+\tint count = 0;\n+\tint err;\n+\n+\t/*\n+\t * Takes packets from the input port and then loops them back through\n+\t * the Queue Manager. Each packet gets looped through QIDs 0-8, 16 times,\n+\t * so each packet goes through 8*16 = 128 times.\n+\t */\n+\tprintf(\"%d: \\tWorker function started\\n\", __LINE__);\n+\twhile (count < NUM_PACKETS) {\n+#define BURST_SIZE 32\n+\t\tstruct rte_event ev[BURST_SIZE];\n+\t\tuint16_t i, nb_rx = rte_event_dequeue(t->ev, port, ev, BURST_SIZE);\n+\t\tif (nb_rx == 0) {\n+\t\t\trte_pause();\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tfor (i = 0; i < nb_rx; i++) {\n+\t\t\tev[i].queue_id++;\n+\t\t\tif (ev[i].queue_id != 8) {\n+\t\t\t\tev[i].operation = RTE_EVENT_OP_FORWARD;\n+\t\t\t\terr = rte_event_enqueue(t->ev, port, &ev[i], 0);\n+\t\t\t\tif (err <= 0) {\n+\t\t\t\t\tprintf(\"%d: Can't enqueue FWD!!\\n\", __LINE__);\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\tev[i].queue_id = 0;\n+\t\t\tev[i].mbuf->udata64++;\n+\t\t\tif (ev[i].mbuf->udata64 != 16) {\n+\t\t\t\tev[i].operation = RTE_EVENT_OP_FORWARD;\n+\t\t\t\terr = rte_event_enqueue(t->ev, port, &ev[i], 0);\n+\t\t\t\tif (err <= 0) {\n+\t\t\t\t\tprintf(\"%d: Can't enqueue FWD!!\\n\", __LINE__);\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\t/* we have hit 16 iterations through system - drop */\n+\t\t\trte_pktmbuf_free(ev[i].mbuf);\n+\t\t\tcount++;\n+\t\t\tev[i].operation = RTE_EVENT_OP_DROP;\n+\t\t\terr = rte_event_enqueue(t->ev, port, &ev[i], 0);\n+\t\t\tif(err != 1) {\n+\t\t\t\tprintf(\"%d drop enqueue failed\\n\", __LINE__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+worker_loopback_producer_fn(void *arg)\n+{\n+\tstruct test *t = arg;\n+\tuint8_t port = t->port[0];\n+\tuint64_t count = 0;\n+\n+\tprintf(\"%d: \\tProducer function started\\n\", __LINE__);\n+\twhile (count < NUM_PACKETS) {\n+\t\tstruct rte_mbuf *m = rte_pktmbuf_alloc(t->mbuf_pool);\n+\t\tif (m == NULL) {\n+\t\t\tprintf(\"%d: Error allocating mbuf\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tm->udata64 = 0;\n+\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.operation = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = t->qid[0],\n+\t\t\t\t.flow_id = (uintptr_t)m & 0xFFFF,\n+\t\t\t\t.mbuf = m,\n+\t\t};\n+\n+\t\twhile (rte_event_enqueue(t->ev, port, &ev, 0) != 1)\n+\t\t\trte_pause();\n+\n+\t\tcount++;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+worker_loopback(struct test *t)\n+{\n+\t/* use a single producer core, and a worker core to see what happens\n+\t * if the worker loops packets back multiple times\n+\t */\n+\tstruct rte_event_dev_stats stats;\n+\tuint64_t print_cycles = 0, cycles = 0;\n+\tuint64_t tx_pkts = 0;\n+\tint err;\n+\tint w_lcore, p_lcore;\n+\tuint32_t i;\n+\n+\tif (init(t, 8, 2) < 0 ||\n+\t\t\tcreate_ports(t, 
2) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 8) < 0) {\n+\t\tprintf(\"%d: Error initialising device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* CQ mapping to QID */\n+\tfor(i = 0; i < 8; i++) {\n+\t\tstruct rte_event_queue_link link = {t->qid[i],\n+\t\t\t\tRTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL };\n+\t\terr = rte_event_port_link(t->ev, t->port[1], &link, 1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: error mapping port 2 qid %d\\n\", __LINE__, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (rte_event_dev_start(t->ev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tp_lcore = rte_get_next_lcore(\n+\t\t\t/* start core */ -1,\n+\t\t\t/* skip master */ 1,\n+\t\t\t/* wrap */ 0);\n+\tw_lcore = rte_get_next_lcore(p_lcore, 1, 0);\n+\n+\trte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);\n+\trte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);\n+\n+\tprint_cycles = cycles = rte_get_timer_cycles();\n+\twhile (rte_eal_get_lcore_state(p_lcore) != FINISHED ||\n+\t\t\trte_eal_get_lcore_state(w_lcore) != FINISHED) {\n+\n+\t\trte_event_schedule(t->ev);\n+\n+\t\tuint64_t new_cycles = rte_get_timer_cycles();\n+\n+\t\tif (new_cycles - print_cycles > rte_get_timer_hz()) {\n+\t\t\trte_event_dev_stats_get(t->ev, &stats);\n+\t\t\tprintf(\"%d: \\tSched Rx = %\" PRIu64 \", Tx = %\" PRIu64 \"\\n\",\n+\t\t\t\t\t__LINE__, stats.rx_pkts, stats.tx_pkts);\n+\n+\t\t\tprint_cycles = new_cycles;\n+\t\t}\n+\t\tif (new_cycles - cycles > rte_get_timer_hz() * 3) {\n+\t\t\trte_event_dev_stats_get(t->ev, &stats);\n+\t\t\tif (stats.tx_pkts == tx_pkts) {\n+\t\t\t\trte_event_dev_dump(stdout, t->ev);\n+\t\t\t\tprintf(\"%d: \\nNo schedules for seconds, deadlock\\n\", __LINE__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\ttx_pkts = stats.tx_pkts;\n+\t\t\tcycles = new_cycles;\n+\t\t}\n+\t}\n+\n+\trte_eal_mp_wait_lcore();\n+\n+\t//rte_event_dev_dump(stdout, 0);\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static struct rte_mempool *eventdev_func_mempool;\n+\n+static int\n+test_eventdev(void)\n+{\n+\tstruct test *t = malloc(sizeof(struct test));\n+\tint ret;\n+\n+\t/* Only create mbuf pool once, reuse for each test run */\n+\tif (!eventdev_func_mempool) {\n+\t\teventdev_func_mempool = rte_pktmbuf_pool_create(\"EVDEV_SA_MBUF_POOL\",\n+\t\t\t\t(1<<16), /* size */\n+\t\t\t\t32 /*MBUF_CACHE_SIZE*/,\n+\t\t\t\t0,\n+\t\t\t\tRTE_MBUF_DEFAULT_BUF_SIZE,\n+\t\t\t\trte_socket_id());\n+\t\tif (!eventdev_func_mempool) {\n+\t\t\tprintf(\"ERROR creating mempool\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\tt->mbuf_pool = eventdev_func_mempool;\n+\n+\tprintf(\"*** Running Single Directed Packet test...\\n\");\n+\tret = test_single_directed_packet(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Single Directed Packet test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Overload Trip test...\\n\");\n+\tret = test_overload_trip(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Overload Trip test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Directed Overload test...\\n\");\n+\tret = test_directed_overload(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Directed Overload test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Prioritized Directed test...\\n\");\n+\tret = test_priority_directed(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Prioritized Directed test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Prioritized Atomic test...\\n\");\n+\tret = test_priority_atomic(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Prioritized Atomic 
test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\n+\tprintf(\"*** Running Prioritized Ordered test...\\n\");\n+\tret = test_priority_ordered(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Prioritized Ordered test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Prioritized Unordered test...\\n\");\n+\tret = test_priority_unordered(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Prioritized Unordered test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Burst Packets test...\\n\");\n+\tret = burst_packets(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Burst Packets test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Load Balancing test...\\n\");\n+\tret = load_balancing(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Load Balancing test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Invalid QID test...\\n\");\n+\tret = invalid_qid(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Invalid QID test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tif (rte_lcore_count() >= 3) {\n+\t\tprintf(\"*** Running Worker loopback test...\\n\");\n+\t\tret = worker_loopback(t);\n+\t\tif (ret != 0) {\n+\t\t\tprintf(\"ERROR - Worker loopback test FAILED.\\n\");\n+\t\t\treturn ret;\n+\t\t}\n+\t} else {\n+\t\tprintf(\"### Not enough cores for worker loopback test. \\n\");\n+\t\tprintf(\"### Need at least 3 cores for test.\\n\");\n+\t}\n+\t/* Free test instance, leaving mempool initialized, and a pointer to it\n+\t * in the static eventdev_func_mempool variable. It is re-used on re-runs */\n+\tfree(t);\n+\n+\treturn 0;\n+}\n+\n+REGISTER_TEST_COMMAND(eventdev_func_autotest, test_eventdev);\ndiff --git a/app/test/test_eventdev_unit.c b/app/test/test_eventdev_unit.c\nnew file mode 100644\nindex 0000000..c145401\n--- /dev/null\n+++ b/app/test/test_eventdev_unit.c\n@@ -0,0 +1,557 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdio.h>\n+#include <unistd.h>\n+#include <stdlib.h>\n+#include <stdbool.h>\n+#include <stdarg.h>\n+#include \"test.h\"\n+#include <sys/types.h>\n+#include <sys/stat.h>\n+#include <string.h>\n+\n+#include <rte_eventdev.h>\n+#include <rte_lcore.h>\n+#include <rte_mbuf.h>\n+\n+typedef enum eventdev_api_ut_ids_s {\n+\tEVENTDEV_API_UT_001 = 1,\n+\tEVENTDEV_API_UT_002,\n+\tEVENTDEV_API_UT_003,\n+\tEVENTDEV_API_UT_004,\n+\tEVENTDEV_API_UT_005,\n+\tEVENTDEV_API_UT_006,\n+\tEVENTDEV_API_UT_007,\n+\tEVENTDEV_API_UT_008,\n+\tEVENTDEV_API_UT_009,\n+\tEVENTDEV_API_UT_010,\n+\tEVENTDEV_API_UT_011,\n+\tEVENTDEV_API_UT_012,\n+\tEVENTDEV_API_UT_013,\n+\tEVENTDEV_API_UT_014,\n+\tEVENTDEV_API_UT_015,\n+\tEVENTDEV_API_UT_016,\n+\tEVENTDEV_API_UT_017,\n+\tEVENTDEV_API_UT_MAX\n+} eventdev_api_ut_ids_t;\n+\n+typedef enum eventdev_tc_status_s {\n+\tTC_FAILED,\n+\tTC_PASSED\n+} eventdev_tc_status_t;\n+\n+typedef struct eventdev_api_ut_status_s {\n+\tbool executed;\n+\teventdev_tc_status_t status;\n+} eventdev_api_ut_status_t;\n+\n+eventdev_api_ut_status_t api_ut_status[EVENTDEV_API_UT_MAX];\n+\n+#define CONFIG_NB_EVENT_QUEUES 2\n+#define CONFIG_NB_EVENT_PORTS 2\n+#define CONFIG_NB_EVENT_LIMIT 128\n+\n+uint8_t queues[CONFIG_NB_EVENT_QUEUES];\n+uint8_t ports[CONFIG_NB_EVENT_PORTS];\n+\n+/* FIXME: Check that dependent tests have executed */\n+\n+static int test_EVENTDEV_API_UT_001_rte_event_dev_count(void)\n+{\n+\tuint8_t count = rte_event_dev_count();\n+\n+\tif (count == 1) {\n+\t\tapi_ut_status[EVENTDEV_API_UT_001].status = TC_PASSED;\n+\t\treturn 0;\n+\t} else {\n+\t\tapi_ut_status[EVENTDEV_API_UT_001].status = TC_FAILED;\n+\t\treturn 1;\n+\t}\n+}\n+\n+static int test_EVENTDEV_API_UT_002_rte_event_dev_get_dev_id(void)\n+{\n+\tint8_t id;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tif (id < 0) {\n+\t\tapi_ut_status[EVENTDEV_API_UT_002].status = TC_FAILED;\n+\t\treturn 1;\n+\t}\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_abcd123\");\n+\n+\tif (id >= 0) {\n+\t\tapi_ut_status[EVENTDEV_API_UT_002].status = TC_FAILED;\n+\t\treturn 1;\n+\t}\n+\n+\tapi_ut_status[EVENTDEV_API_UT_002].status = TC_PASSED;\n+\treturn 0;\n+}\n+\n+static int test_EVENTDEV_API_UT_003_rte_event_dev_info_get(void)\n+{\n+\tstruct rte_event_dev_info info;\n+\tint8_t id;\n+\tint ret;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tret = rte_event_dev_info_get(id, &info);\n+\tif (ret)\n+\t\tgoto fail;\n+\n+\tif (strncmp(info.driver_name, \"evdev_sw\", sizeof(\"evdev_sw\")) != 0)\n+\t\tgoto fail;\n+\n+\t/* FIXME: Add checks for remaining fields */\n+\n+\tapi_ut_status[EVENTDEV_API_UT_003].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_003].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_004_rte_event_dev_configure(void)\n+{\n+\tstruct rte_event_dev_config config;\n+\tint8_t id;\n+\tint ret;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_004].executed = true;\n+\n+\tid = 
rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tconfig.nb_event_queues = CONFIG_NB_EVENT_QUEUES; /* FIXME: Test max */\n+\tconfig.nb_event_ports = CONFIG_NB_EVENT_PORTS; /* FIXME: Test max */\n+\tconfig.nb_events_limit = CONFIG_NB_EVENT_LIMIT; /* FIXME: Test max */\n+\tconfig.dequeue_wait_ns = 0; /* FIXME: Test max */\n+\n+\tret = rte_event_dev_configure(id, &config);\n+\tif (ret)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_004].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_004].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_005_rte_event_queue_count_pre(void)\n+{\n+\tint8_t id;\n+\tuint8_t count;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tcount = rte_event_queue_count(id);\n+\tif (count != CONFIG_NB_EVENT_QUEUES)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_005].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_005].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_006_rte_event_queue_setup(void)\n+{\n+\tstruct rte_event_queue_conf config;\n+\tint8_t id;\n+\tint ret;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_006].executed = true;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tconfig.event_queue_cfg = 0;\n+\tconfig.priority = 0;\n+\n+\tqueues[0] = 0;\n+\n+\tret = rte_event_queue_setup(id, queues[0], &config);\n+\tif (ret < 0)\n+\t\tgoto fail;\n+\n+\tconfig.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_CONSUMER;\n+\tconfig.priority = 0;\n+\n+\tqueues[1] = 1;\n+\n+\tret = rte_event_queue_setup(id, queues[1], &config);\n+\tif (ret < 0)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_006].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_006].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_007_rte_event_queue_count_post(void)\n+{\n+\tint8_t id;\n+\tuint8_t count;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_006].executed)\n+\t\ttest_EVENTDEV_API_UT_006_rte_event_queue_setup();\n+\tif (api_ut_status[EVENTDEV_API_UT_006].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tcount = rte_event_queue_count(id);\n+\tif (count != CONFIG_NB_EVENT_QUEUES)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_007].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_007].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_008_rte_event_port_count_pre(void)\n+{\n+\tint8_t id;\n+\tuint8_t count;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tcount = rte_event_port_count(id);\n+\tif (count != CONFIG_NB_EVENT_PORTS)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_008].status = TC_PASSED;\n+\treturn 
0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_008].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_009_rte_event_port_setup(void)\n+{\n+\tstruct rte_event_port_conf config;\n+\tint8_t id;\n+\tint ret;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_009].executed = true;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tconfig.dequeue_queue_depth = 4;\n+\tconfig.enqueue_queue_depth = 4;\n+\tconfig.new_event_threshold = 64;\n+\n+\tports[0] = 0;\n+\n+\tret = rte_event_port_setup(id, ports[0], &config);\n+\tif (ret < 0)\n+\t\tgoto fail;\n+\n+\tconfig.dequeue_queue_depth = 4;\n+\tconfig.enqueue_queue_depth = 4;\n+\tconfig.new_event_threshold = 64;\n+\n+\tports[1] = 1;\n+\n+\tret = rte_event_port_setup(id, ports[1], &config);\n+\tif (ret < 0)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_009].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_009].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_010_rte_event_port_count_post(void)\n+{\n+\tint8_t id;\n+\tuint8_t count;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_009].executed)\n+\t\ttest_EVENTDEV_API_UT_009_rte_event_port_setup();\n+\tif (api_ut_status[EVENTDEV_API_UT_009].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tcount = rte_event_port_count(id);\n+\tif (count != CONFIG_NB_EVENT_PORTS)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_010].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_010].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_011_rte_event_dev_start(void)\n+{\n+\tint8_t id;\n+\tint ret;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tret = rte_event_dev_start(id);\n+\tif (ret != 0)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_011].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_011].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_012_rte_event_port_link(void)\n+{\n+\tstruct rte_event_queue_link link;\n+\tint8_t id;\n+\tint ret;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_006].executed)\n+\t\ttest_EVENTDEV_API_UT_006_rte_event_queue_setup();\n+\tif (api_ut_status[EVENTDEV_API_UT_006].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_009].executed)\n+\t\ttest_EVENTDEV_API_UT_009_rte_event_port_setup();\n+\tif (api_ut_status[EVENTDEV_API_UT_009].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tlink.queue_id = queues[0];\n+\tlink.priority = 0;\n+\n+\t/* Connect port to previously configured scheduled queue */\n+\tret = rte_event_port_link(id, ports[0], &link, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: failed 
here\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\t/* Check idempotency of re-linking port to queues[0] */\n+\tret = rte_event_port_link(id, ports[0], &link, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: failed here\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tlink.queue_id = queues[1];\n+\tlink.priority = 0;\n+\n+\t/* Attempt to connect to FIFO queue as well */\n+\tret = rte_event_port_link(id, ports[0], &link, 1);\n+\tif (ret == 1) {\n+\t\tprintf(\"%d: failed here\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tlink.queue_id = queues[1];\n+\tlink.priority = 0;\n+\n+\t/* Connect port to previously configured FIFO queue */\n+\tret = rte_event_port_link(id, ports[1], &link, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: failed here\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tlink.queue_id = queues[0];\n+\tlink.priority = 0;\n+\n+\t/* Attempt to connect to scheduled queue as well */\n+\tret = rte_event_port_link(id, ports[1], &link, 1);\n+\tif (ret == 1) {\n+\t\tprintf(\"%d: failed here\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\t/* link to 2nd queue, enabling start() to pass later */\n+\tlink.queue_id = queues[1];\n+\tlink.priority = 0;\n+\tret = rte_event_port_link(id, ports[1], &link, 1);\n+\tif (ret == 1) {\n+\t\tprintf(\"%d: failed here\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tapi_ut_status[EVENTDEV_API_UT_012].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_012].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int test_EVENTDEV_API_UT_014_rte_event_dev_stop(void)\n+{\n+\tint8_t id;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\treturn 1;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\trte_event_dev_stop(id);\n+\n+\tapi_ut_status[EVENTDEV_API_UT_014].status = TC_PASSED;\n+\treturn 0;\n+}\n+\n+static int test_EVENTDEV_API_UT_015_rte_event_dev_close(void)\n+{\n+\tint8_t id;\n+\tint ret;\n+\n+\tif (!api_ut_status[EVENTDEV_API_UT_004].executed)\n+\t\ttest_EVENTDEV_API_UT_004_rte_event_dev_configure();\n+\tif (api_ut_status[EVENTDEV_API_UT_004].status == TC_FAILED)\n+\t\tgoto fail;\n+\n+\tid = rte_event_dev_get_dev_id(\"evdev_sw0\");\n+\n+\tret = rte_event_dev_close(id);\n+\tif (ret != 0)\n+\t\tgoto fail;\n+\n+\tapi_ut_status[EVENTDEV_API_UT_015].status = TC_PASSED;\n+\treturn 0;\n+\n+fail:\n+\tapi_ut_status[EVENTDEV_API_UT_015].status = TC_FAILED;\n+\treturn 1;\n+}\n+\n+static int\n+test_setup(void)\n+{\n+\treturn 0;\n+}\n+\n+static struct unit_test_suite eventdev_test_suite  = {\n+\t.setup = test_setup,\n+\t.suite_name = \"Eventdev Test Suite\",\n+\t.unit_test_cases = {\n+\t\t/* device aquisition and config */\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_001_rte_event_dev_count),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_002_rte_event_dev_get_dev_id),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_003_rte_event_dev_info_get),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_004_rte_event_dev_configure),\n+\t\t/* queue config */\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_005_rte_event_queue_count_pre),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_006_rte_event_queue_setup),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_007_rte_event_queue_count_post),\n+\t\t/* port config 
*/\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_008_rte_event_port_count_pre),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_009_rte_event_port_setup),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_010_rte_event_port_count_post),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_012_rte_event_port_link),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_011_rte_event_dev_start),\n+\t\t/* device cleanup */\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_014_rte_event_dev_stop),\n+\t\tTEST_CASE(test_EVENTDEV_API_UT_015_rte_event_dev_close),\n+\t\tTEST_CASES_END()\n+\t}\n+};\n+\n+static int\n+test_eventdev_unit(void)\n+{\n+\treturn unit_test_suite_runner(&eventdev_test_suite);\n+}\n+\n+REGISTER_TEST_COMMAND(eventdev_unit_autotest, test_eventdev_unit);\n",
    "prefixes": [
        "dpdk-dev",
        "5/7"
    ]
}
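
A short consumer-side sketch, again assuming Python with requests: read the patch metadata as plain JSON and download the raw mbox referenced by the "mbox" field above (the mbox is the form suitable for applying with git am):

import requests

URL = "http://patches.dpdk.org/api/patches/17051/"

# Without ?format=api the endpoint serves plain JSON.
patch = requests.get(URL).json()
print(patch["name"], "-", patch["state"])

# "mbox" points at the raw patch email; save it for `git am`.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("17051.mbox", "wb") as f:
    f.write(mbox.content)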