get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
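
A minimal usage sketch, assuming Python with the requests library: the URL and field names are taken from the response shown below, while the client code itself is illustrative and not part of the API output.

    import requests

    # Fetch the patch shown below as plain JSON (an Accept header, or the
    # DRF-style ?format=json query parameter, selects JSON output).
    url = "http://patches.dpdk.org/api/patches/22336/"
    resp = requests.get(url, headers={"Accept": "application/json"}, timeout=10)
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "[dpdk-dev,v5,15/20] test/eventdev: add basic SW tests"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # URL of the raw mbox, suitable for downloading and git am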

GET /api/patches/22336/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 22336,
    "url": "http://patches.dpdk.org/api/patches/22336/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1490374395-149320-16-git-send-email-harry.van.haaren@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1490374395-149320-16-git-send-email-harry.van.haaren@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1490374395-149320-16-git-send-email-harry.van.haaren@intel.com",
    "date": "2017-03-24T16:53:10",
    "name": "[dpdk-dev,v5,15/20] test/eventdev: add basic SW tests",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "430c4c754b1f508594a254705a31a2fd07d6d746",
    "submitter": {
        "id": 317,
        "url": "http://patches.dpdk.org/api/people/317/?format=api",
        "name": "Van Haaren, Harry",
        "email": "harry.van.haaren@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1490374395-149320-16-git-send-email-harry.van.haaren@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/22336/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/22336/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id EEAFDD324;\n\tFri, 24 Mar 2017 17:54:09 +0100 (CET)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n\tby dpdk.org (Postfix) with ESMTP id 344F9CFA2\n\tfor <dev@dpdk.org>; Fri, 24 Mar 2017 17:53:37 +0100 (CET)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n\tby fmsmga102.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t24 Mar 2017 09:53:36 -0700",
            "from silpixa00398672.ir.intel.com ([10.237.223.128])\n\tby orsmga004.jf.intel.com with ESMTP; 24 Mar 2017 09:53:35 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=simple/simple;\n\td=intel.com; i=@intel.com; q=dns/txt; s=intel;\n\tt=1490374417; x=1521910417;\n\th=from:to:cc:subject:date:message-id:in-reply-to: references;\n\tbh=N0I+NC5ulQ7DcMGV4aLZzr7oKsGSwK6GOju38gL581c=;\n\tb=AnOKnsGgxifhdv6F4BFskgCpv60e2wqvfxamKKRj88fwI1fAxOuhmh43\n\tNK9Y+JfyDs5hGaZn979N0mzTHZR+CA==;",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.36,215,1486454400\"; d=\"scan'208\";a=\"70349165\"",
        "From": "Harry van Haaren <harry.van.haaren@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "jerin.jacob@caviumnetworks.com,\n\tHarry van Haaren <harry.van.haaren@intel.com>,\n\tBruce Richardson <bruce.richardson@intel.com>,\n\tDavid Hunt <david.hunt@intel.com>",
        "Date": "Fri, 24 Mar 2017 16:53:10 +0000",
        "Message-Id": "<1490374395-149320-16-git-send-email-harry.van.haaren@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1490374395-149320-1-git-send-email-harry.van.haaren@intel.com>",
        "References": "<489175012-101439-1-git-send-email-harry.van.haaren@intel.com>\n\t<1490374395-149320-1-git-send-email-harry.van.haaren@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v5 15/20] test/eventdev: add basic SW tests",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This commit adds basic enqueue and dequeue unit tests,\nsome negative invalid tests, and configuration.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nSigned-off-by: David Hunt <david.hunt@intel.com>\nSigned-off-by: Harry van Haaren <harry.van.haaren@intel.com>\n\n---\n\nv5:\n- Work around struct element bitfield initialization for old gcc versions\n---\n test/test/test_eventdev_sw.c | 1060 ++++++++++++++++++++++++++++++++++++++++++\n 1 file changed, 1060 insertions(+)",
    "diff": "diff --git a/test/test/test_eventdev_sw.c b/test/test/test_eventdev_sw.c\nindex 808b7b3..f294cb9 100644\n--- a/test/test/test_eventdev_sw.c\n+++ b/test/test/test_eventdev_sw.c\n@@ -64,6 +64,8 @@ struct test {\n \tint nb_qids;\n };\n \n+static struct rte_event release_ev;\n+\n static inline struct rte_mbuf *\n rte_gen_arp(int portid, struct rte_mempool *mp)\n {\n@@ -307,12 +309,1004 @@ test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)\n \treturn 0;\n }\n \n+static int\n+test_single_directed_packet(struct test *t)\n+{\n+\tconst int rx_enq = 0;\n+\tconst int wrk_enq = 2;\n+\tint err;\n+\n+\t/* Create instance with 3 directed QIDs going to 3 ports */\n+\tif (init(t, 3, 3) < 0 ||\n+\t\t\tcreate_ports(t, 3) < 0 ||\n+\t\t\tcreate_directed_qids(t, 3, t->port) < 0)\n+\t\treturn -1;\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/************** FORWARD ****************/\n+\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\tstruct rte_event ev = {\n+\t\t\t.op = RTE_EVENT_OP_NEW,\n+\t\t\t.queue_id = wrk_enq,\n+\t\t\t.mbuf = arp,\n+\t};\n+\n+\tif (!arp) {\n+\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tconst uint32_t MAGIC_SEQN = 4711;\n+\tarp->seqn = MAGIC_SEQN;\n+\n+\t/* generate pkt and enqueue */\n+\terr = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);\n+\tif (err < 0) {\n+\t\tprintf(\"%d: error failed to enqueue\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Run schedule() as dir packets may need to be re-ordered */\n+\trte_event_schedule(evdev);\n+\n+\tstruct test_event_dev_stats stats;\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: error failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (stats.port_rx_pkts[rx_enq] != 1) {\n+\t\tprintf(\"%d: error stats incorrect for directed port\\n\",\n+\t\t\t\t__LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tuint32_t deq_pkts;\n+\tdeq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);\n+\tif (deq_pkts != 1) {\n+\t\tprintf(\"%d: error failed to deq\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (stats.port_rx_pkts[wrk_enq] != 0 &&\n+\t\t\tstats.port_rx_pkts[wrk_enq] != 1) {\n+\t\tprintf(\"%d: error directed stats post-dequeue\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (ev.mbuf->seqn != MAGIC_SEQN) {\n+\t\tprintf(\"%d: error magic sequence number not dequeued\\n\",\n+\t\t\t\t__LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\trte_pktmbuf_free(ev.mbuf);\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+burst_packets(struct test *t)\n+{\n+\t/************** CONFIG ****************/\n+\tuint32_t i;\n+\tint err;\n+\tint ret;\n+\n+\t/* Create instance with 2 ports and 2 queues */\n+\tif (init(t, 2, 2) < 0 ||\n+\t\t\tcreate_ports(t, 2) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 2) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* CQ mapping to QID */\n+\tret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: error mapping lb qid0\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\tret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: error mapping lb qid1\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/************** FORWARD 
****************/\n+\tconst uint32_t rx_port = 0;\n+\tconst uint32_t NUM_PKTS = 2;\n+\n+\tfor (i = 0; i < NUM_PKTS; i++) {\n+\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\t\tif (!arp) {\n+\t\t\tprintf(\"%d: error generating pkt\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.op = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = i % 2,\n+\t\t\t\t.flow_id = i % 3,\n+\t\t\t\t.mbuf = arp,\n+\t\t};\n+\t\t/* generate pkt and enqueue */\n+\t\terr = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);\n+\t\tif (err < 0) {\n+\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\trte_event_schedule(evdev);\n+\n+\t/* Check stats for all NUM_PKTS arrived to sched core */\n+\tstruct test_event_dev_stats stats;\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\tif (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {\n+\t\tprintf(\"%d: Sched core didn't receive all %d pkts\\n\",\n+\t\t\t\t__LINE__, NUM_PKTS);\n+\t\trte_event_dev_dump(evdev, stdout);\n+\t\treturn -1;\n+\t}\n+\n+\tuint32_t deq_pkts;\n+\tint p;\n+\n+\tdeq_pkts = 0;\n+\t/******** DEQ QID 1 *******/\n+\tdo {\n+\t\tstruct rte_event ev;\n+\t\tp = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);\n+\t\tdeq_pkts += p;\n+\t\trte_pktmbuf_free(ev.mbuf);\n+\t} while (p);\n+\n+\tif (deq_pkts != NUM_PKTS/2) {\n+\t\tprintf(\"%d: Half of NUM_PKTS didn't arrive at port 1\\n\",\n+\t\t\t\t__LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/******** DEQ QID 2 *******/\n+\tdeq_pkts = 0;\n+\tdo {\n+\t\tstruct rte_event ev;\n+\t\tp = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);\n+\t\tdeq_pkts += p;\n+\t\trte_pktmbuf_free(ev.mbuf);\n+\t} while (p);\n+\tif (deq_pkts != NUM_PKTS/2) {\n+\t\tprintf(\"%d: Half of NUM_PKTS didn't arrive at port 2\\n\",\n+\t\t\t\t__LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+abuse_inflights(struct test *t)\n+{\n+\tconst int rx_enq = 0;\n+\tconst int wrk_enq = 2;\n+\tint err;\n+\n+\t/* Create instance with 4 ports */\n+\tif (init(t, 1, 4) < 0 ||\n+\t\t\tcreate_ports(t, 4) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* CQ mapping to QID */\n+\terr = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);\n+\tif (err != 1) {\n+\t\tprintf(\"%d: error mapping lb qid\\n\", __LINE__);\n+\t\tcleanup(t);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Enqueue op only */\n+\terr = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);\n+\tif (err < 0) {\n+\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* schedule */\n+\trte_event_schedule(evdev);\n+\n+\tstruct test_event_dev_stats stats;\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (stats.rx_pkts != 0 ||\n+\t\t\tstats.tx_pkts != 0 ||\n+\t\t\tstats.port_inflight[wrk_enq] != 0) {\n+\t\tprintf(\"%d: Sched core didn't handle pkt as expected\\n\",\n+\t\t\t\t__LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+port_reconfig_credits(struct test *t)\n+{\n+\tif (init(t, 1, 1) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn 
-1;\n+\t}\n+\n+\tuint32_t i;\n+\tconst uint32_t NUM_ITERS = 32;\n+\tfor (i = 0; i < NUM_ITERS; i++) {\n+\t\tconst struct rte_event_queue_conf conf = {\n+\t\t\t.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,\n+\t\t\t.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,\n+\t\t\t.nb_atomic_flows = 1024,\n+\t\t\t.nb_atomic_order_sequences = 1024,\n+\t\t};\n+\t\tif (rte_event_queue_setup(evdev, 0, &conf) < 0) {\n+\t\t\tprintf(\"%d: error creating qid\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tt->qid[0] = 0;\n+\n+\t\tstatic const struct rte_event_port_conf port_conf = {\n+\t\t\t\t.new_event_threshold = 128,\n+\t\t\t\t.dequeue_depth = 32,\n+\t\t\t\t.enqueue_depth = 64,\n+\t\t};\n+\t\tif (rte_event_port_setup(evdev, 0, &port_conf) < 0) {\n+\t\t\tprintf(\"%d Error setting up port\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tint links = rte_event_port_link(evdev, 0, NULL, NULL, 0);\n+\t\tif (links != 1) {\n+\t\t\tprintf(\"%d: error mapping lb qid\\n\", __LINE__);\n+\t\t\tgoto fail;\n+\t\t}\n+\n+\t\tif (rte_event_dev_start(evdev) < 0) {\n+\t\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\t\tgoto fail;\n+\t\t}\n+\n+\t\tconst uint32_t NPKTS = 1;\n+\t\tuint32_t j;\n+\t\tfor (j = 0; j < NPKTS; j++) {\n+\t\t\tstruct rte_event ev;\n+\t\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\t\t\tif (!arp) {\n+\t\t\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\t\t\tgoto fail;\n+\t\t\t}\n+\t\t\tev.queue_id = t->qid[0];\n+\t\t\tev.op = RTE_EVENT_OP_NEW;\n+\t\t\tev.mbuf = arp;\n+\t\t\tint err = rte_event_enqueue_burst(evdev, 0, &ev, 1);\n+\t\t\tif (err != 1) {\n+\t\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\t\trte_event_dev_dump(0, stdout);\n+\t\t\t\tgoto fail;\n+\t\t\t}\n+\t\t}\n+\n+\t\trte_event_schedule(evdev);\n+\n+\t\tstruct rte_event ev[NPKTS];\n+\t\tint deq = rte_event_dequeue_burst(evdev, t->port[0], ev,\n+\t\t\t\t\t\t\tNPKTS, 0);\n+\t\tif (deq != 1)\n+\t\t\tprintf(\"%d error; no packet dequeued\\n\", __LINE__);\n+\n+\t\t/* let cleanup below stop the device on last iter */\n+\t\tif (i != NUM_ITERS-1)\n+\t\t\trte_event_dev_stop(evdev);\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+fail:\n+\tcleanup(t);\n+\treturn -1;\n+}\n+\n+static int\n+port_single_lb_reconfig(struct test *t)\n+{\n+\tif (init(t, 2, 2) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tstatic const struct rte_event_queue_conf conf_lb_atomic = {\n+\t\t.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,\n+\t\t.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,\n+\t\t.nb_atomic_flows = 1024,\n+\t\t.nb_atomic_order_sequences = 1024,\n+\t};\n+\tif (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {\n+\t\tprintf(\"%d: error creating qid\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tstatic const struct rte_event_queue_conf conf_single_link = {\n+\t\t.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,\n+\t\t.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,\n+\t\t.nb_atomic_flows = 1024,\n+\t\t.nb_atomic_order_sequences = 1024,\n+\t};\n+\tif (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {\n+\t\tprintf(\"%d: error creating qid\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tstruct rte_event_port_conf port_conf = {\n+\t\t.new_event_threshold = 128,\n+\t\t.dequeue_depth = 32,\n+\t\t.enqueue_depth = 64,\n+\t};\n+\tif (rte_event_port_setup(evdev, 0, &port_conf) < 0) {\n+\t\tprintf(\"%d Error setting up port\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\tif (rte_event_port_setup(evdev, 1, &port_conf) < 0) {\n+\t\tprintf(\"%d Error setting up port\\n\", 
__LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\t/* link port to lb queue */\n+\tuint8_t queue_id = 0;\n+\tif (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {\n+\t\tprintf(\"%d: error creating link for qid\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tint ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);\n+\tif (ret != 1) {\n+\t\tprintf(\"%d: Error unlinking lb port\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tqueue_id = 1;\n+\tif (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {\n+\t\tprintf(\"%d: error creating link for qid\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tqueue_id = 0;\n+\tint err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);\n+\tif (err != 1) {\n+\t\tprintf(\"%d: error mapping lb qid\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\tgoto fail;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+fail:\n+\tcleanup(t);\n+\treturn -1;\n+}\n+\n+static int\n+ordered_reconfigure(struct test *t)\n+{\n+\tif (init(t, 1, 1) < 0 ||\n+\t\t\tcreate_ports(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tconst struct rte_event_queue_conf conf = {\n+\t\t\t.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,\n+\t\t\t.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,\n+\t\t\t.nb_atomic_flows = 1024,\n+\t\t\t.nb_atomic_order_sequences = 1024,\n+\t};\n+\n+\tif (rte_event_queue_setup(evdev, 0, &conf) < 0) {\n+\t\tprintf(\"%d: error creating qid\\n\", __LINE__);\n+\t\tgoto failed;\n+\t}\n+\n+\tif (rte_event_queue_setup(evdev, 0, &conf) < 0) {\n+\t\tprintf(\"%d: error creating qid, for 2nd time\\n\", __LINE__);\n+\t\tgoto failed;\n+\t}\n+\n+\trte_event_port_link(evdev, t->port[0], NULL, NULL, 0);\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+failed:\n+\tcleanup(t);\n+\treturn -1;\n+}\n+\n+static int\n+invalid_qid(struct test *t)\n+{\n+\tstruct test_event_dev_stats stats;\n+\tconst int rx_enq = 0;\n+\tint err;\n+\tuint32_t i;\n+\n+\tif (init(t, 1, 4) < 0 ||\n+\t\t\tcreate_ports(t, 4) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* CQ mapping to QID */\n+\tfor (i = 0; i < 4; i++) {\n+\t\terr = rte_event_port_link(evdev, t->port[i], &t->qid[0],\n+\t\t\t\tNULL, 1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: error mapping port 1 qid\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/*\n+\t * Send in a packet with an invalid qid to the scheduler.\n+\t * We should see the packed enqueued OK, but the inflights for\n+\t * that packet should not be incremented, and the rx_dropped\n+\t * should be incremented.\n+\t */\n+\tstatic uint32_t flows1[] = {20};\n+\n+\tfor (i = 0; i < RTE_DIM(flows1); i++) {\n+\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\t\tif (!arp) {\n+\t\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.op = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = t->qid[0] + flows1[i],\n+\t\t\t\t.flow_id = i,\n+\t\t\t\t.mbuf = arp,\n+\t\t};\n+\t\t/* generate pkt and enqueue */\n+\t\terr = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);\n+\t\tif (err < 0) {\n+\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\treturn 
-1;\n+\t\t}\n+\t}\n+\n+\t/* call the scheduler */\n+\trte_event_schedule(evdev);\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/*\n+\t * Now check the resulting inflights on the port, and the rx_dropped.\n+\t */\n+\tif (stats.port_inflight[0] != 0) {\n+\t\tprintf(\"%d:%s: port 1 inflight count not correct\\n\", __LINE__,\n+\t\t\t\t__func__);\n+\t\trte_event_dev_dump(evdev, stdout);\n+\t\treturn -1;\n+\t}\n+\tif (stats.port_rx_dropped[0] != 1) {\n+\t\tprintf(\"%d:%s: port 1 drops\\n\", __LINE__, __func__);\n+\t\trte_event_dev_dump(evdev, stdout);\n+\t\treturn -1;\n+\t}\n+\t/* each packet drop should only be counted in one place - port or dev */\n+\tif (stats.rx_dropped != 0) {\n+\t\tprintf(\"%d:%s: port 1 dropped count not correct\\n\", __LINE__,\n+\t\t\t\t__func__);\n+\t\trte_event_dev_dump(evdev, stdout);\n+\t\treturn -1;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+single_packet(struct test *t)\n+{\n+\tconst uint32_t MAGIC_SEQN = 7321;\n+\tstruct rte_event ev;\n+\tstruct test_event_dev_stats stats;\n+\tconst int rx_enq = 0;\n+\tconst int wrk_enq = 2;\n+\tint err;\n+\n+\t/* Create instance with 4 ports */\n+\tif (init(t, 1, 4) < 0 ||\n+\t\t\tcreate_ports(t, 4) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* CQ mapping to QID */\n+\terr = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);\n+\tif (err != 1) {\n+\t\tprintf(\"%d: error mapping lb qid\\n\", __LINE__);\n+\t\tcleanup(t);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/************** Gen pkt and enqueue ****************/\n+\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\tif (!arp) {\n+\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tev.op = RTE_EVENT_OP_NEW;\n+\tev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;\n+\tev.mbuf = arp;\n+\tev.queue_id = 0;\n+\tev.flow_id = 3;\n+\tarp->seqn = MAGIC_SEQN;\n+\n+\terr = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);\n+\tif (err < 0) {\n+\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\trte_event_schedule(evdev);\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (stats.rx_pkts != 1 ||\n+\t\t\tstats.tx_pkts != 1 ||\n+\t\t\tstats.port_inflight[wrk_enq] != 1) {\n+\t\tprintf(\"%d: Sched core didn't handle pkt as expected\\n\",\n+\t\t\t\t__LINE__);\n+\t\trte_event_dev_dump(evdev, stdout);\n+\t\treturn -1;\n+\t}\n+\n+\tuint32_t deq_pkts;\n+\n+\tdeq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);\n+\tif (deq_pkts < 1) {\n+\t\tprintf(\"%d: Failed to deq\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (ev.mbuf->seqn != MAGIC_SEQN) {\n+\t\tprintf(\"%d: magic sequence number not dequeued\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\trte_pktmbuf_free(ev.mbuf);\n+\terr = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);\n+\tif (err < 0) {\n+\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\treturn 
-1;\n+\t}\n+\trte_event_schedule(evdev);\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (stats.port_inflight[wrk_enq] != 0) {\n+\t\tprintf(\"%d: port inflight not correct\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+inflight_counts(struct test *t)\n+{\n+\tstruct rte_event ev;\n+\tstruct test_event_dev_stats stats;\n+\tconst int rx_enq = 0;\n+\tconst int p1 = 1;\n+\tconst int p2 = 2;\n+\tint err;\n+\tint i;\n+\n+\t/* Create instance with 4 ports */\n+\tif (init(t, 2, 3) < 0 ||\n+\t\t\tcreate_ports(t, 3) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 2) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* CQ mapping to QID */\n+\terr = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);\n+\tif (err != 1) {\n+\t\tprintf(\"%d: error mapping lb qid\\n\", __LINE__);\n+\t\tcleanup(t);\n+\t\treturn -1;\n+\t}\n+\terr = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);\n+\tif (err != 1) {\n+\t\tprintf(\"%d: error mapping lb qid\\n\", __LINE__);\n+\t\tcleanup(t);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/************** FORWARD ****************/\n+#define QID1_NUM 5\n+\tfor (i = 0; i < QID1_NUM; i++) {\n+\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\n+\t\tif (!arp) {\n+\t\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\tev.queue_id =  t->qid[0];\n+\t\tev.op = RTE_EVENT_OP_NEW;\n+\t\tev.mbuf = arp;\n+\t\terr = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\t}\n+#define QID2_NUM 3\n+\tfor (i = 0; i < QID2_NUM; i++) {\n+\t\tstruct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);\n+\n+\t\tif (!arp) {\n+\t\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\t\tev.queue_id =  t->qid[1];\n+\t\tev.op = RTE_EVENT_OP_NEW;\n+\t\tev.mbuf = arp;\n+\t\terr = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\t}\n+\n+\t/* schedule */\n+\trte_event_schedule(evdev);\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (err) {\n+\t\tprintf(\"%d: failed to get stats\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\n+\tif (stats.rx_pkts != QID1_NUM + QID2_NUM ||\n+\t\t\tstats.tx_pkts != QID1_NUM + QID2_NUM) {\n+\t\tprintf(\"%d: Sched core didn't handle pkt as expected\\n\",\n+\t\t\t\t__LINE__);\n+\t\tgoto err;\n+\t}\n+\n+\tif (stats.port_inflight[p1] != QID1_NUM) {\n+\t\tprintf(\"%d: %s port 1 inflight not correct\\n\", __LINE__,\n+\t\t\t\t__func__);\n+\t\tgoto err;\n+\t}\n+\tif (stats.port_inflight[p2] != QID2_NUM) {\n+\t\tprintf(\"%d: %s port 2 inflight not correct\\n\", __LINE__,\n+\t\t\t\t__func__);\n+\t\tgoto err;\n+\t}\n+\n+\t/************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/\n+\t/* port 1 */\n+\tstruct rte_event events[QID1_NUM + QID2_NUM];\n+\tuint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,\n+\t\t\tRTE_DIM(events), 0);\n+\n+\tif (deq_pkts != QID1_NUM) {\n+\t\tprintf(\"%d: Port 1: DEQUEUE inflight failed\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (stats.port_inflight[p1] != QID1_NUM) {\n+\t\tprintf(\"%d: port 1 inflight decrement after DEQ != 0\\n\",\n+\t\t\t\t__LINE__);\n+\t\tgoto 
err;\n+\t}\n+\tfor (i = 0; i < QID1_NUM; i++) {\n+\t\terr = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,\n+\t\t\t\t1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: %s rte enqueue of inf release failed\\n\",\n+\t\t\t\t__LINE__, __func__);\n+\t\t\tgoto err;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * As the scheduler core decrements inflights, it needs to run to\n+\t * process packets to act on the drop messages\n+\t */\n+\trte_event_schedule(evdev);\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (stats.port_inflight[p1] != 0) {\n+\t\tprintf(\"%d: port 1 inflight NON NULL after DROP\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\n+\t/* port2 */\n+\tdeq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,\n+\t\t\tRTE_DIM(events), 0);\n+\tif (deq_pkts != QID2_NUM) {\n+\t\tprintf(\"%d: Port 2: DEQUEUE inflight failed\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (stats.port_inflight[p2] != QID2_NUM) {\n+\t\tprintf(\"%d: port 1 inflight decrement after DEQ != 0\\n\",\n+\t\t\t\t__LINE__);\n+\t\tgoto err;\n+\t}\n+\tfor (i = 0; i < QID2_NUM; i++) {\n+\t\terr = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,\n+\t\t\t\t1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: %s rte enqueue of inf release failed\\n\",\n+\t\t\t\t__LINE__, __func__);\n+\t\t\tgoto err;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * As the scheduler core decrements inflights, it needs to run to\n+\t * process packets to act on the drop messages\n+\t */\n+\trte_event_schedule(evdev);\n+\n+\terr = test_event_dev_stats_get(evdev, &stats);\n+\tif (stats.port_inflight[p2] != 0) {\n+\t\tprintf(\"%d: port 2 inflight NON NULL after DROP\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\tcleanup(t);\n+\treturn 0;\n+\n+err:\n+\trte_event_dev_dump(evdev, stdout);\n+\tcleanup(t);\n+\treturn -1;\n+}\n+\n+static int\n+parallel_basic(struct test *t, int check_order)\n+{\n+\tconst uint8_t rx_port = 0;\n+\tconst uint8_t w1_port = 1;\n+\tconst uint8_t w3_port = 3;\n+\tconst uint8_t tx_port = 4;\n+\tint err;\n+\tint i;\n+\tuint32_t deq_pkts, j;\n+\tstruct rte_mbuf *mbufs[3];\n+\tstruct rte_mbuf *mbufs_out[3];\n+\tconst uint32_t MAGIC_SEQN = 1234;\n+\n+\t/* Create instance with 4 ports */\n+\tif (init(t, 2, tx_port + 1) < 0 ||\n+\t\t\tcreate_ports(t, tx_port + 1) < 0 ||\n+\t\t\t(check_order ?  create_ordered_qids(t, 1) :\n+\t\t\t\tcreate_unordered_qids(t, 1)) < 0 ||\n+\t\t\tcreate_directed_qids(t, 1, &tx_port)) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/*\n+\t * CQ mapping to QID\n+\t * We need three ports, all mapped to the same ordered qid0. 
Then we'll\n+\t * take a packet out to each port, re-enqueue in reverse order,\n+\t * then make sure the reordering has taken place properly when we\n+\t * dequeue from the tx_port.\n+\t *\n+\t * Simplified test setup diagram:\n+\t *\n+\t * rx_port        w1_port\n+\t *        \\     /         \\\n+\t *         qid0 - w2_port - qid1\n+\t *              \\         /     \\\n+\t *                w3_port        tx_port\n+\t */\n+\t/* CQ mapping to QID for LB ports (directed mapped on create) */\n+\tfor (i = w1_port; i <= w3_port; i++) {\n+\t\terr = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,\n+\t\t\t\t1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: error mapping lb qid\\n\", __LINE__);\n+\t\t\tcleanup(t);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Enqueue 3 packets to the rx port */\n+\tfor (i = 0; i < 3; i++) {\n+\t\tstruct rte_event ev;\n+\t\tmbufs[i] = rte_gen_arp(0, t->mbuf_pool);\n+\t\tif (!mbufs[i]) {\n+\t\t\tprintf(\"%d: gen of pkt failed\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tev.queue_id = t->qid[0];\n+\t\tev.op = RTE_EVENT_OP_NEW;\n+\t\tev.mbuf = mbufs[i];\n+\t\tmbufs[i]->seqn = MAGIC_SEQN + i;\n+\n+\t\t/* generate pkt and enqueue */\n+\t\terr = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: Failed to enqueue pkt %u, retval = %u\\n\",\n+\t\t\t\t\t__LINE__, i, err);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\trte_event_schedule(evdev);\n+\n+\t/* use extra slot to make logic in loops easier */\n+\tstruct rte_event deq_ev[w3_port + 1];\n+\n+\t/* Dequeue the 3 packets, one from each worker port */\n+\tfor (i = w1_port; i <= w3_port; i++) {\n+\t\tdeq_pkts = rte_event_dequeue_burst(evdev, t->port[i],\n+\t\t\t\t&deq_ev[i], 1, 0);\n+\t\tif (deq_pkts != 1) {\n+\t\t\tprintf(\"%d: Failed to deq\\n\", __LINE__);\n+\t\t\trte_event_dev_dump(evdev, stdout);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\t/* Enqueue each packet in reverse order, flushing after each one */\n+\tfor (i = w3_port; i >= w1_port; i--) {\n+\n+\t\tdeq_ev[i].op = RTE_EVENT_OP_FORWARD;\n+\t\tdeq_ev[i].queue_id = t->qid[1];\n+\t\terr = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);\n+\t\tif (err != 1) {\n+\t\t\tprintf(\"%d: Failed to enqueue\\n\", __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\trte_event_schedule(evdev);\n+\n+\t/* dequeue from the tx ports, we should get 3 packets */\n+\tdeq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,\n+\t\t\t3, 0);\n+\n+\t/* Check to see if we've got all 3 packets */\n+\tif (deq_pkts != 3) {\n+\t\tprintf(\"%d: expected 3 pkts at tx port got %d from port %d\\n\",\n+\t\t\t__LINE__, deq_pkts, tx_port);\n+\t\trte_event_dev_dump(evdev, stdout);\n+\t\treturn 1;\n+\t}\n+\n+\t/* Check to see if the sequence numbers are in expected order */\n+\tif (check_order) {\n+\t\tfor (j = 0 ; j < deq_pkts ; j++) {\n+\t\t\tif (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {\n+\t\t\t\tprintf(\n+\t\t\t\t\t\"%d: Incorrect sequence number(%d) from port %d\\n\",\n+\t\t\t\t\t__LINE__, mbufs_out[j]->seqn, tx_port);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* Destroy the instance */\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n+static int\n+ordered_basic(struct test *t)\n+{\n+\treturn parallel_basic(t, 1);\n+}\n+\n+static int\n+unordered_basic(struct test *t)\n+{\n+\treturn parallel_basic(t, 0);\n+}\n+\n static struct rte_mempool *eventdev_func_mempool;\n \n static int\n test_sw_eventdev(void)\n {\n 
\tstruct test *t = malloc(sizeof(struct test));\n+\tint ret;\n+\n+\t/* manually initialize the op, older gcc's complain on static\n+\t * initialization of struct elements that are a bitfield.\n+\t */\n+\trelease_ev.op = RTE_EVENT_OP_RELEASE;\n \n \tconst char *eventdev_name = \"event_sw0\";\n \tevdev = rte_event_dev_get_dev_id(eventdev_name);\n@@ -346,6 +1340,72 @@ test_sw_eventdev(void)\n \t}\n \tt->mbuf_pool = eventdev_func_mempool;\n \n+\tprintf(\"*** Running Single Directed Packet test...\\n\");\n+\tret = test_single_directed_packet(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Single Directed Packet test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Single Load Balanced Packet test...\\n\");\n+\tret = single_packet(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Single Packet test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Unordered Basic test...\\n\");\n+\tret = unordered_basic(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR -  Unordered Basic test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Ordered Basic test...\\n\");\n+\tret = ordered_basic(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR -  Ordered Basic test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Burst Packets test...\\n\");\n+\tret = burst_packets(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Burst Packets test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Invalid QID test...\\n\");\n+\tret = invalid_qid(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Invalid QID test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Inflight Count test...\\n\");\n+\tret = inflight_counts(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Inflight Count test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Abuse Inflights test...\\n\");\n+\tret = abuse_inflights(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Abuse Inflights test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Ordered Reconfigure test...\\n\");\n+\tret = ordered_reconfigure(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Ordered Reconfigure test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Port LB Single Reconfig test...\\n\");\n+\tret = port_single_lb_reconfig(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Port LB Single Reconfig test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tprintf(\"*** Running Port Reconfig Credits test...\\n\");\n+\tret = port_reconfig_credits(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Port Reconfig Credits Reset test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n \t/*\n \t * Free test instance, leaving mempool initialized, and a pointer to it\n \t * in static eventdev_func_mempool, as it is re-used on re-runs\n",
    "prefixes": [
        "dpdk-dev",
        "v5",
        "15/20"
    ]
}
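
A hedged sketch of the write operations advertised in the Allow header (PUT, PATCH). It assumes an account with a Patchwork API token and maintainer rights on the project, and that "accepted" is a valid state name on this instance; the token placeholder and the new state value are illustrative assumptions, not taken from the response above.

    import requests

    # Partially update the patch (HTTP PATCH): change only the "state" field.
    # Patchwork's REST API expects token authentication for write operations.
    url = "http://patches.dpdk.org/api/patches/22336/"
    headers = {
        "Accept": "application/json",
        "Authorization": "Token <your-api-token>",  # placeholder, not a real token
    }
    resp = requests.patch(url, headers=headers, json={"state": "accepted"}, timeout=10)
    resp.raise_for_status()
    print(resp.json()["state"])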