get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/22947/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 22947,
    "url": "http://patches.dpdk.org/api/patches/22947/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1490902250-32164-20-git-send-email-harry.van.haaren@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1490902250-32164-20-git-send-email-harry.van.haaren@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1490902250-32164-20-git-send-email-harry.van.haaren@intel.com",
    "date": "2017-03-30T19:30:47",
    "name": "[dpdk-dev,v7,19/22] test/eventdev: add SW deadlock tests",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "3ac356808a17651ce56169a725282bd4d6be05c3",
    "submitter": {
        "id": 317,
        "url": "http://patches.dpdk.org/api/people/317/?format=api",
        "name": "Van Haaren, Harry",
        "email": "harry.van.haaren@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1490902250-32164-20-git-send-email-harry.van.haaren@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/22947/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/22947/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 77942D0FC;\n\tThu, 30 Mar 2017 21:32:50 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n\tby dpdk.org (Postfix) with ESMTP id DDE4D37AF\n\tfor <dev@dpdk.org>; Thu, 30 Mar 2017 21:31:28 +0200 (CEST)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby orsmga105.jf.intel.com with ESMTP; 30 Mar 2017 12:31:28 -0700",
            "from silpixa00398672.ir.intel.com ([10.237.223.128])\n\tby fmsmga002.fm.intel.com with ESMTP; 30 Mar 2017 12:31:26 -0700"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos; i=\"5.36,248,1486454400\"; d=\"scan'208\";\n\ta=\"1148967758\"",
        "From": "Harry van Haaren <harry.van.haaren@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "jerin.jacob@caviumnetworks.com,\n\tHarry van Haaren <harry.van.haaren@intel.com>,\n\tBruce Richardson <bruce.richardson@intel.com>,\n\tDavid Hunt <david.hunt@intel.com>",
        "Date": "Thu, 30 Mar 2017 20:30:47 +0100",
        "Message-Id": "<1490902250-32164-20-git-send-email-harry.van.haaren@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1490902250-32164-1-git-send-email-harry.van.haaren@intel.com>",
        "References": "<1490829963-106807-1-git-send-email-harry.van.haaren@intel.com>\n\t<1490902250-32164-1-git-send-email-harry.van.haaren@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v7 19/22] test/eventdev: add SW deadlock tests",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This commit adds the worker loopback test to verify\nthat the deadlock avoidance scheme is functioning, and\na holb (head-of-line-blocking) test to ensure the head\nof line blocking avoidance is correct.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nSigned-off-by: David Hunt <david.hunt@intel.com>\nSigned-off-by: Harry van Haaren <harry.van.haaren@intel.com>\n\nAcked-by: Anatoly Burakov <anatoly.burakov@intel.com>\n---\n test/test/test_eventdev_sw.c | 398 +++++++++++++++++++++++++++++++++++++++++++\n 1 file changed, 398 insertions(+)",
    "diff": "diff --git a/test/test/test_eventdev_sw.c b/test/test/test_eventdev_sw.c\nindex 89e17b4..fd6447e 100644\n--- a/test/test/test_eventdev_sw.c\n+++ b/test/test/test_eventdev_sw.c\n@@ -100,6 +100,69 @@ rte_gen_arp(int portid, struct rte_mempool *mp)\n \treturn m;\n }\n \n+static void\n+xstats_print(void)\n+{\n+\tconst uint32_t XSTATS_MAX = 1024;\n+\tuint32_t i;\n+\tuint32_t ids[XSTATS_MAX];\n+\tuint64_t values[XSTATS_MAX];\n+\tstruct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];\n+\n+\tfor (i = 0; i < XSTATS_MAX; i++)\n+\t\tids[i] = i;\n+\n+\t/* Device names / values */\n+\tint ret = rte_event_dev_xstats_names_get(evdev,\n+\t\t\t\t\tRTE_EVENT_DEV_XSTATS_DEVICE, 0,\n+\t\t\t\t\txstats_names, ids, XSTATS_MAX);\n+\tif (ret < 0) {\n+\t\tprintf(\"%d: xstats names get() returned error\\n\",\n+\t\t\t__LINE__);\n+\t\treturn;\n+\t}\n+\tret = rte_event_dev_xstats_get(evdev,\n+\t\t\t\t\tRTE_EVENT_DEV_XSTATS_DEVICE,\n+\t\t\t\t\t0, ids, values, ret);\n+\tif (ret > (signed int)XSTATS_MAX)\n+\t\tprintf(\"%s %d: more xstats available than space\\n\",\n+\t\t\t\t__func__, __LINE__);\n+\tfor (i = 0; (signed int)i < ret; i++) {\n+\t\tprintf(\"%d : %s : %\"PRIu64\"\\n\",\n+\t\t\t\ti, xstats_names[i].name, values[i]);\n+\t}\n+\n+\t/* Port names / values */\n+\tret = rte_event_dev_xstats_names_get(evdev,\n+\t\t\t\t\tRTE_EVENT_DEV_XSTATS_PORT, 0,\n+\t\t\t\t\txstats_names, ids, XSTATS_MAX);\n+\tret = rte_event_dev_xstats_get(evdev,\n+\t\t\t\t\tRTE_EVENT_DEV_XSTATS_PORT, 1,\n+\t\t\t\t\tids, values, ret);\n+\tif (ret > (signed int)XSTATS_MAX)\n+\t\tprintf(\"%s %d: more xstats available than space\\n\",\n+\t\t\t\t__func__, __LINE__);\n+\tfor (i = 0; (signed int)i < ret; i++) {\n+\t\tprintf(\"%d : %s : %\"PRIu64\"\\n\",\n+\t\t\t\ti, xstats_names[i].name, values[i]);\n+\t}\n+\n+\t/* Queue names / values */\n+\tret = rte_event_dev_xstats_names_get(evdev,\n+\t\t\t\t\tRTE_EVENT_DEV_XSTATS_QUEUE, 0,\n+\t\t\t\t\txstats_names, ids, XSTATS_MAX);\n+\tret = 
rte_event_dev_xstats_get(evdev,\n+\t\t\t\t\tRTE_EVENT_DEV_XSTATS_QUEUE,\n+\t\t\t\t\t1, ids, values, ret);\n+\tif (ret > (signed int)XSTATS_MAX)\n+\t\tprintf(\"%s %d: more xstats available than space\\n\",\n+\t\t\t\t__func__, __LINE__);\n+\tfor (i = 0; (signed int)i < ret; i++) {\n+\t\tprintf(\"%d : %s : %\"PRIu64\"\\n\",\n+\t\t\t\ti, xstats_names[i].name, values[i]);\n+\t}\n+}\n+\n /* initialization and config */\n static inline int\n init(struct test *t, int nb_queues, int nb_ports)\n@@ -2600,6 +2663,324 @@ unordered_basic(struct test *t)\n \treturn parallel_basic(t, 0);\n }\n \n+static int\n+holb(struct test *t) /* test to check we avoid basic head-of-line blocking */\n+{\n+\tconst struct rte_event new_ev = {\n+\t\t\t.op = RTE_EVENT_OP_NEW\n+\t\t\t/* all other fields zero */\n+\t};\n+\tstruct rte_event ev = new_ev;\n+\tunsigned int rx_port = 0; /* port we get the first flow on */\n+\tchar rx_port_used_stat[64];\n+\tchar rx_port_free_stat[64];\n+\tchar other_port_used_stat[64];\n+\n+\tif (init(t, 1, 2) < 0 ||\n+\t\t\tcreate_ports(t, 2) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 1) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\tint nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);\n+\tif (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||\n+\t\t\tnb_links != 1) {\n+\t\tprintf(\"%d: Error links queue to ports\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\n+\t/* send one packet and see where it goes, port 0 or 1 */\n+\tif (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {\n+\t\tprintf(\"%d: Error doing first enqueue\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\trte_event_schedule(evdev);\n+\n+\tif (rte_event_dev_xstats_by_name_get(evdev, \"port_0_cq_ring_used\", NULL)\n+\t\t\t!= 1)\n+\t\trx_port = 1;\n+\n+\tsnprintf(rx_port_used_stat, 
sizeof(rx_port_used_stat),\n+\t\t\t\"port_%u_cq_ring_used\", rx_port);\n+\tsnprintf(rx_port_free_stat, sizeof(rx_port_free_stat),\n+\t\t\t\"port_%u_cq_ring_free\", rx_port);\n+\tsnprintf(other_port_used_stat, sizeof(other_port_used_stat),\n+\t\t\t\"port_%u_cq_ring_used\", rx_port ^ 1);\n+\tif (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)\n+\t\t\t!= 1) {\n+\t\tprintf(\"%d: Error, first event not scheduled\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\n+\t/* now fill up the rx port's queue with one flow to cause HOLB */\n+\tdo {\n+\t\tev = new_ev;\n+\t\tif (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {\n+\t\t\tprintf(\"%d: Error with enqueue\\n\", __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\t\trte_event_schedule(evdev);\n+\t} while (rte_event_dev_xstats_by_name_get(evdev,\n+\t\t\t\trx_port_free_stat, NULL) != 0);\n+\n+\t/* one more packet, which needs to stay in IQ - i.e. HOLB */\n+\tev = new_ev;\n+\tif (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {\n+\t\tprintf(\"%d: Error with enqueue\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\trte_event_schedule(evdev);\n+\n+\t/* check that the other port still has an empty CQ */\n+\tif (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)\n+\t\t\t!= 0) {\n+\t\tprintf(\"%d: Error, second port CQ is not empty\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\t/* check IQ now has one packet */\n+\tif (rte_event_dev_xstats_by_name_get(evdev, \"qid_0_iq_0_used\", NULL)\n+\t\t\t!= 1) {\n+\t\tprintf(\"%d: Error, QID does not have exactly 1 packet\\n\",\n+\t\t\t__LINE__);\n+\t\tgoto err;\n+\t}\n+\n+\t/* send another flow, which should pass the other IQ entry */\n+\tev = new_ev;\n+\tev.flow_id = 1;\n+\tif (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {\n+\t\tprintf(\"%d: Error with enqueue\\n\", __LINE__);\n+\t\tgoto err;\n+\t}\n+\trte_event_schedule(evdev);\n+\n+\tif (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)\n+\t\t\t!= 1) {\n+\t\tprintf(\"%d: Error, second 
flow did not pass out first\\n\",\n+\t\t\t__LINE__);\n+\t\tgoto err;\n+\t}\n+\n+\tif (rte_event_dev_xstats_by_name_get(evdev, \"qid_0_iq_0_used\", NULL)\n+\t\t\t!= 1) {\n+\t\tprintf(\"%d: Error, QID does not have exactly 1 packet\\n\",\n+\t\t\t__LINE__);\n+\t\tgoto err;\n+\t}\n+\tcleanup(t);\n+\treturn 0;\n+err:\n+\trte_event_dev_dump(evdev, stdout);\n+\tcleanup(t);\n+\treturn -1;\n+}\n+\n+static int\n+worker_loopback_worker_fn(void *arg)\n+{\n+\tstruct test *t = arg;\n+\tuint8_t port = t->port[1];\n+\tint count = 0;\n+\tint enqd;\n+\n+\t/*\n+\t * Takes packets from the input port and then loops them back through\n+\t * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times\n+\t * so each packet goes through 8*16 = 128 times.\n+\t */\n+\tprintf(\"%d: \\tWorker function started\\n\", __LINE__);\n+\twhile (count < NUM_PACKETS) {\n+#define BURST_SIZE 32\n+\t\tstruct rte_event ev[BURST_SIZE];\n+\t\tuint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,\n+\t\t\t\tBURST_SIZE, 0);\n+\t\tif (nb_rx == 0) {\n+\t\t\trte_pause();\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tfor (i = 0; i < nb_rx; i++) {\n+\t\t\tev[i].queue_id++;\n+\t\t\tif (ev[i].queue_id != 8) {\n+\t\t\t\tev[i].op = RTE_EVENT_OP_FORWARD;\n+\t\t\t\tenqd = rte_event_enqueue_burst(evdev, port,\n+\t\t\t\t\t\t&ev[i], 1);\n+\t\t\t\tif (enqd != 1) {\n+\t\t\t\t\tprintf(\"%d: Can't enqueue FWD!!\\n\",\n+\t\t\t\t\t\t\t__LINE__);\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\tev[i].queue_id = 0;\n+\t\t\tev[i].mbuf->udata64++;\n+\t\t\tif (ev[i].mbuf->udata64 != 16) {\n+\t\t\t\tev[i].op = RTE_EVENT_OP_FORWARD;\n+\t\t\t\tenqd = rte_event_enqueue_burst(evdev, port,\n+\t\t\t\t\t\t&ev[i], 1);\n+\t\t\t\tif (enqd != 1) {\n+\t\t\t\t\tprintf(\"%d: Can't enqueue FWD!!\\n\",\n+\t\t\t\t\t\t\t__LINE__);\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\t/* we have hit 16 iterations through system - drop */\n+\t\t\trte_pktmbuf_free(ev[i].mbuf);\n+\t\t\tcount++;\n+\t\t\tev[i].op 
= RTE_EVENT_OP_RELEASE;\n+\t\t\tenqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);\n+\t\t\tif (enqd != 1) {\n+\t\t\t\tprintf(\"%d drop enqueue failed\\n\", __LINE__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+worker_loopback_producer_fn(void *arg)\n+{\n+\tstruct test *t = arg;\n+\tuint8_t port = t->port[0];\n+\tuint64_t count = 0;\n+\n+\tprintf(\"%d: \\tProducer function started\\n\", __LINE__);\n+\twhile (count < NUM_PACKETS) {\n+\t\tstruct rte_mbuf *m = 0;\n+\t\tdo {\n+\t\t\tm = rte_pktmbuf_alloc(t->mbuf_pool);\n+\t\t} while (m == NULL);\n+\n+\t\tm->udata64 = 0;\n+\n+\t\tstruct rte_event ev = {\n+\t\t\t\t.op = RTE_EVENT_OP_NEW,\n+\t\t\t\t.queue_id = t->qid[0],\n+\t\t\t\t.flow_id = (uintptr_t)m & 0xFFFF,\n+\t\t\t\t.mbuf = m,\n+\t\t};\n+\n+\t\tif (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {\n+\t\t\twhile (rte_event_enqueue_burst(evdev, port, &ev, 1) !=\n+\t\t\t\t\t1)\n+\t\t\t\trte_pause();\n+\t\t}\n+\n+\t\tcount++;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+worker_loopback(struct test *t)\n+{\n+\t/* use a single producer core, and a worker core to see what happens\n+\t * if the worker loops packets back multiple times\n+\t */\n+\tstruct test_event_dev_stats stats;\n+\tuint64_t print_cycles = 0, cycles = 0;\n+\tuint64_t tx_pkts = 0;\n+\tint err;\n+\tint w_lcore, p_lcore;\n+\n+\tif (init(t, 8, 2) < 0 ||\n+\t\t\tcreate_atomic_qids(t, 8) < 0) {\n+\t\tprintf(\"%d: Error initializing device\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* RX with low max events */\n+\tstatic struct rte_event_port_conf conf = {\n+\t\t\t.dequeue_depth = 32,\n+\t\t\t.enqueue_depth = 64,\n+\t};\n+\t/* beware: this cannot be initialized in the static above as it would\n+\t * only be initialized once - and this needs to be set for multiple runs\n+\t */\n+\tconf.new_event_threshold = 512;\n+\n+\tif (rte_event_port_setup(evdev, 0, &conf) < 0) {\n+\t\tprintf(\"Error setting up RX port\\n\");\n+\t\treturn -1;\n+\t}\n+\tt->port[0] = 
0;\n+\t/* TX with higher max events */\n+\tconf.new_event_threshold = 4096;\n+\tif (rte_event_port_setup(evdev, 1, &conf) < 0) {\n+\t\tprintf(\"Error setting up TX port\\n\");\n+\t\treturn -1;\n+\t}\n+\tt->port[1] = 1;\n+\n+\t/* CQ mapping to QID */\n+\terr = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);\n+\tif (err != 8) { /* should have mapped all queues*/\n+\t\tprintf(\"%d: error mapping port 2 to all qids\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_event_dev_start(evdev) < 0) {\n+\t\tprintf(\"%d: Error with start call\\n\", __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tp_lcore = rte_get_next_lcore(\n+\t\t\t/* start core */ -1,\n+\t\t\t/* skip master */ 1,\n+\t\t\t/* wrap */ 0);\n+\tw_lcore = rte_get_next_lcore(p_lcore, 1, 0);\n+\n+\trte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);\n+\trte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);\n+\n+\tprint_cycles = cycles = rte_get_timer_cycles();\n+\twhile (rte_eal_get_lcore_state(p_lcore) != FINISHED ||\n+\t\t\trte_eal_get_lcore_state(w_lcore) != FINISHED) {\n+\n+\t\trte_event_schedule(evdev);\n+\n+\t\tuint64_t new_cycles = rte_get_timer_cycles();\n+\n+\t\tif (new_cycles - print_cycles > rte_get_timer_hz()) {\n+\t\t\ttest_event_dev_stats_get(evdev, &stats);\n+\t\t\tprintf(\n+\t\t\t\t\"%d: \\tSched Rx = %\"PRIu64\", Tx = %\"PRIu64\"\\n\",\n+\t\t\t\t__LINE__, stats.rx_pkts, stats.tx_pkts);\n+\n+\t\t\tprint_cycles = new_cycles;\n+\t\t}\n+\t\tif (new_cycles - cycles > rte_get_timer_hz() * 3) {\n+\t\t\ttest_event_dev_stats_get(evdev, &stats);\n+\t\t\tif (stats.tx_pkts == tx_pkts) {\n+\t\t\t\trte_event_dev_dump(evdev, stdout);\n+\t\t\t\tprintf(\"Dumping xstats:\\n\");\n+\t\t\t\txstats_print();\n+\t\t\t\tprintf(\n+\t\t\t\t\t\"%d: No schedules for seconds, deadlock\\n\",\n+\t\t\t\t\t__LINE__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\ttx_pkts = stats.tx_pkts;\n+\t\t\tcycles = new_cycles;\n+\t\t}\n+\t}\n+\trte_event_schedule(evdev); /* ensure all completions are flushed 
*/\n+\n+\trte_eal_mp_wait_lcore();\n+\n+\tcleanup(t);\n+\treturn 0;\n+}\n+\n static struct rte_mempool *eventdev_func_mempool;\n \n static int\n@@ -2778,6 +3159,23 @@ test_sw_eventdev(void)\n \t\tprintf(\"ERROR - Port Reconfig Credits Reset test FAILED.\\n\");\n \t\treturn ret;\n \t}\n+\tprintf(\"*** Running Head-of-line-blocking test...\\n\");\n+\tret = holb(t);\n+\tif (ret != 0) {\n+\t\tprintf(\"ERROR - Head-of-line-blocking test FAILED.\\n\");\n+\t\treturn ret;\n+\t}\n+\tif (rte_lcore_count() >= 3) {\n+\t\tprintf(\"*** Running Worker loopback test...\\n\");\n+\t\tret = worker_loopback(t);\n+\t\tif (ret != 0) {\n+\t\t\tprintf(\"ERROR - Worker loopback test FAILED.\\n\");\n+\t\t\treturn ret;\n+\t\t}\n+\t} else {\n+\t\tprintf(\"### Not enough cores for worker loopback test.\\n\");\n+\t\tprintf(\"### Need at least 3 cores for test.\\n\");\n+\t}\n \t/*\n \t * Free test instance, leaving mempool initialized, and a pointer to it\n \t * in static eventdev_func_mempool, as it is re-used on re-runs\n",
    "prefixes": [
        "dpdk-dev",
        "v7",
        "19/22"
    ]
}