get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update).
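
As a minimal sketch (not part of the Patchwork output itself, and assuming the
third-party Python "requests" package is available), the patch detail shown in
the example exchange below could be fetched programmatically:

import requests

# Fetch the patch detail as JSON; the numeric ID matches the example
# response below.
resp = requests.get("http://patches.dpdk.org/api/patches/65177/",
                    headers={"Accept": "application/json"})
resp.raise_for_status()

patch = resp.json()
print(patch["name"])    # "[v6,09/11] examples/l3fwd: add event em main loop"
print(patch["state"])   # "accepted"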

GET /api/patches/65177/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 65177,
    "url": "http://patches.dpdk.org/api/patches/65177/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200128053506.2173-10-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200128053506.2173-10-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200128053506.2173-10-pbhagavatula@marvell.com",
    "date": "2020-01-28T05:35:03",
    "name": "[v6,09/11] examples/l3fwd: add event em main loop",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "2284baf7060c645a8bcd0a998e4d948898b8f3b5",
    "submitter": {
        "id": 1183,
        "url": "http://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200128053506.2173-10-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 8312,
            "url": "http://patches.dpdk.org/api/series/8312/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8312",
            "date": "2020-01-28T05:34:54",
            "name": "example/l3fwd: introduce event device support",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/8312/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/65177/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/65177/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3DC51A04B3;\n\tTue, 28 Jan 2020 06:36:52 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 093841C06C;\n\tTue, 28 Jan 2020 06:36:04 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173]) by dpdk.org (Postfix) with ESMTP id C8E3F1C06C\n for <dev@dpdk.org>; Tue, 28 Jan 2020 06:36:02 +0100 (CET)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n 00S5Yt5R016901; Mon, 27 Jan 2020 21:36:02 -0800",
            "from sc-exch03.marvell.com ([199.233.58.183])\n by mx0b-0016f401.pphosted.com with ESMTP id 2xrp2t2fxm-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Mon, 27 Jan 2020 21:36:02 -0800",
            "from SC-EXCH03.marvell.com (10.93.176.83) by SC-EXCH03.marvell.com\n (10.93.176.83) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Mon, 27 Jan\n 2020 21:35:59 -0800",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH03.marvell.com\n (10.93.176.83) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Mon, 27 Jan 2020 21:35:59 -0800",
            "from BG-LT7430.marvell.com (unknown [10.28.17.49])\n by maili.marvell.com (Postfix) with ESMTP id D7BC23F703F;\n Mon, 27 Jan 2020 21:35:55 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0818;\n bh=C7IkNQP++GTjINJFxMXC5sEcouHYMcJzwUu2J6hn6IA=;\n b=x3fHx+kw28lhxTWCdGbXFw2ctwWmWerxYyATgQhRVP7Gf/yBqb5fwTw+cqnxO5jS1sWN\n WMeDD8oU7Kl++yk/+DX/OKMuwubOm7yf+Q5xSjo4exizgFh33nY1WaUWnt3OVRz5aqYh\n tKqzhNl4xLtbKlnZ5M+ottW+jvP0ZP0ULapQUw/j01mc1Rponn7E3Lq6fUMRN29xmYxq\n ziAZ9/Z5X9GTAkrlspS+s/FlL9mANLmbeTKZzUEt3n1G9z7tHRWtTHyHonTowfAF7R9j\n ggpkliLaZkoHTYRZlGMC/o0a+bcO1X2ZXiSXP9ZoRp+jALBUdsWc0VP1QGCzfeE5A5v9 JQ==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, <konstantin.ananyev@intel.com>, Marko Kovacevic\n <marko.kovacevic@intel.com>, Ori Kam <orika@mellanox.com>, Bruce Richardson\n <bruce.richardson@intel.com>, Radu Nicolau <radu.nicolau@intel.com>, \"Akhil\n Goyal\" <akhil.goyal@nxp.com>, Tomasz Kantecki <tomasz.kantecki@intel.com>,\n Sunil Kumar Kori <skori@marvell.com>, Pavan Nikhilesh\n <pbhagavatula@marvell.com>",
        "CC": "<dev@dpdk.org>",
        "Date": "Tue, 28 Jan 2020 11:05:03 +0530",
        "Message-ID": "<20200128053506.2173-10-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20200128053506.2173-1-pbhagavatula@marvell.com>",
        "References": "<20200124040542.2360--1-pbhagavatula@marvell.com>\n <20200128053506.2173-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.138, 18.0.572\n definitions=2020-01-27_08:2020-01-24,\n 2020-01-27 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v6 09/11] examples/l3fwd: add event em main loop",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nAdd em main loop for handling events based on capabilities of the\nevent device.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\n---\n examples/l3fwd/l3fwd.h               |  10 ++\n examples/l3fwd/l3fwd_em.c            | 177 +++++++++++++++++++++++++++\n examples/l3fwd/l3fwd_em.h            | 159 +++++++++++++++++-------\n examples/l3fwd/l3fwd_em_hlm.h        | 131 ++++++++++++++++++++\n examples/l3fwd/l3fwd_em_sequential.h |  26 ++++\n examples/l3fwd/l3fwd_event.c         |   9 ++\n examples/l3fwd/main.c                |   5 +-\n 7 files changed, 470 insertions(+), 47 deletions(-)",
    "diff": "diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h\nindex b3cdcd496..c786f9062 100644\n--- a/examples/l3fwd/l3fwd.h\n+++ b/examples/l3fwd/l3fwd.h\n@@ -216,6 +216,16 @@ lpm_event_main_loop_tx_q(__attribute__((unused)) void *dummy);\n int\n lpm_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy);\n \n+int\n+em_event_main_loop_tx_d(__attribute__((unused)) void *dummy);\n+int\n+em_event_main_loop_tx_d_burst(__attribute__((unused)) void *dummy);\n+int\n+em_event_main_loop_tx_q(__attribute__((unused)) void *dummy);\n+int\n+em_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy);\n+\n+\n /* Return ipv4/ipv6 fwd lookup struct for LPM or EM. */\n void *\n em_get_ipv4_l3fwd_lookup_struct(const int socketid);\ndiff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c\nindex 74a7c8fa4..1a8bc9168 100644\n--- a/examples/l3fwd/l3fwd_em.c\n+++ b/examples/l3fwd/l3fwd_em.c\n@@ -26,6 +26,7 @@\n #include <rte_hash.h>\n \n #include \"l3fwd.h\"\n+#include \"l3fwd_event.h\"\n \n #if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_CRC32)\n #define EM_HASH_CRC 1\n@@ -699,6 +700,182 @@ em_main_loop(__attribute__((unused)) void *dummy)\n \treturn 0;\n }\n \n+static __rte_always_inline void\n+em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,\n+\t\tconst uint8_t flags)\n+{\n+\tconst int event_p_id = l3fwd_get_free_event_port(evt_rsrc);\n+\tconst uint8_t tx_q_id = evt_rsrc->evq.event_q_id[\n+\t\tevt_rsrc->evq.nb_queues - 1];\n+\tconst uint8_t event_d_id = evt_rsrc->event_d_id;\n+\tstruct lcore_conf *lconf;\n+\tunsigned int lcore_id;\n+\tstruct rte_event ev;\n+\n+\tif (event_p_id < 0)\n+\t\treturn;\n+\n+\tlcore_id = rte_lcore_id();\n+\tlconf = &lcore_conf[lcore_id];\n+\n+\tRTE_LOG(INFO, L3FWD, \"entering %s on lcore %u\\n\", __func__, lcore_id);\n+\twhile (!force_quit) {\n+\t\tif (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))\n+\t\t\tcontinue;\n+\n+\t\tstruct rte_mbuf *mbuf = ev.mbuf;\n+\n+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON\n+\t\tmbuf->port = em_get_dst_port(lconf, mbuf, mbuf->port);\n+\t\tprocess_packet(mbuf, &mbuf->port);\n+#else\n+\t\tl3fwd_em_simple_process(mbuf, lconf);\n+#endif\n+\t\tif (mbuf->port == BAD_PORT) {\n+\t\t\trte_pktmbuf_free(mbuf);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tif (flags & L3FWD_EVENT_TX_ENQ) {\n+\t\t\tev.queue_id = tx_q_id;\n+\t\t\tev.op = RTE_EVENT_OP_FORWARD;\n+\t\t\twhile (rte_event_enqueue_burst(event_d_id, event_p_id,\n+\t\t\t\t\t\t&ev, 1) && !force_quit)\n+\t\t\t\t;\n+\t\t}\n+\n+\t\tif (flags & L3FWD_EVENT_TX_DIRECT) {\n+\t\t\trte_event_eth_tx_adapter_txq_set(mbuf, 0);\n+\t\t\twhile (!rte_event_eth_tx_adapter_enqueue(event_d_id,\n+\t\t\t\t\t\tevent_p_id, &ev, 1, 0) &&\n+\t\t\t\t\t!force_quit)\n+\t\t\t\t;\n+\t\t}\n+\t}\n+}\n+\n+static __rte_always_inline void\n+em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,\n+\t\tconst uint8_t flags)\n+{\n+\tconst int event_p_id = l3fwd_get_free_event_port(evt_rsrc);\n+\tconst uint8_t tx_q_id = evt_rsrc->evq.event_q_id[\n+\t\tevt_rsrc->evq.nb_queues - 1];\n+\tconst uint8_t event_d_id = evt_rsrc->event_d_id;\n+\tconst uint16_t deq_len = evt_rsrc->deq_depth;\n+\tstruct rte_event events[MAX_PKT_BURST];\n+\tstruct lcore_conf *lconf;\n+\tunsigned int lcore_id;\n+\tint i, nb_enq, nb_deq;\n+\n+\tif (event_p_id < 0)\n+\t\treturn;\n+\n+\tlcore_id = rte_lcore_id();\n+\n+\tlconf = &lcore_conf[lcore_id];\n+\n+\tRTE_LOG(INFO, L3FWD, \"entering %s on lcore %u\\n\", __func__, lcore_id);\n+\n+\twhile (!force_quit) {\n+\t\t/* Read events from RX 
queues */\n+\t\tnb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,\n+\t\t\t\tevents, deq_len, 0);\n+\t\tif (nb_deq == 0) {\n+\t\t\trte_pause();\n+\t\t\tcontinue;\n+\t\t}\n+\n+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON\n+\t\tl3fwd_em_process_events(nb_deq, (struct rte_event **)&events,\n+\t\t\t\t\tlconf);\n+#else\n+\t\tl3fwd_em_no_opt_process_events(nb_deq,\n+\t\t\t\t\t       (struct rte_event **)&events,\n+\t\t\t\t\t       lconf);\n+#endif\n+\t\tfor (i = 0; i < nb_deq; i++) {\n+\t\t\tif (flags & L3FWD_EVENT_TX_ENQ) {\n+\t\t\t\tevents[i].queue_id = tx_q_id;\n+\t\t\t\tevents[i].op = RTE_EVENT_OP_FORWARD;\n+\t\t\t}\n+\n+\t\t\tif (flags & L3FWD_EVENT_TX_DIRECT)\n+\t\t\t\trte_event_eth_tx_adapter_txq_set(events[i].mbuf,\n+\t\t\t\t\t\t\t\t 0);\n+\t\t}\n+\n+\t\tif (flags & L3FWD_EVENT_TX_ENQ) {\n+\t\t\tnb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,\n+\t\t\t\t\tevents, nb_deq);\n+\t\t\twhile (nb_enq < nb_deq && !force_quit)\n+\t\t\t\tnb_enq += rte_event_enqueue_burst(event_d_id,\n+\t\t\t\t\t\tevent_p_id, events + nb_enq,\n+\t\t\t\t\t\tnb_deq - nb_enq);\n+\t\t}\n+\n+\t\tif (flags & L3FWD_EVENT_TX_DIRECT) {\n+\t\t\tnb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,\n+\t\t\t\t\tevent_p_id, events, nb_deq, 0);\n+\t\t\twhile (nb_enq < nb_deq && !force_quit)\n+\t\t\t\tnb_enq += rte_event_eth_tx_adapter_enqueue(\n+\t\t\t\t\t\tevent_d_id, event_p_id,\n+\t\t\t\t\t\tevents + nb_enq,\n+\t\t\t\t\t\tnb_deq - nb_enq, 0);\n+\t\t}\n+\t}\n+}\n+\n+static __rte_always_inline void\n+em_event_loop(struct l3fwd_event_resources *evt_rsrc,\n+\t\t const uint8_t flags)\n+{\n+\tif (flags & L3FWD_EVENT_SINGLE)\n+\t\tem_event_loop_single(evt_rsrc, flags);\n+\tif (flags & L3FWD_EVENT_BURST)\n+\t\tem_event_loop_burst(evt_rsrc, flags);\n+}\n+\n+int __rte_noinline\n+em_event_main_loop_tx_d(__attribute__((unused)) void *dummy)\n+{\n+\tstruct l3fwd_event_resources *evt_rsrc =\n+\t\t\t\t\tl3fwd_get_eventdev_rsrc();\n+\n+\tem_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);\n+\treturn 0;\n+}\n+\n+int __rte_noinline\n+em_event_main_loop_tx_d_burst(__attribute__((unused)) void *dummy)\n+{\n+\tstruct l3fwd_event_resources *evt_rsrc =\n+\t\t\t\t\tl3fwd_get_eventdev_rsrc();\n+\n+\tem_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);\n+\treturn 0;\n+}\n+\n+int __rte_noinline\n+em_event_main_loop_tx_q(__attribute__((unused)) void *dummy)\n+{\n+\tstruct l3fwd_event_resources *evt_rsrc =\n+\t\t\t\t\tl3fwd_get_eventdev_rsrc();\n+\n+\tem_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);\n+\treturn 0;\n+}\n+\n+int __rte_noinline\n+em_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy)\n+{\n+\tstruct l3fwd_event_resources *evt_rsrc =\n+\t\t\t\t\tl3fwd_get_eventdev_rsrc();\n+\n+\tem_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);\n+\treturn 0;\n+}\n+\n /*\n  * Initialize exact match (hash) parameters.\n  */\ndiff --git a/examples/l3fwd/l3fwd_em.h b/examples/l3fwd/l3fwd_em.h\nindex 090c1b448..b992a21da 100644\n--- a/examples/l3fwd/l3fwd_em.h\n+++ b/examples/l3fwd/l3fwd_em.h\n@@ -5,73 +5,92 @@\n #ifndef __L3FWD_EM_H__\n #define __L3FWD_EM_H__\n \n-static __rte_always_inline void\n-l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,\n-\t\tstruct lcore_conf *qconf)\n+static __rte_always_inline uint16_t\n+l3fwd_em_handle_ipv4(struct rte_mbuf *m, uint16_t portid,\n+\t\t     struct rte_ether_hdr *eth_hdr, struct lcore_conf *qconf)\n {\n-\tstruct rte_ether_hdr *eth_hdr;\n \tstruct rte_ipv4_hdr *ipv4_hdr;\n \tuint16_t dst_port;\n-\tuint32_t 
tcp_or_udp;\n-\tuint32_t l3_ptypes;\n-\n-\teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n-\ttcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);\n-\tl3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;\n \n-\tif (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {\n-\t\t/* Handle IPv4 headers.*/\n-\t\tipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,\n-\t\t\t\t\t\tsizeof(struct rte_ether_hdr));\n+\t/* Handle IPv4 headers.*/\n+\tipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,\n+\t\t\tsizeof(struct rte_ether_hdr));\n \n #ifdef DO_RFC_1812_CHECKS\n-\t\t/* Check to make sure the packet is valid (RFC1812) */\n-\t\tif (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {\n-\t\t\trte_pktmbuf_free(m);\n-\t\t\treturn;\n-\t\t}\n+\t/* Check to make sure the packet is valid (RFC1812) */\n+\tif (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {\n+\t\trte_pktmbuf_free(m);\n+\t\treturn BAD_PORT;\n+\t}\n #endif\n-\t\tdst_port = em_get_ipv4_dst_port(ipv4_hdr, portid,\n-\t\t\t\t\t\tqconf->ipv4_lookup_struct);\n+\tdst_port = em_get_ipv4_dst_port(ipv4_hdr, portid,\n+\t\t\tqconf->ipv4_lookup_struct);\n \n-\t\tif (dst_port >= RTE_MAX_ETHPORTS ||\n+\tif (dst_port >= RTE_MAX_ETHPORTS ||\n \t\t\t(enabled_port_mask & 1 << dst_port) == 0)\n-\t\t\tdst_port = portid;\n+\t\tdst_port = portid;\n \n #ifdef DO_RFC_1812_CHECKS\n-\t\t/* Update time to live and header checksum */\n-\t\t--(ipv4_hdr->time_to_live);\n-\t\t++(ipv4_hdr->hdr_checksum);\n+\t/* Update time to live and header checksum */\n+\t--(ipv4_hdr->time_to_live);\n+\t++(ipv4_hdr->hdr_checksum);\n #endif\n-\t\t/* dst addr */\n-\t\t*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];\n+\t/* dst addr */\n+\t*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];\n \n-\t\t/* src addr */\n-\t\trte_ether_addr_copy(&ports_eth_addr[dst_port],\n-\t\t\t\t&eth_hdr->s_addr);\n+\t/* src addr */\n+\trte_ether_addr_copy(&ports_eth_addr[dst_port],\n+\t\t\t&eth_hdr->s_addr);\n \n-\t\tsend_single_packet(qconf, m, dst_port);\n-\t} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {\n-\t\t/* Handle IPv6 headers.*/\n-\t\tstruct rte_ipv6_hdr *ipv6_hdr;\n+\treturn dst_port;\n+}\n \n-\t\tipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,\n-\t\t\t\t\t\tsizeof(struct rte_ether_hdr));\n+static __rte_always_inline uint16_t\n+l3fwd_em_handle_ipv6(struct rte_mbuf *m, uint16_t portid,\n+\t\tstruct rte_ether_hdr *eth_hdr, struct lcore_conf *qconf)\n+{\n+\t/* Handle IPv6 headers.*/\n+\tstruct rte_ipv6_hdr *ipv6_hdr;\n+\tuint16_t dst_port;\n \n-\t\tdst_port = em_get_ipv6_dst_port(ipv6_hdr, portid,\n-\t\t\t\t\tqconf->ipv6_lookup_struct);\n+\tipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,\n+\t\t\tsizeof(struct rte_ether_hdr));\n \n-\t\tif (dst_port >= RTE_MAX_ETHPORTS ||\n+\tdst_port = em_get_ipv6_dst_port(ipv6_hdr, portid,\n+\t\t\tqconf->ipv6_lookup_struct);\n+\n+\tif (dst_port >= RTE_MAX_ETHPORTS ||\n \t\t\t(enabled_port_mask & 1 << dst_port) == 0)\n-\t\t\tdst_port = portid;\n+\t\tdst_port = portid;\n+\n+\t/* dst addr */\n+\t*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];\n \n-\t\t/* dst addr */\n-\t\t*(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];\n+\t/* src addr */\n+\trte_ether_addr_copy(&ports_eth_addr[dst_port],\n+\t\t\t&eth_hdr->s_addr);\n \n-\t\t/* src addr */\n-\t\trte_ether_addr_copy(&ports_eth_addr[dst_port],\n-\t\t\t\t&eth_hdr->s_addr);\n+\treturn dst_port;\n+}\n \n+static __rte_always_inline void\n+l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,\n+\t\tstruct lcore_conf 
*qconf)\n+{\n+\tstruct rte_ether_hdr *eth_hdr;\n+\tuint16_t dst_port;\n+\tuint32_t tcp_or_udp;\n+\tuint32_t l3_ptypes;\n+\n+\teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n+\ttcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);\n+\tl3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;\n+\n+\tif (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {\n+\t\tdst_port = l3fwd_em_handle_ipv4(m, portid, eth_hdr, qconf);\n+\t\tsend_single_packet(qconf, m, dst_port);\n+\t} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {\n+\t\tdst_port = l3fwd_em_handle_ipv6(m, portid, eth_hdr, qconf);\n \t\tsend_single_packet(qconf, m, dst_port);\n \t} else {\n \t\t/* Free the mbuf that contains non-IPV4/IPV6 packet */\n@@ -79,6 +98,25 @@ l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,\n \t}\n }\n \n+static __rte_always_inline void\n+l3fwd_em_simple_process(struct rte_mbuf *m, struct lcore_conf *qconf)\n+{\n+\tstruct rte_ether_hdr *eth_hdr;\n+\tuint32_t tcp_or_udp;\n+\tuint32_t l3_ptypes;\n+\n+\teth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n+\ttcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);\n+\tl3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;\n+\n+\tif (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4))\n+\t\tm->port = l3fwd_em_handle_ipv4(m, m->port, eth_hdr, qconf);\n+\telse if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6))\n+\t\tm->port = l3fwd_em_handle_ipv6(m, m->port, eth_hdr, qconf);\n+\telse\n+\t\tm->port = BAD_PORT;\n+}\n+\n /*\n  * Buffer non-optimized handling of packets, invoked\n  * from main_loop.\n@@ -108,4 +146,33 @@ l3fwd_em_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,\n \t\tl3fwd_em_simple_forward(pkts_burst[j], portid, qconf);\n }\n \n+/*\n+ * Buffer non-optimized handling of events, invoked\n+ * from main_loop.\n+ */\n+static inline void\n+l3fwd_em_no_opt_process_events(int nb_rx, struct rte_event **events,\n+\t\t\t       struct lcore_conf *qconf)\n+{\n+\tint32_t j;\n+\n+\t/* Prefetch first packets */\n+\tfor (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++)\n+\t\trte_prefetch0(rte_pktmbuf_mtod(events[j]->mbuf, void *));\n+\n+\t/*\n+\t * Prefetch and forward already prefetched\n+\t * packets.\n+\t */\n+\tfor (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {\n+\t\trte_prefetch0(rte_pktmbuf_mtod(events[\n+\t\t\t\tj + PREFETCH_OFFSET]->mbuf, void *));\n+\t\tl3fwd_em_simple_process(events[j]->mbuf, qconf);\n+\t}\n+\n+\t/* Forward remaining prefetched packets */\n+\tfor (; j < nb_rx; j++)\n+\t\tl3fwd_em_simple_process(events[j]->mbuf, qconf);\n+}\n+\n #endif /* __L3FWD_EM_H__ */\ndiff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h\nindex ad8b9ce87..79812716c 100644\n--- a/examples/l3fwd/l3fwd_em_hlm.h\n+++ b/examples/l3fwd/l3fwd_em_hlm.h\n@@ -75,6 +75,60 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],\n \t}\n }\n \n+static __rte_always_inline void\n+em_get_dst_port_ipv4xN_events(struct lcore_conf *qconf, struct rte_mbuf *m[],\n+\t\t\t      uint16_t dst_port[])\n+{\n+\tint i;\n+\tint32_t ret[EM_HASH_LOOKUP_COUNT];\n+\tunion ipv4_5tuple_host key[EM_HASH_LOOKUP_COUNT];\n+\tconst void *key_array[EM_HASH_LOOKUP_COUNT];\n+\n+\tfor (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {\n+\t\tget_ipv4_5tuple(m[i], mask0.x, &key[i]);\n+\t\tkey_array[i] = &key[i];\n+\t}\n+\n+\trte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0],\n+\t\t\t     EM_HASH_LOOKUP_COUNT, ret);\n+\n+\tfor (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {\n+\t\tdst_port[i] = ((ret[i] < 0) ?\n+\t\t\t\tm[i]->port : 
ipv4_l3fwd_out_if[ret[i]]);\n+\n+\t\tif (dst_port[i] >= RTE_MAX_ETHPORTS ||\n+\t\t\t\t(enabled_port_mask & 1 << dst_port[i]) == 0)\n+\t\t\tdst_port[i] = m[i]->port;\n+\t}\n+}\n+\n+static __rte_always_inline void\n+em_get_dst_port_ipv6xN_events(struct lcore_conf *qconf, struct rte_mbuf *m[],\n+\t\t\t      uint16_t dst_port[])\n+{\n+\tint i;\n+\tint32_t ret[EM_HASH_LOOKUP_COUNT];\n+\tunion ipv6_5tuple_host key[EM_HASH_LOOKUP_COUNT];\n+\tconst void *key_array[EM_HASH_LOOKUP_COUNT];\n+\n+\tfor (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {\n+\t\tget_ipv6_5tuple(m[i], mask1.x, mask2.x, &key[i]);\n+\t\tkey_array[i] = &key[i];\n+\t}\n+\n+\trte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0],\n+\t\t\t     EM_HASH_LOOKUP_COUNT, ret);\n+\n+\tfor (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {\n+\t\tdst_port[i] = ((ret[i] < 0) ?\n+\t\t\t\tm[i]->port : ipv6_l3fwd_out_if[ret[i]]);\n+\n+\t\tif (dst_port[i] >= RTE_MAX_ETHPORTS ||\n+\t\t\t\t(enabled_port_mask & 1 << dst_port[i]) == 0)\n+\t\t\tdst_port[i] = m[i]->port;\n+\t}\n+}\n+\n static __rte_always_inline uint16_t\n em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,\n \t\tuint16_t portid)\n@@ -187,4 +241,81 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,\n \tsend_packets_multi(qconf, pkts_burst, dst_port, nb_rx);\n \n }\n+\n+/*\n+ * Buffer optimized handling of events, invoked\n+ * from main_loop.\n+ */\n+static inline void\n+l3fwd_em_process_events(int nb_rx, struct rte_event **ev,\n+\t\t     struct lcore_conf *qconf)\n+{\n+\tint32_t i, j, pos;\n+\tuint16_t dst_port[MAX_PKT_BURST];\n+\tstruct rte_mbuf *pkts_burst[MAX_PKT_BURST];\n+\n+\t/*\n+\t * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets\n+\t * in groups of EM_HASH_LOOKUP_COUNT.\n+\t */\n+\tint32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);\n+\n+\tfor (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {\n+\t\tpkts_burst[j] = ev[j]->mbuf;\n+\t\trte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],\n+\t\t\t\t\t       struct rte_ether_hdr *) + 1);\n+\t}\n+\n+\tfor (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {\n+\n+\t\tuint32_t pkt_type = RTE_PTYPE_L3_MASK |\n+\t\t\t\t    RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP;\n+\t\tuint32_t l3_type, tcp_or_udp;\n+\n+\t\tfor (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)\n+\t\t\tpkt_type &= pkts_burst[j + i]->packet_type;\n+\n+\t\tl3_type = pkt_type & RTE_PTYPE_L3_MASK;\n+\t\ttcp_or_udp = pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);\n+\n+\t\tfor (i = 0, pos = j + EM_HASH_LOOKUP_COUNT;\n+\t\t     i < EM_HASH_LOOKUP_COUNT && pos < nb_rx; i++, pos++) {\n+\t\t\trte_prefetch0(rte_pktmbuf_mtod(\n+\t\t\t\t\tpkts_burst[pos],\n+\t\t\t\t\tstruct rte_ether_hdr *) + 1);\n+\t\t}\n+\n+\t\tif (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) {\n+\n+\t\t\tem_get_dst_port_ipv4xN_events(qconf, &pkts_burst[j],\n+\t\t\t\t\t       &dst_port[j]);\n+\n+\t\t} else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) {\n+\n+\t\t\tem_get_dst_port_ipv6xN_events(qconf, &pkts_burst[j],\n+\t\t\t\t\t       &dst_port[j]);\n+\n+\t\t} else {\n+\t\t\tfor (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {\n+\t\t\t\tpkts_burst[j + i]->port = em_get_dst_port(qconf,\n+\t\t\t\t\t\tpkts_burst[j + i],\n+\t\t\t\t\t\tpkts_burst[j + i]->port);\n+\t\t\t\tprocess_packet(pkts_burst[j + i],\n+\t\t\t\t\t\t&pkts_burst[j + i]->port);\n+\t\t\t}\n+\t\t\tcontinue;\n+\t\t}\n+\t\tprocessx4_step3(&pkts_burst[j], &dst_port[j]);\n+\n+\t\tfor (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)\n+\t\t\tpkts_burst[j + i]->port = dst_port[j + i];\n+\n+\t}\n+\n+\tfor (; j < nb_rx; j++) {\n+\t\tpkts_burst[j]->port = 
em_get_dst_port(qconf, pkts_burst[j],\n+\t\t\t\t\t\t      pkts_burst[j]->port);\n+\t\tprocess_packet(pkts_burst[j], &pkts_burst[j]->port);\n+\t}\n+}\n #endif /* __L3FWD_EM_HLM_H__ */\ndiff --git a/examples/l3fwd/l3fwd_em_sequential.h b/examples/l3fwd/l3fwd_em_sequential.h\nindex 23fe9dec8..b231b9994 100644\n--- a/examples/l3fwd/l3fwd_em_sequential.h\n+++ b/examples/l3fwd/l3fwd_em_sequential.h\n@@ -95,4 +95,30 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,\n \n \tsend_packets_multi(qconf, pkts_burst, dst_port, nb_rx);\n }\n+\n+/*\n+ * Buffer optimized handling of events, invoked\n+ * from main_loop.\n+ */\n+static inline void\n+l3fwd_em_process_events(int nb_rx, struct rte_event **events,\n+\t\t     struct lcore_conf *qconf)\n+{\n+\tint32_t i, j;\n+\n+\trte_prefetch0(rte_pktmbuf_mtod(events[0]->mbuf,\n+\t\t      struct rte_ether_hdr *) + 1);\n+\n+\tfor (i = 1, j = 0; j < nb_rx; i++, j++) {\n+\t\tstruct rte_mbuf *mbuf = events[j]->mbuf;\n+\n+\t\tif (i < nb_rx) {\n+\t\t\trte_prefetch0(rte_pktmbuf_mtod(\n+\t\t\t\t\tevents[i]->mbuf,\n+\t\t\t\t\tstruct rte_ether_hdr *) + 1);\n+\t\t}\n+\t\tmbuf->port = em_get_dst_port(qconf, mbuf, mbuf->port);\n+\t\tprocess_packet(mbuf, &mbuf->port);\n+\t}\n+}\n #endif /* __L3FWD_EM_SEQUENTIAL_H__ */\ndiff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c\nindex ebddd8f97..43c47eade 100644\n--- a/examples/l3fwd/l3fwd_event.c\n+++ b/examples/l3fwd/l3fwd_event.c\n@@ -217,6 +217,12 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)\n \t\t[1][0] = lpm_event_main_loop_tx_q,\n \t\t[1][1] = lpm_event_main_loop_tx_q_burst,\n \t};\n+\tconst event_loop_cb em_event_loop[2][2] = {\n+\t\t[0][0] = em_event_main_loop_tx_d,\n+\t\t[0][1] = em_event_main_loop_tx_d_burst,\n+\t\t[1][0] = em_event_main_loop_tx_q,\n+\t\t[1][1] = em_event_main_loop_tx_q_burst,\n+\t};\n \tuint32_t event_queue_cfg;\n \tint ret;\n \n@@ -251,4 +257,7 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)\n \n \tevt_rsrc->ops.lpm_event_loop = lpm_event_loop[evt_rsrc->tx_mode_q]\n \t\t\t\t\t\t       [evt_rsrc->has_burst];\n+\n+\tevt_rsrc->ops.em_event_loop = em_event_loop[evt_rsrc->tx_mode_q]\n+\t\t\t\t\t\t       [evt_rsrc->has_burst];\n }\ndiff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c\nindex b8bd19b41..c95b1655e 100644\n--- a/examples/l3fwd/main.c\n+++ b/examples/l3fwd/main.c\n@@ -1226,7 +1226,10 @@ main(int argc, char **argv)\n \t/* Configure eventdev parameters if user has requested */\n \tif (evt_rsrc->enabled) {\n \t\tl3fwd_event_resource_setup(&port_conf);\n-\t\tl3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;\n+\t\tif (l3fwd_em_on)\n+\t\t\tl3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;\n+\t\telse\n+\t\t\tl3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;\n \t\tl3fwd_event_service_setup();\n \t} else\n \t\tl3fwd_poll_resource_setup();\n",
    "prefixes": [
        "v6",
        "09/11"
    ]
}
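
The patch and put operations described at the top of this page modify the same
resource. A hypothetical sketch of a partial update, again using Python
"requests": the token value is a placeholder, the choice of the "archived"
field is an assumption for illustration, and write access requires maintainer
permissions on the project.

import requests

# Hypothetical sketch: mark the patch as archived via a partial update.
# "REPLACE_ME" stands in for a real Patchwork API token.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/65177/",
    headers={"Authorization": "Token REPLACE_ME",
             "Accept": "application/json"},
    json={"archived": True},
)
resp.raise_for_status()
print(resp.json()["archived"])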