get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
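
The operations above map onto plain HTTP calls against this endpoint. For illustration only (not part of the API page itself), a minimal Python sketch, assuming the third-party "requests" package is installed, that token authentication is enabled on the instance, and that the account has permission to modify patches; exactly which fields are writable (e.g. state, delegate, archived) can vary by Patchwork version:

    import requests  # assumed available: pip install requests

    BASE = "http://patches.dpdk.org/api"
    TOKEN = "..."  # hypothetical API token from your Patchwork profile

    # get: show the patch (same request as the example below)
    resp = requests.get(f"{BASE}/patches/63652/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # patch: partial update -- only the supplied fields are changed
    resp = requests.patch(
        f"{BASE}/patches/63652/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "superseded", "archived": True},
    )
    resp.raise_for_status()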

GET /api/patches/63652/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 63652,
    "url": "http://patches.dpdk.org/api/patches/63652/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1575808249-31135-12-git-send-email-anoobj@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1575808249-31135-12-git-send-email-anoobj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1575808249-31135-12-git-send-email-anoobj@marvell.com",
    "date": "2019-12-08T12:30:46",
    "name": "[11/14] examples/ipsec-secgw: add app processing code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f4219c7681a5bfb58d3c5493ce80ef0cd30de1bb",
    "submitter": {
        "id": 1205,
        "url": "http://patches.dpdk.org/api/people/1205/?format=api",
        "name": "Anoob Joseph",
        "email": "anoobj@marvell.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1575808249-31135-12-git-send-email-anoobj@marvell.com/mbox/",
    "series": [
        {
            "id": 7750,
            "url": "http://patches.dpdk.org/api/series/7750/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=7750",
            "date": "2019-12-08T12:30:35",
            "name": "add eventmode to ipsec-secgw",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/7750/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/63652/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/63652/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 87798A04F1;\n\tSun,  8 Dec 2019 13:33:18 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 11F661BFB1;\n\tSun,  8 Dec 2019 13:32:19 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174]) by dpdk.org (Postfix) with ESMTP id 025DD1BFB1\n for <dev@dpdk.org>; Sun,  8 Dec 2019 13:32:16 +0100 (CET)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n xB8CVB1n021884; Sun, 8 Dec 2019 04:32:16 -0800",
            "from sc-exch01.marvell.com ([199.233.58.181])\n by mx0a-0016f401.pphosted.com with ESMTP id 2wrbawjm7y-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Sun, 08 Dec 2019 04:32:16 -0800",
            "from SC-EXCH03.marvell.com (10.93.176.83) by SC-EXCH01.marvell.com\n (10.93.176.81) with Microsoft SMTP Server (TLS) id 15.0.1367.3; Sun, 8 Dec\n 2019 04:32:14 -0800",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH03.marvell.com\n (10.93.176.83) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n Transport; Sun, 8 Dec 2019 04:32:14 -0800",
            "from ajoseph83.caveonetworks.com.com (unknown [10.29.45.60])\n by maili.marvell.com (Postfix) with ESMTP id 213EA3F703F;\n Sun,  8 Dec 2019 04:32:09 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0818;\n bh=wp6BKr/k6f1/A2lrg7BxNBPQmF10qtQRM7Bu2NqslRk=;\n b=QbpsNmrRRjQmOlo3CY7/Yk3ULR4pLG9m/PbLZIpi+ZK6jTSSl9/Ctt8Mw5s1VhBQDnOY\n LLQ7kvs47eiM3DQE2gSKrYTgmavc8uWRRz+tngcw7rQLzw3vuDPW39Di69UqsLIiL06P\n XdmpCHfSSlYU6fy3L+8NN7zMrA09ItXAVYOBX6FTOToyf0fEapCYhEZys+/XK52x604T\n NOu2LnXYsyiz+xcN5qkfdYtLRkFyAZpWecA7tNesbqPZVpu/qg/zeLxgWer5Dq8JJnlb\n kQRr0999WB/7FN6zIPW1Tl2aa/N/RRcZeXp+pYc3OWWQfeNzTYza393u/hShfjPqd8c3 jQ==",
        "From": "Anoob Joseph <anoobj@marvell.com>",
        "To": "Akhil Goyal <akhil.goyal@nxp.com>, Radu Nicolau <radu.nicolau@intel.com>,\n Thomas Monjalon <thomas@monjalon.net>",
        "CC": "Lukasz Bartosik <lbartosik@marvell.com>, Jerin Jacob <jerinj@marvell.com>,\n Narayana Prasad <pathreya@marvell.com>,\n Ankur Dwivedi <adwivedi@marvell.com>, Anoob Joseph <anoobj@marvell.com>,\n Archana Muniganti <marchana@marvell.com>,\n Tejasree Kondoj <ktejasree@marvell.com>, Vamsi Attunuru\n <vattunuru@marvell.com>,\n Konstantin Ananyev <konstantin.ananyev@intel.com>, <dev@dpdk.org>",
        "Date": "Sun, 8 Dec 2019 18:00:46 +0530",
        "Message-ID": "<1575808249-31135-12-git-send-email-anoobj@marvell.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1575808249-31135-1-git-send-email-anoobj@marvell.com>",
        "References": "<1575808249-31135-1-git-send-email-anoobj@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.95,18.0.572\n definitions=2019-12-08_03:2019-12-05,2019-12-08 signatures=0",
        "Subject": "[dpdk-dev] [PATCH 11/14] examples/ipsec-secgw: add app processing\n\tcode",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Lukasz Bartosik <lbartosik@marvell.com>\n\nAdd IPsec application processing code for event mode.\n\nSigned-off-by: Anoob Joseph <anoobj@marvell.com>\nSigned-off-by: Lukasz Bartosik <lbartosik@marvell.com>\n---\n examples/ipsec-secgw/ipsec-secgw.c  | 124 ++++++------------\n examples/ipsec-secgw/ipsec-secgw.h  |  81 ++++++++++++\n examples/ipsec-secgw/ipsec.h        |  37 +++---\n examples/ipsec-secgw/ipsec_worker.c | 242 ++++++++++++++++++++++++++++++++++--\n examples/ipsec-secgw/ipsec_worker.h |  39 ++++++\n examples/ipsec-secgw/sa.c           |  11 --\n 6 files changed, 409 insertions(+), 125 deletions(-)\n create mode 100644 examples/ipsec-secgw/ipsec-secgw.h\n create mode 100644 examples/ipsec-secgw/ipsec_worker.h",
    "diff": "diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c\nindex c5d95b9..2e7d4d8 100644\n--- a/examples/ipsec-secgw/ipsec-secgw.c\n+++ b/examples/ipsec-secgw/ipsec-secgw.c\n@@ -50,12 +50,11 @@\n \n #include \"event_helper.h\"\n #include \"ipsec.h\"\n+#include \"ipsec_worker.h\"\n #include \"parser.h\"\n \n volatile bool force_quit;\n \n-#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1\n-\n #define MAX_JUMBO_PKT_LEN  9600\n \n #define MEMPOOL_CACHE_SIZE 256\n@@ -70,8 +69,6 @@ volatile bool force_quit;\n \n #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */\n \n-#define NB_SOCKETS 4\n-\n /* Configure how many packets ahead to prefetch, when reading packets */\n #define PREFETCH_OFFSET\t3\n \n@@ -79,8 +76,6 @@ volatile bool force_quit;\n \n #define MAX_LCORE_PARAMS 1024\n \n-#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid))\n-\n /*\n  * Configurable number of RX/TX ring descriptors\n  */\n@@ -89,29 +84,6 @@ volatile bool force_quit;\n static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;\n static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;\n \n-#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN\n-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \\\n-\t(((uint64_t)((a) & 0xff) << 56) | \\\n-\t((uint64_t)((b) & 0xff) << 48) | \\\n-\t((uint64_t)((c) & 0xff) << 40) | \\\n-\t((uint64_t)((d) & 0xff) << 32) | \\\n-\t((uint64_t)((e) & 0xff) << 24) | \\\n-\t((uint64_t)((f) & 0xff) << 16) | \\\n-\t((uint64_t)((g) & 0xff) << 8)  | \\\n-\t((uint64_t)(h) & 0xff))\n-#else\n-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \\\n-\t(((uint64_t)((h) & 0xff) << 56) | \\\n-\t((uint64_t)((g) & 0xff) << 48) | \\\n-\t((uint64_t)((f) & 0xff) << 40) | \\\n-\t((uint64_t)((e) & 0xff) << 32) | \\\n-\t((uint64_t)((d) & 0xff) << 24) | \\\n-\t((uint64_t)((c) & 0xff) << 16) | \\\n-\t((uint64_t)((b) & 0xff) << 8) | \\\n-\t((uint64_t)(a) & 0xff))\n-#endif\n-#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))\n-\n #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \\\n \t\t(addr)->addr_bytes[0], (addr)->addr_bytes[1], \\\n \t\t(addr)->addr_bytes[2], (addr)->addr_bytes[3], \\\n@@ -123,18 +95,6 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;\n \n #define MTU_TO_FRAMELEN(x)\t((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)\n \n-/* port/source ethernet addr and destination ethernet addr */\n-struct ethaddr_info {\n-\tuint64_t src, dst;\n-};\n-\n-struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {\n-\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },\n-\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },\n-\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },\n-\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }\n-};\n-\n struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];\n \n #define CMD_LINE_OPT_CONFIG\t\t\"config\"\n@@ -192,10 +152,16 @@ static const struct option lgopts[] = {\n \t{NULL, 0, 0, 0}\n };\n \n+struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {\n+\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },\n+\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },\n+\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },\n+\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }\n+};\n+\n /* mask of enabled ports */\n static uint32_t enabled_port_mask;\n static uint64_t enabled_cryptodev_mask = UINT64_MAX;\n-static uint32_t unprotected_port_mask;\n static int32_t promiscuous_on = 1;\n static int32_t numa_on = 1; /**< NUMA is enabled by default. */\n static uint32_t nb_lcores;\n@@ -283,8 +249,6 @@ static struct rte_eth_conf port_conf = {\n \t},\n };\n \n-static struct socket_ctx socket_ctx[NB_SOCKETS];\n-\n /*\n  * Determine is multi-segment support required:\n  *  - either frame buffer size is smaller then mtu\n@@ -2828,47 +2792,10 @@ main(int32_t argc, char **argv)\n \n \t\tsa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);\n \t\tport_init(portid, req_rx_offloads, req_tx_offloads);\n-\t\t/* Create default ipsec flow for the ethernet device */\n-\t\tret = create_default_ipsec_flow(portid, req_rx_offloads);\n-\t\tif (ret)\n-\t\t\tprintf(\"Cannot create default flow, err=%d, port=%d\\n\",\n-\t\t\t\t\tret, portid);\n \t}\n \n \tcryptodevs_init();\n \n-\t/* start ports */\n-\tRTE_ETH_FOREACH_DEV(portid) {\n-\t\tif ((enabled_port_mask & (1 << portid)) == 0)\n-\t\t\tcontinue;\n-\n-\t\t/*\n-\t\t * Start device\n-\t\t * note: device must be started before a flow rule\n-\t\t * can be installed.\n-\t\t */\n-\t\tret = rte_eth_dev_start(portid);\n-\t\tif (ret < 0)\n-\t\t\trte_exit(EXIT_FAILURE, \"rte_eth_dev_start: \"\n-\t\t\t\t\t\"err=%d, port=%d\\n\", ret, portid);\n-\t\t/*\n-\t\t * If enabled, put device in promiscuous mode.\n-\t\t * This allows IO forwarding mode to forward packets\n-\t\t * to itself through 2 cross-connected  ports of the\n-\t\t * target machine.\n-\t\t */\n-\t\tif (promiscuous_on) {\n-\t\t\tret = rte_eth_promiscuous_enable(portid);\n-\t\t\tif (ret != 0)\n-\t\t\t\trte_exit(EXIT_FAILURE,\n-\t\t\t\t\t\"rte_eth_promiscuous_enable: err=%s, port=%d\\n\",\n-\t\t\t\t\trte_strerror(-ret), portid);\n-\t\t}\n-\n-\t\trte_eth_dev_callback_register(portid,\n-\t\t\tRTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);\n-\t}\n-\n \t/* fragment reassemble is enabled */\n \tif (frag_tbl_sz != 0) {\n \t\tret = reassemble_init();\n@@ -2889,8 +2816,6 @@ main(int32_t argc, char **argv)\n \t\t}\n \t}\n \n-\tcheck_all_ports_link_status(enabled_port_mask);\n-\n \t/*\n \t * Set the enabled port mask in helper config for use by helper\n \t * sub-system. This will be used while intializing devices using\n@@ -2903,6 +2828,39 @@ main(int32_t argc, char **argv)\n \tif (ret < 0)\n \t\trte_exit(EXIT_FAILURE, \"eh_devs_init failed, err=%d\\n\", ret);\n \n+\t/* Create default ipsec flow for each port and start each port */\n+\tRTE_ETH_FOREACH_DEV(portid) {\n+\t\tif ((enabled_port_mask & (1 << portid)) == 0)\n+\t\t\tcontinue;\n+\n+\t\tret = create_default_ipsec_flow(portid, req_rx_offloads);\n+\t\tif (ret)\n+\t\t\tprintf(\"create_default_ipsec_flow failed, err=%d, \"\n+\t\t\t       \"port=%d\\n\", ret, portid);\n+\t\t/*\n+\t\t * Start device\n+\t\t * note: device must be started before a flow rule\n+\t\t * can be installed.\n+\t\t */\n+\t\tret = rte_eth_dev_start(portid);\n+\t\tif (ret < 0)\n+\t\t\trte_exit(EXIT_FAILURE, \"rte_eth_dev_start: \"\n+\t\t\t\t\t\"err=%d, port=%d\\n\", ret, portid);\n+\t\t/*\n+\t\t * If enabled, put device in promiscuous mode.\n+\t\t * This allows IO forwarding mode to forward packets\n+\t\t * to itself through 2 cross-connected  ports of the\n+\t\t * target machine.\n+\t\t */\n+\t\tif (promiscuous_on)\n+\t\t\trte_eth_promiscuous_enable(portid);\n+\n+\t\trte_eth_dev_callback_register(portid,\n+\t\t\tRTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);\n+\t}\n+\n+\tcheck_all_ports_link_status(enabled_port_mask);\n+\n \t/* launch per-lcore init on every lcore */\n \trte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);\n \ndiff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h\nnew file mode 100644\nindex 0000000..67e1193\n--- /dev/null\n+++ b/examples/ipsec-secgw/ipsec-secgw.h\n@@ -0,0 +1,81 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Cavium, Inc\n+ */\n+#ifndef _IPSEC_SECGW_H_\n+#define _IPSEC_SECGW_H_\n+\n+#include <rte_hash.h>\n+\n+#define MAX_PKT_BURST 32\n+\n+#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1\n+\n+#define NB_SOCKETS 4\n+\n+#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid))\n+\n+#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN\n+#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \\\n+\t(((uint64_t)((a) & 0xff) << 56) | \\\n+\t((uint64_t)((b) & 0xff) << 48) | \\\n+\t((uint64_t)((c) & 0xff) << 40) | \\\n+\t((uint64_t)((d) & 0xff) << 32) | \\\n+\t((uint64_t)((e) & 0xff) << 24) | \\\n+\t((uint64_t)((f) & 0xff) << 16) | \\\n+\t((uint64_t)((g) & 0xff) << 8)  | \\\n+\t((uint64_t)(h) & 0xff))\n+#else\n+#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \\\n+\t(((uint64_t)((h) & 0xff) << 56) | \\\n+\t((uint64_t)((g) & 0xff) << 48) | \\\n+\t((uint64_t)((f) & 0xff) << 40) | \\\n+\t((uint64_t)((e) & 0xff) << 32) | \\\n+\t((uint64_t)((d) & 0xff) << 24) | \\\n+\t((uint64_t)((c) & 0xff) << 16) | \\\n+\t((uint64_t)((b) & 0xff) << 8) | \\\n+\t((uint64_t)(a) & 0xff))\n+#endif\n+\n+#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))\n+\n+struct traffic_type {\n+\tconst uint8_t *data[MAX_PKT_BURST * 2];\n+\tstruct rte_mbuf *pkts[MAX_PKT_BURST * 2];\n+\tvoid *saptr[MAX_PKT_BURST * 2];\n+\tuint32_t res[MAX_PKT_BURST * 2];\n+\tuint32_t num;\n+};\n+\n+struct ipsec_traffic {\n+\tstruct traffic_type ipsec;\n+\tstruct traffic_type ip4;\n+\tstruct traffic_type ip6;\n+};\n+\n+/* Fields optimized for devices without burst */\n+struct traffic_type_nb {\n+\tconst uint8_t *data;\n+\tstruct rte_mbuf *pkt;\n+\tuint32_t res;\n+\tuint32_t num;\n+};\n+\n+struct ipsec_traffic_nb {\n+\tstruct traffic_type_nb ipsec;\n+\tstruct traffic_type_nb ip4;\n+\tstruct traffic_type_nb ip6;\n+};\n+\n+/* port/source ethernet addr and destination ethernet addr */\n+struct ethaddr_info {\n+\tuint64_t src, dst;\n+};\n+\n+struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];\n+\n+/* TODO: All var definitions need to be part of a .c file */\n+\n+/* Port mask to identify the unprotected ports */\n+uint32_t unprotected_port_mask;\n+\n+#endif /* _IPSEC_SECGW_H_ */\ndiff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h\nindex 0b9fc04..0c5ee8a 100644\n--- a/examples/ipsec-secgw/ipsec.h\n+++ b/examples/ipsec-secgw/ipsec.h\n@@ -13,11 +13,11 @@\n #include <rte_flow.h>\n #include <rte_ipsec.h>\n \n-#define RTE_LOGTYPE_IPSEC       RTE_LOGTYPE_USER1\n+#include \"ipsec-secgw.h\"\n+\n #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2\n #define RTE_LOGTYPE_IPSEC_IPIP  RTE_LOGTYPE_USER3\n \n-#define MAX_PKT_BURST 32\n #define MAX_INFLIGHT 128\n #define MAX_QP_PER_LCORE 256\n \n@@ -153,6 +153,17 @@ struct ipsec_sa {\n \tstruct rte_security_session_conf sess_conf;\n } __rte_cache_aligned;\n \n+struct sa_ctx {\n+\tvoid *satbl; /* pointer to array of rte_ipsec_sa objects*/\n+\tstruct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];\n+\tunion {\n+\t\tstruct {\n+\t\t\tstruct rte_crypto_sym_xform a;\n+\t\t\tstruct rte_crypto_sym_xform b;\n+\t\t};\n+\t} xf[IPSEC_SA_MAX_ENTRIES];\n+};\n+\n struct ipsec_mbuf_metadata {\n \tstruct ipsec_sa *sa;\n \tstruct rte_crypto_op cop;\n@@ -233,26 +244,8 @@ struct cnt_blk {\n \tuint32_t cnt;\n } __attribute__((packed));\n \n-struct traffic_type {\n-\tconst uint8_t *data[MAX_PKT_BURST * 2];\n-\tstruct rte_mbuf *pkts[MAX_PKT_BURST * 2];\n-\tvoid *saptr[MAX_PKT_BURST * 2];\n-\tuint32_t res[MAX_PKT_BURST * 2];\n-\tuint32_t num;\n-};\n-\n-struct ipsec_traffic {\n-\tstruct traffic_type ipsec;\n-\tstruct traffic_type ip4;\n-\tstruct traffic_type ip6;\n-};\n-\n-\n-void\n-ipsec_poll_mode_worker(void);\n-\n-int\n-ipsec_launch_one_lcore(void *args);\n+/* Socket ctx */\n+struct socket_ctx socket_ctx[NB_SOCKETS];\n \n uint16_t\n ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],\ndiff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c\nindex fce274a..2af9475 100644\n--- a/examples/ipsec-secgw/ipsec_worker.c\n+++ b/examples/ipsec-secgw/ipsec_worker.c\n@@ -15,6 +15,7 @@\n #include <ctype.h>\n #include <stdbool.h>\n \n+#include <rte_acl.h>\n #include <rte_common.h>\n #include <rte_log.h>\n #include <rte_memcpy.h>\n@@ -29,12 +30,51 @@\n #include <rte_eventdev.h>\n #include <rte_malloc.h>\n #include <rte_mbuf.h>\n+#include <rte_lpm.h>\n+#include <rte_lpm6.h>\n \n #include \"ipsec.h\"\n+#include \"ipsec_worker.h\"\n #include \"event_helper.h\"\n \n extern volatile bool force_quit;\n \n+static inline enum pkt_type\n+process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)\n+{\n+\tstruct rte_ether_hdr *eth;\n+\n+\teth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n+\tif (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {\n+\t\t*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +\n+\t\t\t\toffsetof(struct ip, ip_p));\n+\t\tif (**nlp == IPPROTO_ESP)\n+\t\t\treturn PKT_TYPE_IPSEC_IPV4;\n+\t\telse\n+\t\t\treturn PKT_TYPE_PLAIN_IPV4;\n+\t} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {\n+\t\t*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +\n+\t\t\t\toffsetof(struct ip6_hdr, ip6_nxt));\n+\t\tif (**nlp == IPPROTO_ESP)\n+\t\t\treturn PKT_TYPE_IPSEC_IPV6;\n+\t\telse\n+\t\t\treturn PKT_TYPE_PLAIN_IPV6;\n+\t}\n+\n+\t/* Unknown/Unsupported type */\n+\treturn PKT_TYPE_INVALID;\n+}\n+\n+static inline void\n+update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)\n+{\n+\tstruct rte_ether_hdr *ethhdr;\n+\n+\tethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);\n+\tmemcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);\n+\tmemcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);\n+}\n+\n static inline void\n ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)\n {\n@@ -45,6 +85,177 @@ ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)\n \trte_event_eth_tx_adapter_txq_set(m, 0);\n }\n \n+static inline int\n+check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)\n+{\n+\tuint32_t res;\n+\n+\tif (unlikely(sp == NULL))\n+\t\treturn 0;\n+\n+\trte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,\n+\t\t\tDEFAULT_MAX_CATEGORIES);\n+\n+\tif (unlikely(res == 0)) {\n+\t\t/* No match */\n+\t\treturn 0;\n+\t}\n+\n+\tif (res == DISCARD)\n+\t\treturn 0;\n+\telse if (res == BYPASS) {\n+\t\t*sa_idx = 0;\n+\t\treturn 1;\n+\t}\n+\n+\t*sa_idx = SPI2IDX(res);\n+\tif (*sa_idx < IPSEC_SA_MAX_ENTRIES)\n+\t\treturn 1;\n+\n+\t/* Invalid SA IDX */\n+\treturn 0;\n+}\n+\n+static inline uint16_t\n+route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)\n+{\n+\tuint32_t dst_ip;\n+\tuint16_t offset;\n+\tuint32_t hop;\n+\tint ret;\n+\n+\toffset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);\n+\tdst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);\n+\tdst_ip = rte_be_to_cpu_32(dst_ip);\n+\n+\tret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);\n+\n+\tif (ret == 0) {\n+\t\t/* We have a hit */\n+\t\treturn hop;\n+\t}\n+\n+\t/* else */\n+\treturn RTE_MAX_ETHPORTS;\n+}\n+\n+/* TODO: To be tested */\n+static inline uint16_t\n+route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)\n+{\n+\tuint8_t dst_ip[16];\n+\tuint8_t *ip6_dst;\n+\tuint16_t offset;\n+\tuint32_t hop;\n+\tint ret;\n+\n+\toffset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);\n+\tip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);\n+\tmemcpy(&dst_ip[0], ip6_dst, 16);\n+\n+\tret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);\n+\n+\tif (ret == 0) {\n+\t\t/* We have a hit */\n+\t\treturn hop;\n+\t}\n+\n+\t/* else */\n+\treturn RTE_MAX_ETHPORTS;\n+}\n+\n+static inline uint16_t\n+get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)\n+{\n+\tif (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)\n+\t\treturn route4_pkt(pkt, rt->rt4_ctx);\n+\telse if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)\n+\t\treturn route6_pkt(pkt, rt->rt6_ctx);\n+\n+\treturn RTE_MAX_ETHPORTS;\n+}\n+\n+static inline int\n+process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,\n+\t\tstruct rte_event *ev)\n+{\n+\tstruct ipsec_sa *sa = NULL;\n+\tstruct rte_mbuf *pkt;\n+\tuint16_t port_id = 0;\n+\tenum pkt_type type;\n+\tuint32_t sa_idx;\n+\tuint8_t *nlp;\n+\n+\t/* Get pkt from event */\n+\tpkt = ev->mbuf;\n+\n+\t/* Check the packet type */\n+\ttype = process_ipsec_get_pkt_type(pkt, &nlp);\n+\n+\tswitch (type) {\n+\tcase PKT_TYPE_PLAIN_IPV4:\n+\t\tif (pkt->ol_flags & PKT_RX_SEC_OFFLOAD)\n+\t\t\tsa = (struct ipsec_sa *) pkt->udata64;\n+\n+\t\t/* Check if we have a match */\n+\t\tif (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {\n+\t\t\t/* No valid match */\n+\t\t\tgoto drop_pkt_and_exit;\n+\t\t}\n+\t\tbreak;\n+\n+\tcase PKT_TYPE_PLAIN_IPV6:\n+\t\tif (pkt->ol_flags & PKT_RX_SEC_OFFLOAD)\n+\t\t\tsa = (struct ipsec_sa *) pkt->udata64;\n+\n+\t\t/* Check if we have a match */\n+\t\tif (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {\n+\t\t\t/* No valid match */\n+\t\t\tgoto drop_pkt_and_exit;\n+\t\t}\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tRTE_LOG(ERR, IPSEC, \"Unsupported packet type = %d\\n\", type);\n+\t\tgoto drop_pkt_and_exit;\n+\t}\n+\n+\t/* Check if the packet has to be bypassed */\n+\tif (sa_idx == 0)\n+\t\tgoto route_and_send_pkt;\n+\n+\t/* Else the packet has to be protected with SA */\n+\n+\t/* If the packet was IPsec processed, then SA pointer should be set */\n+\tif (sa == NULL)\n+\t\tgoto drop_pkt_and_exit;\n+\n+\t/* SPI on the packet should match with the one in SA */\n+\tif (unlikely(sa->spi != sa_idx))\n+\t\tgoto drop_pkt_and_exit;\n+\n+route_and_send_pkt:\n+\tport_id = get_route(pkt, rt, type);\n+\tif (unlikely(port_id == RTE_MAX_ETHPORTS)) {\n+\t\t/* no match */\n+\t\tgoto drop_pkt_and_exit;\n+\t}\n+\t/* else, we have a matching route */\n+\n+\t/* Update mac addresses */\n+\tupdate_mac_addrs(pkt, port_id);\n+\n+\t/* Update the event with the dest port */\n+\tipsec_event_pre_forward(pkt, port_id);\n+\treturn 1;\n+\n+drop_pkt_and_exit:\n+\tRTE_LOG(ERR, IPSEC, \"Inbound packet dropped\\n\");\n+\trte_pktmbuf_free(pkt);\n+\tev->mbuf = NULL;\n+\treturn 0;\n+}\n+\n /*\n  * Event mode exposes various operating modes depending on the\n  * capabilities of the event device and the operating mode\n@@ -134,11 +345,11 @@ static void\n ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links,\n \t\tuint8_t nb_links)\n {\n+\tstruct lcore_conf_ev_tx_int_port_wrkr lconf;\n \tunsigned int nb_rx = 0;\n-\tunsigned int port_id;\n-\tstruct rte_mbuf *pkt;\n \tstruct rte_event ev;\n \tuint32_t lcore_id;\n+\tint32_t socket_id;\n \n \t/* Check if we have links registered for this lcore */\n \tif (nb_links == 0) {\n@@ -151,6 +362,21 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links,\n \t/* Get core ID */\n \tlcore_id = rte_lcore_id();\n \n+\t/* Get socket ID */\n+\tsocket_id = rte_lcore_to_socket_id(lcore_id);\n+\n+\t/* Save routing table */\n+\tlconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;\n+\tlconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;\n+\tlconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;\n+\tlconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;\n+\tlconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;\n+\tlconf.inbound.session_pool = socket_ctx[socket_id].session_pool;\n+\tlconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;\n+\tlconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;\n+\tlconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;\n+\tlconf.outbound.session_pool = socket_ctx[socket_id].session_pool;\n+\n \tRTE_LOG(INFO, IPSEC,\n \t\t\"Launching event mode worker (non-burst - Tx internal port - \"\n \t\t\"app mode - inbound) on lcore %d\\n\", lcore_id);\n@@ -175,13 +401,11 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links,\n \t\tif (nb_rx == 0)\n \t\t\tcontinue;\n \n-\t\tport_id = ev.queue_id;\n-\t\tpkt = ev.mbuf;\n-\n-\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n-\n-\t\t/* Process packet */\n-\t\tipsec_event_pre_forward(pkt, port_id);\n+\t\tif (process_ipsec_ev_inbound(&lconf.inbound,\n+\t\t\t\t&lconf.rt, &ev) != 1) {\n+\t\t\t/* The pkt has been dropped */\n+\t\t\tcontinue;\n+\t\t}\n \n \t\t/*\n \t\t * Since tx internal port is available, events can be\ndiff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h\nnew file mode 100644\nindex 0000000..fd18a2e\n--- /dev/null\n+++ b/examples/ipsec-secgw/ipsec_worker.h\n@@ -0,0 +1,39 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Cavium, Inc\n+ */\n+#ifndef _IPSEC_WORKER_H_\n+#define _IPSEC_WORKER_H_\n+\n+#include \"ipsec.h\"\n+\n+enum pkt_type {\n+\tPKT_TYPE_PLAIN_IPV4 = 1,\n+\tPKT_TYPE_IPSEC_IPV4,\n+\tPKT_TYPE_PLAIN_IPV6,\n+\tPKT_TYPE_IPSEC_IPV6,\n+\tPKT_TYPE_INVALID\n+};\n+\n+struct route_table {\n+\tstruct rt_ctx *rt4_ctx;\n+\tstruct rt_ctx *rt6_ctx;\n+};\n+\n+/*\n+ * Conf required by event mode worker with tx internal port\n+ */\n+struct lcore_conf_ev_tx_int_port_wrkr {\n+\tstruct ipsec_ctx inbound;\n+\tstruct ipsec_ctx outbound;\n+\tstruct route_table rt;\n+} __rte_cache_aligned;\n+\n+/* TODO\n+ *\n+ * Move this function to ipsec_worker.c\n+ */\n+void ipsec_poll_mode_worker(void);\n+\n+int ipsec_launch_one_lcore(void *args);\n+\n+#endif /* _IPSEC_WORKER_H_ */\ndiff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c\nindex 7f046e3..9e17ba0 100644\n--- a/examples/ipsec-secgw/sa.c\n+++ b/examples/ipsec-secgw/sa.c\n@@ -772,17 +772,6 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)\n \tprintf(\"\\n\");\n }\n \n-struct sa_ctx {\n-\tvoid *satbl; /* pointer to array of rte_ipsec_sa objects*/\n-\tstruct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];\n-\tunion {\n-\t\tstruct {\n-\t\t\tstruct rte_crypto_sym_xform a;\n-\t\t\tstruct rte_crypto_sym_xform b;\n-\t\t};\n-\t} xf[IPSEC_SA_MAX_ENTRIES];\n-};\n-\n static struct sa_ctx *\n sa_create(const char *name, int32_t socket_id)\n {\n",
    "prefixes": [
        "11/14"
    ]
}
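
For read-only use, the mbox links in the response above are usually the most convenient handle on the patch: they return the raw mail, which can be applied to a source tree. A minimal sketch using only the Python standard library; the URL is taken from the "series" entry above, and the output filename is arbitrary:

    import urllib.request

    # mbox of the series this patch belongs to (see "series" above)
    SERIES_MBOX = "http://patches.dpdk.org/series/7750/mbox/"

    # Download the whole series as a single mbox file
    with urllib.request.urlopen(SERIES_MBOX) as resp:
        data = resp.read()
    with open("series-7750.mbox", "wb") as out:
        out.write(data)

    # The file can then be applied to a DPDK checkout, e.g.:
    #   git am series-7750.mbox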