get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
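
As a minimal sketch of how a client could retrieve this resource programmatically (the use of Python's requests library and the explicit Accept header are assumptions for illustration, not part of the Patchwork page shown here), the patch JSON below can be fetched and a few of its fields read like this:

# Minimal sketch: fetch patch 10294 from the Patchwork REST API as JSON
# and print some of the fields that appear in the response body below.
# Assumes the `requests` package is installed.
import requests

PATCH_URL = "https://patches.dpdk.org/api/patches/10294/"

resp = requests.get(PATCH_URL, headers={"Accept": "application/json"}, timeout=30)
resp.raise_for_status()
patch = resp.json()

# "name", "state", "submitter" and "mbox" are fields of the JSON payload shown below.
print(patch["name"])
print(patch["state"], patch["submitter"]["email"])
print(patch["mbox"])  # URL of the raw mbox for this patch

The response that follows is what the browsable API renders for a GET on this endpoint.
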

GET /api/patches/10294/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 10294,
    "url": "https://patches.dpdk.org/api/patches/10294/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1454099352-29040-1-git-send-email-sergio.gonzalez.monroy@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1454099352-29040-1-git-send-email-sergio.gonzalez.monroy@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1454099352-29040-1-git-send-email-sergio.gonzalez.monroy@intel.com",
    "date": "2016-01-29T20:29:12",
    "name": "[dpdk-dev] example/ipsec-secgw: ipsec security gateway",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "6aa94bde55ed253046603980720aef9d66d3d001",
    "submitter": {
        "id": 73,
        "url": "https://patches.dpdk.org/api/people/73/?format=api",
        "name": "Sergio Gonzalez Monroy",
        "email": "sergio.gonzalez.monroy@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1454099352-29040-1-git-send-email-sergio.gonzalez.monroy@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/10294/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/10294/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 2F6E2C768;\n\tFri, 29 Jan 2016 21:29:42 +0100 (CET)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id 9C94AC70C\n\tfor <dev@dpdk.org>; Fri, 29 Jan 2016 21:29:39 +0100 (CET)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga101.fm.intel.com with ESMTP; 29 Jan 2016 12:29:14 -0800",
            "from sie-lab-212-209.ir.intel.com (HELO\n\tsilpixa00377983.ir.intel.com) ([10.237.212.209])\n\tby FMSMGA003.fm.intel.com with ESMTP; 29 Jan 2016 12:29:13 -0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.22,366,1449561600\"; d=\"scan'208\";a=\"643672046\"",
        "From": "Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Fri, 29 Jan 2016 20:29:12 +0000",
        "Message-Id": "<1454099352-29040-1-git-send-email-sergio.gonzalez.monroy@intel.com>",
        "X-Mailer": "git-send-email 2.4.3",
        "Subject": "[dpdk-dev] [PATCH] example/ipsec-secgw: ipsec security gateway",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Sample app implementing an IPsec Security Geteway.\nThe main goal of this app is to show the use of cryptodev framework\nin a real world application.\n\nCurrently only supported static IPv4 IPsec tunnels using AES-CBC\nand HMAC-SHA1.\n\nAlso, currently not supported:\n- SA auto negotiation (No IKE support)\n- chained mbufs\n\nSigned-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>\n---\n MAINTAINERS                              |    4 +\n doc/guides/sample_app_ug/index.rst       |    1 +\n doc/guides/sample_app_ug/ipsec_secgw.rst |  440 +++++++++++\n examples/Makefile                        |    2 +\n examples/ipsec-secgw/Makefile            |   58 ++\n examples/ipsec-secgw/esp.c               |  256 +++++++\n examples/ipsec-secgw/esp.h               |   66 ++\n examples/ipsec-secgw/ipip.h              |  100 +++\n examples/ipsec-secgw/ipsec-secgw.c       | 1218 ++++++++++++++++++++++++++++++\n examples/ipsec-secgw/ipsec.c             |  138 ++++\n examples/ipsec-secgw/ipsec.h             |  184 +++++\n examples/ipsec-secgw/rt.c                |  131 ++++\n examples/ipsec-secgw/sa.c                |  391 ++++++++++\n examples/ipsec-secgw/sp.c                |  324 ++++++++\n 14 files changed, 3313 insertions(+)\n create mode 100644 doc/guides/sample_app_ug/ipsec_secgw.rst\n create mode 100644 examples/ipsec-secgw/Makefile\n create mode 100644 examples/ipsec-secgw/esp.c\n create mode 100644 examples/ipsec-secgw/esp.h\n create mode 100644 examples/ipsec-secgw/ipip.h\n create mode 100644 examples/ipsec-secgw/ipsec-secgw.c\n create mode 100644 examples/ipsec-secgw/ipsec.c\n create mode 100644 examples/ipsec-secgw/ipsec.h\n create mode 100644 examples/ipsec-secgw/rt.c\n create mode 100644 examples/ipsec-secgw/sa.c\n create mode 100644 examples/ipsec-secgw/sp.c",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex b90aeea..996eda6 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -596,3 +596,7 @@ F: doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst\n M: Pablo de Lara <pablo.de.lara.guarch@intel.com>\n M: Daniel Mrzyglod <danielx.t.mrzyglod@intel.com>\n F: examples/ptpclient/\n+\n+M: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>\n+F: doc/guides/sample_app_ug/ipsec-secgw.rst\n+F: examples/ipsec-secgw/\ndiff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst\nindex 8a646dd..88375d2 100644\n--- a/doc/guides/sample_app_ug/index.rst\n+++ b/doc/guides/sample_app_ug/index.rst\n@@ -73,6 +73,7 @@ Sample Applications User Guide\n     proc_info\n     ptpclient\n     performance_thread\n+    ipsec_secgw\n \n **Figures**\n \ndiff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst\nnew file mode 100644\nindex 0000000..7be3f7e\n--- /dev/null\n+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst\n@@ -0,0 +1,440 @@\n+..  BSD LICENSE\n+    Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+    All rights reserved.\n+\n+    Redistribution and use in source and binary forms, with or without\n+    modification, are permitted provided that the following conditions\n+    are met:\n+\n+    * Redistributions of source code must retain the above copyright\n+    notice, this list of conditions and the following disclaimer.\n+    * Redistributions in binary form must reproduce the above copyright\n+    notice, this list of conditions and the following disclaimer in\n+    the documentation and/or other materials provided with the\n+    distribution.\n+    * Neither the name of Intel Corporation nor the names of its\n+    contributors may be used to endorse or promote products derived\n+    from this software without specific prior written permission.\n+\n+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+    \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+    A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+IPsec Security Gateway Sample Application\n+=========================================\n+\n+The IPsec Security Gateway application is a minimal example of real work\n+application using DPDK cryptodev framework.\n+\n+Overview\n+--------\n+\n+The application demonstrates the implementation of a minimal Security Gateway\n+(see Constraints bellow) using DPDK based on RFC4301, RFC4303, RFC3602 and\n+RFC2404.\n+\n+Note that IKE is not supported (therefore not IPsec compliant), so only manual\n+setting of Security Policies and Associations is allowed.\n+\n+The Security Policies (SP) are implemented as ACL rules, the Security\n+Associations (SA) are stored in a table and the Routing is implemented\n+using LPM.\n+\n+The application splits the ports between Protected and Unprotected.\n+Based on the port the trafic is coming from, the traffic would be consider\n+IPsec Inbound or Outbound.\n+Traffic from Unprotected ports is consider IPsec Inbound, and traffic from\n+Protected ports is consider IPsec Outbound.\n+\n+Path for IPsec Inbound traffic:\n+*  Read packets from the port\n+*  Classify packets between IPv4 and ESP.\n+*  Inbound SA lookup for ESP packets based on their SPI\n+*  Verification/Decryption\n+*  Removal of ESP and outter IP header\n+*  Inbound SP check using ACL of decrypted packets and any other IPv4 packet\n+we read.\n+*  Routing\n+*  Write packet to port\n+\n+Path for IPsec Outbound traffic:\n+*  Read packets from the port\n+*  Outbound SP check using ACL of all IPv4 traffic\n+*  Outbound SA lookup for packets that need IPsec protection\n+*  Add ESP and outter IP header\n+*  Encryption/Digest\n+*  Routing\n+*  Write packet to port\n+\n+Constraints\n+-----------\n+*  IPv4 traffic\n+*  ESP tunnel mode\n+*  EAS-CBC and HMAC-SHA1\n+*  Each SA must be handle by a unique lcore (1 RX queue per port)\n+*  No chained mbufs\n+\n+Compiling the Application\n+-------------------------\n+\n+To compile the application:\n+\n+#. Go to the sample application directory:\n+\n+   .. code-block:: console\n+\n+      export RTE_SDK=/path/to/rte_sdk\n+      cd ${RTE_SDK}/examples/ipsec-secgw\n+\n+#. Set the target (a default target is used if not specified). For example:\n+\n+   .. code-block:: console\n+\n+      export RTE_TARGET=x86_64-native-linuxapp-gcc\n+\n+    See the *DPDK Getting Started Guide* for possible RTE_TARGET values.\n+\n+#. Build the application:\n+\n+   .. code-block:: console\n+\n+       make\n+\n+Running the Application\n+-----------------------\n+\n+The application has a number of command line options:\n+\n+.. 
code-block:: console\n+\n+   ./build/ipsec-secgw [EAL options] -- -p PORTMASK -P -u PORTMASK --config\n+   (port,queue,lcore)[,(port,queue,lcore] --EP0|--EP1 --cdev AESNI|QAT\n+\n+where,\n+\n+*   -p PORTMASK: Hexadecimal bitmask of ports to configure\n+\n+*   -P: optional, sets all ports to promiscuous mode so that packets are\n+    accepted regardless of the packet's Ethernet MAC destination address.\n+    Without this option, only packets with the Ethernet MAC destination address\n+    set to the Ethernet address of the port are accepted (default is enabled).\n+\n+*   -u PORTMASK: hexadecimal bitmask of unprotected ports\n+\n+*   --config (port,queue,lcore)[,(port,queue,lcore)]: determines which queues\n+    from which ports are mapped to which cores\n+\n+*   --cdev AESNI/QAT: choose whether to use QAT HW crypto or EASNI SW crypto\n+\n+*   --EP0: configure the app as Endpoint 0.\n+\n+*   --EP1: configure the app as Endpoint 1.\n+\n+Either --EP0 or --EP1 must be specified.\n+\n+The mapping of lcores to port/queues is similar to other l3fwd applications.\n+\n+For example, given the following command line:\n+.. code-block:: console\n+\n+    ./build/ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048 -- -p 0xf -P -u 0x3\n+    --config=\"(0,0,20),(1,0,20),(2,0,21),(3,0,21)\" --cdev AESNI --EP0\n+\n+where each options means:\n+\n+*   The -l option enables cores 20 and 21\n+\n+*   The -n option sets memory 4 channels\n+\n+*   The --socket-mem to use 2GB on socket 1\n+\n+*   The -p option enables ports (detected) 0, 1, 2 and 3\n+\n+*   The -P option enables promiscous mode\n+\n+*   The -u option sets ports 1 and 2 as unprotected, leaving 2 and 3 as protected\n+\n+*   The --config option enables one queue per port with the following mapping:\n+\n++----------+-----------+-----------+---------------------------------------+\n+| **Port** | **Queue** | **lcore** | **Description**                       |\n+|          |           |           |                                       |\n++----------+-----------+-----------+---------------------------------------+\n+| 0        | 0         | 20        | Map queue 0 from port 0 to lcore 20.  |\n+|          |           |           |                                       |\n++----------+-----------+-----------+---------------------------------------+\n+| 1        | 0         | 20        | Map queue 0 from port 1 to lcore 20.  |\n+|          |           |           |                                       |\n++----------+-----------+-----------+---------------------------------------+\n+| 2        | 0         | 21        | Map queue 0 from port 2 to lcore 21.  |\n+|          |           |           |                                       |\n++----------+-----------+-----------+---------------------------------------+\n+| 3        | 0         | 21        | Map queue 0 from port 3 to lcore 21.  
|\n+|          |           |           |                                       |\n++----------+-----------+-----------+---------------------------------------+\n+\n+Refer to the *DPDK Getting Started Guide* for general information on running applications and the Environment Abstraction Layer (EAL) options.\n+\n+Configurations\n+--------------\n+\n+The following sections provide some details on the default values used to\n+initialize the SP, SA and Routing tables.\n+Currently all the configuration is hardcoded into the application.\n+\n+Security Policy Initialization\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+As mention in the overview, the Security Policies are ACL rules.\n+There are two ACLs, Inbound and Outboud.\n+The application would also replicate the set of two ACLs per socket in use.\n+\n+Following are the default rules:\n+\n+Endpoint 0 Outbound Security Policies:\n+\n++---------+------------------+-----------+------------+\n+| **Src** | **Dst**          | **proto** | **SA idx** |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.105.0/24 | Any       | 5          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.106.0/24 | Any       | 6          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.107.0/24 | Any       | 7          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.108.0/24 | Any       | 8          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+\n+Endpoint 0 Inbound Security Policies:\n+\n++---------+------------------+-----------+------------+\n+| **Src** | **Dst**          | **proto** | **SA idx** |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.115.0/24 | Any       | 5          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.116.0/24 | Any       | 6          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.117.0/24 | Any       | 7          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.118.0/24 | Any       | 8          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+\n+Endpoint 1 Outbound Security Policies:\n+\n++---------+------------------+-----------+------------+\n+| **Src** | **Dst**          | **proto** | **SA idx** |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.115.0/24 | Any       | 5          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.116.0/24 | Any       | 6          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.117.0/24 | Any       | 7          |\n+|         |                  |           |            
|\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.118.0/24 | Any       | 8          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+\n+Endpoint 1 Inbound Security Policies:\n+\n++---------+------------------+-----------+------------+\n+| **Src** | **Dst**          | **proto** | **SA idx** |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.105.0/24 | Any       | 5          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.106.0/24 | Any       | 6          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.107.0/24 | Any       | 7          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+| Any     | 192.168.108.0/24 | Any       | 8          |\n+|         |                  |           |            |\n++---------+------------------+-----------+------------+\n+\n+\n+Security Association Initialization\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+The SAs are kept in a array table.\n+\n+For Inbound, the SPI is used as index module the table size.\n+This means that on a table for 100 SA, SPI 5 and 105 would use the same index\n+and that is not currently supported.\n+\n+Notice that it is not an issue for Outbound traffic as we store the index and\n+not the SPI in the Security Policy.\n+\n+All SAs are configured with the same cipher algo, block size and key (AES-CBC\n+16B block), and same authentication algo, digest size and key (HMAC-SHA1, 12B\n+digest).\n+\n+Following are the default values:\n+\n+Endpoint 0 Outbound Security Associations:\n+\n++---------+----------------+------------------+\n+| **SPI** | **Tunnel src** | **Tunnel dst**   |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 5       | 172.16.1.5     | 172.16.2.5       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 6       | 172.16.1.6     | 172.16.2.6       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 7       | 172.16.1.7     | 172.16.2.7       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 8       | 172.16.1.8     | 172.16.2.8       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+\n+Endpoint 0 Inbound Security Associations:\n+\n++---------+----------------+------------------+\n+| **SPI** | **Tunnel src** | **Tunnel dst**   |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 5       | 172.16.2.5     | 172.16.1.5       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 6       | 172.16.2.6     | 172.16.1.6       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 7       | 172.16.2.7     | 172.16.1.7       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 8       | 172.16.2.8     | 172.16.1.8       |\n+|         |                |                  
|\n++---------+----------------+------------------+\n+\n+Endpoint 1 Outbound Security Associations:\n+\n++---------+----------------+------------------+\n+| **SPI** | **Tunnel src** | **Tunnel dst**   |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 5       | 172.16.2.5     | 172.16.1.5       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 6       | 172.16.2.6     | 172.16.1.6       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 7       | 172.16.2.7     | 172.16.1.7       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 8       | 172.16.2.8     | 172.16.1.8       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+\n+Endpoint 1 Inbound Security Associations:\n+\n++---------+----------------+------------------+\n+| **SPI** | **Tunnel src** | **Tunnel dst**   |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 5       | 172.16.1.5     | 172.16.2.5       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 6       | 172.16.1.6     | 172.16.2.6       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 7       | 172.16.1.7     | 172.16.2.7       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+| 8       | 172.16.1.8     | 172.16.2.8       |\n+|         |                |                  |\n++---------+----------------+------------------+\n+\n+Routing Initialization\n+~~~~~~~~~~~~~~~~~~~~~~\n+\n+The Routing is implemented using LPM table.\n+\n+Following default values:\n+\n+Endpoint 0 Routing Table:\n+\n++------------------+----------+\n+| **Dst addr**     | **Port** |\n+|                  |          |\n++------------------+----------+\n+| 172.16.2.5/32    | 0        |\n+|                  |          |\n++------------------+----------+\n+| 172.16.2.6/32    | 0        |\n+|                  |          |\n++------------------+----------+\n+| 172.16.2.7/32    | 1        |\n+|                  |          |\n++------------------+----------+\n+| 172.16.2.8/32    | 1        |\n+|                  |          |\n++------------------+----------+\n+| 192.168.115.0/24 | 2        |\n+|                  |          |\n++------------------+----------+\n+| 192.168.116.0/24 | 2        |\n+|                  |          |\n++------------------+----------+\n+| 192.168.117.0/24 | 3        |\n+|                  |          |\n++------------------+----------+\n+| 192.168.118.0/24 | 3        |\n+|                  |          |\n++------------------+----------+\n+\n+Endpoint 1 Routing Table:\n+\n++------------------+----------+\n+| **Dst addr**     | **Port** |\n+|                  |          |\n++------------------+----------+\n+| 172.16.1.5/32    | 2        |\n+|                  |          |\n++------------------+----------+\n+| 172.16.1.6/32    | 2        |\n+|                  |          |\n++------------------+----------+\n+| 172.16.1.7/32    | 3        |\n+|                  |          |\n++------------------+----------+\n+| 172.16.1.8/32    | 3        |\n+|                  |          |\n++------------------+----------+\n+| 192.168.105.0/24 | 0        |\n+|                  |          
|\n++------------------+----------+\n+| 192.168.106.0/24 | 0        |\n+|                  |          |\n++------------------+----------+\n+| 192.168.107.0/24 | 1        |\n+|                  |          |\n++------------------+----------+\n+| 192.168.108.0/24 | 1        |\n+|                  |          |\n++------------------+----------+\ndiff --git a/examples/Makefile b/examples/Makefile\nindex 1cb4785..65ce6ce 100644\n--- a/examples/Makefile\n+++ b/examples/Makefile\n@@ -1,6 +1,7 @@\n #   BSD LICENSE\n #\n #   Copyright(c) 2014 6WIND S.A.\n+#   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n #\n #   Redistribution and use in source and binary forms, with or without\n #   modification, are permitted provided that the following conditions\n@@ -78,5 +79,6 @@ DIRS-y += vmdq\n DIRS-y += vmdq_dcb\n DIRS-$(CONFIG_RTE_LIBRTE_POWER) += vm_power_manager\n DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += l2fwd-crypto\n+DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += ipsec-secgw\n \n include $(RTE_SDK)/mk/rte.extsubdir.mk\ndiff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile\nnew file mode 100644\nindex 0000000..5f893b8\n--- /dev/null\n+++ b/examples/ipsec-secgw/Makefile\n@@ -0,0 +1,58 @@\n+#   BSD LICENSE\n+#\n+#   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+#   All rights reserved.\n+#\n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+#\n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of Intel Corporation nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+#\n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+ifeq ($(RTE_SDK),)\n+\t$(error \"Please define RTE_SDK environment variable\")\n+endif\n+\n+# Default target, can be overridden by command line or environment\n+RTE_TARGET ?= x86_64-native-linuxapp-gcc\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+APP = ipsec-secgw\n+\n+CFLAGS += -O3 -gdwarf-2\n+CFLAGS += $(WERROR_FLAGS)\n+\n+VPATH += $(SRCDIR)/librte_ipsec\n+\n+#\n+# all source are stored in SRCS-y\n+#\n+SRCS-y += ipsec.c\n+SRCS-y += esp.c\n+SRCS-y += sp.c\n+SRCS-y += sa.c\n+SRCS-y += rt.c\n+SRCS-y += ipsec-secgw.c\n+\n+include $(RTE_SDK)/mk/rte.extapp.mk\ndiff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c\nnew file mode 100644\nindex 0000000..1be0621\n--- /dev/null\n+++ b/examples/ipsec-secgw/esp.c\n@@ -0,0 +1,256 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdint.h>\n+#include <stdlib.h>\n+#include <netinet/ip.h>\n+#include <sys/types.h>\n+#include <sys/stat.h>\n+#include <fcntl.h>\n+#include <unistd.h>\n+\n+#include <rte_memcpy.h>\n+#include <rte_crypto.h>\n+#include <rte_cryptodev.h>\n+#include <rte_random.h>\n+\n+#include \"ipsec.h\"\n+#include \"esp.h\"\n+#include \"ipip.h\"\n+\n+static inline void\n+random_iv_u64(uint64_t *buf, uint16_t n)\n+{\n+\tunsigned left = n & 0x7;\n+\tunsigned i;\n+\n+\tIPSEC_ASSERT((n & 0x3) == 0);\n+\n+\tfor (i = 0; i < (n >> 3); i++)\n+\t\tbuf[i] = rte_rand();\n+\n+\tif (left)\n+\t\t*((uint32_t *)&buf[i]) = (uint32_t)lrand48();\n+}\n+\n+/* IPv4 Tunnel */\n+int\n+esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,\n+\t\tstruct rte_crypto_op *cop)\n+{\n+\tuint16_t digest_len, esp_len, iphdr_len;\n+\tint32_t payload_len;\n+\n+\tIPSEC_ASSERT(m != NULL);\n+\n+\tiphdr_len = sizeof(struct ip);\n+\tesp_len = sizeof(struct esp_hdr) + sa->iv_len;\n+\tdigest_len = sa->digest_len;\n+\tpayload_len = rte_pktmbuf_pkt_len(m) - iphdr_len - esp_len - digest_len;\n+\n+\tif ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {\n+\t\tIPSEC_LOG(DEBUG, IPSEC_ESP, \"payload %d not a multiple of %u\\n\",\n+\t\t\t\tpayload_len, sa->block_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tcop->data.to_cipher.offset = iphdr_len + esp_len;\n+\tcop->data.to_cipher.length = payload_len;\n+\n+\tcop->data.to_hash.offset = iphdr_len;\n+\tif (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)\n+\t\tcop->data.to_hash.length = esp_len - sa->iv_len;\n+\telse\n+\t\tcop->data.to_hash.length = esp_len + payload_len;\n+\n+\tcop->iv.data = rte_pktmbuf_mtod_offset(m, void*,\n+\t\t\tiphdr_len + esp_len - sa->iv_len);\n+\tcop->iv.phys_addr = rte_pktmbuf_mtophys_offset(m,\n+\t\t\tiphdr_len + esp_len - sa->iv_len);\n+\n+\tcop->iv.length = sa->iv_len;\n+\n+\tcop->digest.data = rte_pktmbuf_mtod_offset(m, void*,\n+\t\t\tiphdr_len + esp_len + payload_len);\n+\tcop->digest.phys_addr = rte_pktmbuf_mtophys_offset(m,\n+\t\t\tiphdr_len + esp_len + payload_len);\n+\tcop->digest.length = digest_len;\n+\n+\treturn 0;\n+}\n+\n+int\n+esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,\n+\t\tstruct rte_crypto_op *cop)\n+{\n+\tuint16_t digest_len, esp_len, iphdr_len;\n+\tuint8_t *nexthdr, *pad_len;\n+\tuint8_t *padding;\n+\tuint16_t i;\n+\n+\tIPSEC_ASSERT(m != NULL);\n+\n+\tiphdr_len = sizeof(struct ip);\n+\tesp_len = sizeof(struct esp_hdr) + sa->iv_len;\n+\tdigest_len = sa->digest_len;\n+\n+\tif (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {\n+\t\tIPSEC_LOG(ERR, IPSEC_ESP, \"Failed crypto op\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tnexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,\n+\t\t\trte_pktmbuf_pkt_len(m) - digest_len - 1);\n+\tpad_len = nexthdr - 1;\n+\n+\tpadding = pad_len - *pad_len;\n+\tfor (i = 0; i < *pad_len; i++) {\n+\t\tif (padding[i] != i) {\n+\t\t\tIPSEC_LOG(ERR, IPSEC_ESP, \"invalid pad_len field\\n\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\tif 
(rte_pktmbuf_trim(m, *pad_len + 2 + digest_len)) {\n+\t\tIPSEC_LOG(ERR, IPSEC_ESP,\n+\t\t\t\t\"failed to remove pad_len + digest\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn ip4ip_inbound(m, iphdr_len + esp_len);\n+}\n+\n+int\n+esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,\n+\t\tstruct rte_crypto_op *cop)\n+{\n+\tuint16_t digest_len, esp_len, payload_len, block_sz, pad_len;\n+\tint32_t pad_payload_len;\n+\tstruct ip *ip;\n+\tstruct esp_hdr *esp;\n+\tint i;\n+\tchar *padding;\n+\n+\trte_prefetch0(rte_pktmbuf_mtod(m, uint8_t *) - RTE_CACHE_LINE_SIZE);\n+\trte_prefetch0(rte_pktmbuf_mtod(m, uint8_t *));\n+\n+\tIPSEC_ASSERT(m != NULL);\n+\tIPSEC_ASSERT(sa != NULL);\n+\n+\t/* Payload length */\n+\tpayload_len = rte_pktmbuf_pkt_len(m);\n+\n+\tblock_sz = sa->block_size;\n+\n+\t/* Per RFC4303:\n+\t *  padded payload needs to be multiple of 4 bytes.\n+\t * All application supported block sizes must be power of 2\n+\t */\n+\tpad_len = (payload_len + 2) & (block_sz - 1);\n+\tpad_len = ((block_sz - pad_len) & (block_sz - 1)) + 2;\n+\n+\t/* Padded payload length */\n+\tpad_payload_len = payload_len + pad_len;\n+\t/* ESP header length */\n+\tesp_len = sizeof(struct esp_hdr) + sa->iv_len;\n+\t/* Digest length */\n+\tdigest_len = sa->digest_len;\n+\n+\tIPSEC_LOG(DEBUG, IPSEC_ESP,\n+\t\t\t\"pktlen %u, esp_len %u, digest_len %u, payload_len %u,\"\n+\t\t\t\" pad_payload_len %u, block_sz %u, pad_len %u\\n\",\n+\t\t\trte_pktmbuf_pkt_len(m), esp_len, digest_len,\n+\t\t\tpayload_len, pad_payload_len, block_sz,\tpad_len);\n+\n+\t/* Check maximum packet size */\n+\tif (unlikely(esp_len + pad_payload_len + digest_len > IP_MAXPACKET)) {\n+\t\tIPSEC_LOG(DEBUG, IPSEC_ESP, \"ipsec packet is too big\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tpadding = rte_pktmbuf_append(m, pad_len + digest_len);\n+\n+\tIPSEC_ASSERT(padding != NULL);\n+\n+\tip = ip4ip_outbound(m, esp_len, sa->src, sa->dst);\n+\n+\tesp = (struct esp_hdr *)(ip + 1);\n+\tesp->spi = sa->spi;\n+\n+\tesp->seq = htonl(sa->seq++);\n+\n+\tIPSEC_LOG(DEBUG, IPSEC_ESP, \"pktlen %u\\n\", rte_pktmbuf_pkt_len(m));\n+\n+\t/* Fill pad_len using default sequential scheme */\n+\tfor (i = 0; i < pad_len - 2; i++)\n+\t\tpadding[i] = i + 1;\n+\n+\tpadding[pad_len - 2] = pad_len - 2;\n+\tpadding[pad_len - 1] = IPPROTO_IPIP;\n+\tip->ip_p = IPPROTO_ESP;\n+\n+\tcop->data.to_cipher.offset = sizeof(struct ip) + esp_len;\n+\tcop->data.to_cipher.length = pad_payload_len;\n+\n+\tcop->data.to_hash.offset = sizeof(struct ip);\n+\tcop->data.to_hash.length = esp_len + pad_payload_len;\n+\n+\tcop->iv.data = rte_pktmbuf_mtod_offset(m, void*,\n+\t\t\tsizeof(struct ip) + esp_len - sa->iv_len);\n+\tcop->iv.phys_addr = rte_pktmbuf_mtophys_offset(m,\n+\t\t\tsizeof(struct ip) + esp_len - sa->iv_len);\n+\tcop->iv.length = sa->iv_len;\n+\n+\tcop->digest.data = rte_pktmbuf_mtod_offset(m, void*,\n+\t\t\tsizeof(struct ip) + esp_len + pad_payload_len);\n+\tcop->digest.phys_addr = rte_pktmbuf_mtophys_offset(m,\n+\t\t\tsizeof(struct ip) + esp_len + pad_payload_len);\n+\tcop->digest.length = digest_len;\n+\n+\trandom_iv_u64((uint64_t *)cop->iv.data, cop->iv.length);\n+\n+\treturn 0;\n+}\n+\n+int\n+esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m __rte_unused,\n+\t\tstruct ipsec_sa *sa __rte_unused,\n+\t\tstruct rte_crypto_op *cop)\n+{\n+\tif (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {\n+\t\tIPSEC_LOG(ERR, IPSEC_ESP, \"Failed crypto op\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\ndiff --git a/examples/ipsec-secgw/esp.h b/examples/ipsec-secgw/esp.h\nnew file mode 
100644\nindex 0000000..d7c8ba6\n--- /dev/null\n+++ b/examples/ipsec-secgw/esp.h\n@@ -0,0 +1,66 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+#ifndef __RTE_IPSEC_XFORM_ESP_H__\n+#define __RTE_IPSEC_XFORM_ESP_H__\n+\n+struct mbuf;\n+\n+/* RFC4303 */\n+struct esp_hdr {\n+\tuint32_t spi;\n+\tuint32_t seq;\n+\t/* Payload */\n+\t/* Padding */\n+\t/* Pad Length */\n+\t/* Next Header */\n+\t/* Integrity Check Value - ICV */\n+};\n+\n+/* IPv4 Tunnel */\n+int\n+esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,\n+\t\tstruct rte_crypto_op *cop);\n+\n+int\n+esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,\n+\t\tstruct rte_crypto_op *cop);\n+\n+int\n+esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,\n+\t\tstruct rte_crypto_op *cop);\n+\n+int\n+esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,\n+\t\tstruct rte_crypto_op *cop);\n+\n+#endif /* __RTE_IPSEC_XFORM_ESP_H__ */\ndiff --git a/examples/ipsec-secgw/ipip.h b/examples/ipsec-secgw/ipip.h\nnew file mode 100644\nindex 0000000..16c5dfb\n--- /dev/null\n+++ b/examples/ipsec-secgw/ipip.h\n@@ -0,0 +1,100 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef __IPIP_H__\n+#define __IPIP_H__\n+\n+#include <stdint.h>\n+#include <netinet/in.h>\n+#include <netinet/ip.h>\n+\n+#include <rte_mbuf.h>\n+\n+static inline  struct ip *\n+ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t src, uint32_t dst)\n+{\n+\tstruct ip *inip, *outip;\n+\n+\tinip = rte_pktmbuf_mtod(m, struct ip*);\n+\n+\tIPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);\n+\n+\toffset += sizeof(struct ip);\n+\n+\toutip = (struct ip *)rte_pktmbuf_prepend(m, offset);\n+\n+\tIPSEC_ASSERT(outip != NULL);\n+\n+\t/* Per RFC4301 5.1.2.1 */\n+\toutip->ip_len = htons(rte_pktmbuf_data_len(m));\n+\toutip->ip_v = IPVERSION;\n+\toutip->ip_hl = 5;\n+\toutip->ip_tos = inip->ip_tos;\n+\n+\toutip->ip_id = 0;\n+\toutip->ip_off = 0;\n+\n+\toutip->ip_ttl = IPDEFTTL;\n+\n+\toutip->ip_dst.s_addr = dst;\n+\toutip->ip_src.s_addr = src;\n+\n+\treturn outip;\n+}\n+\n+static inline int\n+ip4ip_inbound(struct rte_mbuf *m, uint32_t offset)\n+{\n+\tstruct ip *inip;\n+\tstruct ip *outip;\n+\n+\toutip = rte_pktmbuf_mtod(m, struct ip*);\n+\n+\tIPSEC_ASSERT(outip->ip_v == IPVERSION);\n+\n+\toffset += sizeof(struct ip);\n+\tinip = (struct ip *)rte_pktmbuf_adj(m, offset);\n+\tIPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);\n+\n+\t/* Check packet is still bigger than IP header (inner) */\n+\tIPSEC_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));\n+\n+\t/* RFC4301 5.1.2.1 Note 6 */\n+\tif ((inip->ip_tos & htons(IPTOS_ECN_ECT0 | IPTOS_ECN_ECT1)) &&\n+\t\t\t((outip->ip_tos & htons(IPTOS_ECN_CE)) == IPTOS_ECN_CE))\n+\t\tinip->ip_tos |= htons(IPTOS_ECN_CE);\n+\n+\treturn 0;\n+}\n+\n+#endif /* __IPIP_H__ */\ndiff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c\nnew file mode 100644\nindex 0000000..d2e8972\n--- /dev/null\n+++ b/examples/ipsec-secgw/ipsec-secgw.c\n@@ -0,0 +1,1218 
@@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <stdint.h>\n+#include <inttypes.h>\n+#include <sys/types.h>\n+#include <string.h>\n+#include <sys/queue.h>\n+#include <stdarg.h>\n+#include <errno.h>\n+#include <getopt.h>\n+\n+#include <rte_common.h>\n+#include <rte_byteorder.h>\n+#include <rte_log.h>\n+#include <rte_eal.h>\n+#include <rte_launch.h>\n+#include <rte_atomic.h>\n+#include <rte_cycles.h>\n+#include <rte_prefetch.h>\n+#include <rte_lcore.h>\n+#include <rte_per_lcore.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_interrupts.h>\n+#include <rte_pci.h>\n+#include <rte_random.h>\n+#include <rte_debug.h>\n+#include <rte_ether.h>\n+#include <rte_ethdev.h>\n+#include <rte_mempool.h>\n+#include <rte_mbuf.h>\n+#include <rte_acl.h>\n+#include <rte_lpm.h>\n+#include <rte_cryptodev.h>\n+#include <rte_cryptodev.h>\n+\n+#include \"ipsec.h\"\n+\n+#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1\n+\n+#define MAX_JUMBO_PKT_LEN  9600\n+\n+#define MEMPOOL_CACHE_SIZE 256\n+\n+#define NB_MBUF\t(32000)\n+\n+#define CDEV_MP_NB_OBJS 2048\n+#define CDEV_MP_CACHE_SZ 64\n+#define MAX_QUEUE_PAIRS 1\n+\n+#define OPTION_CONFIG\t\t\"config\"\n+#define OPTION_EP0\t\t\"ep0\"\n+#define OPTION_EP1\t\t\"ep1\"\n+#define OPTION_CDEV_TYPE\t\"cdev\"\n+\n+#define MAX_PKT_BURST 32\n+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */\n+\n+#define NB_SOCKETS 4\n+\n+/* Configure how many packets ahead to prefetch, when reading packets */\n+#define PREFETCH_OFFSET\t3\n+\n+#define MAX_RX_QUEUE_PER_LCORE 16\n+\n+#define MAX_LCORE_PARAMS 1024\n+\n+#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid))\n+\n+/*\n+ * Configurable number of RX/TX ring descriptors\n+ */\n+#define IPSEC_SECGW_RX_DESC_DEFAULT 128\n+#define IPSEC_SECGW_TX_DESC_DEFAULT 512\n+static uint16_t 
nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;\n+static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;\n+\n+#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN\n+#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \\\n+\t(((uint64_t)((a) & 0xff) << 56) | \\\n+\t((uint64_t)((b) & 0xff) << 48) | \\\n+\t((uint64_t)((c) & 0xff) << 40) | \\\n+\t((uint64_t)((d) & 0xff) << 32) | \\\n+\t((uint64_t)((e) & 0xff) << 24) | \\\n+\t((uint64_t)((f) & 0xff) << 16) | \\\n+\t((uint64_t)((g) & 0xff) << 8)  | \\\n+\t((uint64_t)(h) & 0xff))\n+#else\n+#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \\\n+\t(((uint64_t)((h) & 0xff) << 56) | \\\n+\t((uint64_t)((g) & 0xff) << 48) | \\\n+\t((uint64_t)((f) & 0xff) << 40) | \\\n+\t((uint64_t)((e) & 0xff) << 32) | \\\n+\t((uint64_t)((d) & 0xff) << 24) | \\\n+\t((uint64_t)((c) & 0xff) << 16) | \\\n+\t((uint64_t)((b) & 0xff) << 8) | \\\n+\t((uint64_t)(a) & 0xff))\n+#endif\n+#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))\n+\n+#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \\\n+\t\taddr.addr_bytes[0], addr.addr_bytes[1], \\\n+\t\taddr.addr_bytes[2], addr.addr_bytes[3], \\\n+\t\taddr.addr_bytes[4], addr.addr_bytes[5], \\\n+\t\t0, 0)\n+\n+/* port/source ethernet addr and destination ethernet addr */\n+struct ethaddr_info {\n+\tuint64_t src, dst;\n+};\n+\n+struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {\n+\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },\n+\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },\n+\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },\n+\t{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }\n+};\n+\n+/* mask of enabled ports */\n+static uint32_t enabled_port_mask;\n+static uint32_t unprotected_port_mask;\n+static int promiscuous_on = 1;\n+static int numa_on = 1; /**< NUMA is enabled by default. 
*/\n+static int ep = -1; /**< Endpoint configuration (0 or 1) */\n+static unsigned cdev_type = -1;\n+static unsigned nb_lcores;\n+\n+struct buffer {\n+\tuint16_t len;\n+\tstruct rte_mbuf *m_table[MAX_PKT_BURST];\n+};\n+\n+struct lcore_rx_queue {\n+\tuint8_t port_id;\n+\tuint8_t queue_id;\n+} __rte_cache_aligned;\n+\n+struct lcore_params {\n+\tuint8_t port_id;\n+\tuint8_t queue_id;\n+\tuint8_t lcore_id;\n+} __rte_cache_aligned;\n+\n+static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];\n+\n+static struct lcore_params *lcore_params;\n+static uint16_t nb_lcore_params;\n+\n+struct lcore_conf {\n+\tuint16_t nb_rx_queue;\n+\tstruct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];\n+\tuint16_t tx_queue_id[RTE_MAX_ETHPORTS];\n+\tstruct buffer tx_mbufs[RTE_MAX_ETHPORTS];\n+\tuint16_t cdev;\n+\tuint16_t cdev_q;\n+} __rte_cache_aligned;\n+\n+static struct lcore_conf lcore_conf[RTE_MAX_LCORE];\n+\n+static struct rte_eth_conf port_conf = {\n+\t.rxmode = {\n+\t\t.mq_mode\t= ETH_MQ_RX_RSS,\n+\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n+\t\t.split_hdr_size = 0,\n+\t\t.header_split   = 0, /**< Header Split disabled */\n+\t\t.hw_ip_checksum = 1, /**< IP checksum offload enabled */\n+\t\t.hw_vlan_filter = 0, /**< VLAN filtering disabled */\n+\t\t.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */\n+\t\t.hw_strip_crc   = 0, /**< CRC stripped by hardware */\n+\t},\n+\t.rx_adv_conf = {\n+\t\t.rss_conf = {\n+\t\t\t.rss_key = NULL,\n+\t\t\t.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |\n+\t\t\t\tETH_RSS_TCP | ETH_RSS_SCTP,\n+\t\t},\n+\t},\n+\t.txmode = {\n+\t\t.mq_mode = ETH_MQ_TX_NONE,\n+\t},\n+};\n+\n+static struct socket_ctx socket_ctx[NB_SOCKETS];\n+\n+struct traffic_type {\n+\tconst uint8_t *data[MAX_PKT_BURST * 2];\n+\tstruct rte_mbuf *pkts[MAX_PKT_BURST * 2];\n+\tuint32_t res[MAX_PKT_BURST * 2];\n+\tuint32_t num;\n+};\n+\n+struct ipsec_traffic {\n+\tstruct traffic_type ipsec4;\n+\tstruct traffic_type ipv4;\n+};\n+\n+static inline void\n+prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)\n+{\n+\tuint8_t *nlp;\n+\n+\tif (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {\n+\t\trte_pktmbuf_adj(pkt, ETHER_HDR_LEN);\n+\t\tnlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,\n+\t\t\t\toffsetof(struct ip, ip_p));\n+\t\tif (*nlp == IPPROTO_ESP)\n+\t\t\tt->ipsec4.pkts[(t->ipsec4.num)++] = pkt;\n+\t\telse {\n+\t\t\tt->ipv4.data[t->ipv4.num] = nlp;\n+\t\t\tt->ipv4.pkts[(t->ipv4.num)++] = pkt;\n+\t\t}\n+\t} else {\n+\t\t/* Unknown/Unsupported type, drop the packet */\n+\t\trte_pktmbuf_free(pkt);\n+\t}\n+}\n+\n+static inline void\n+prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,\n+\t\tuint16_t nb_pkts)\n+{\n+\tint i;\n+\n+\tt->ipsec4.num = 0;\n+\tt->ipv4.num = 0;\n+\n+\tfor (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {\n+\t\trte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],\n+\t\t\t\t\tvoid *));\n+\t\tprepare_one_packet(pkts[i], t);\n+\t}\n+\t/* Process left packets */\n+\tfor (; i < nb_pkts; i++)\n+\t\tprepare_one_packet(pkts[i], t);\n+}\n+\n+static inline void\n+prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)\n+{\n+\tpkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;\n+\tpkt->l3_len = sizeof(struct ip);\n+\tpkt->l2_len = ETHER_HDR_LEN;\n+\n+\tuint64_t *ethhdr = (uint64_t *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);\n+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n+\tethhdr[0] = ethaddr_tbl[port].dst |\n+\t\t((ethaddr_tbl[port].src & 0xffff) << 48);\n+\tethhdr[1] = ethaddr_tbl[port].src >> 16 |\n+\t\t((uint64_t)rte_cpu_to_be_16(ETHER_TYPE_IPv4) << 32) |\n+\t\t(ethhdr[1] & (0xffffUL << 
48));\n+#else\n+\tethhdr[0] = ethaddr_tbl[port].dst |\n+\t\t(ethaddr_tbl[port].src >> 48 & 0xffff);\n+\tethhdr[1] = ethaddr_tbl[port].src << 16 | ((ETHER_TYPE_IPv4)UL << 16) |\n+\t\t(ethhdr[1] & 0xffffUL);\n+#endif\n+}\n+\n+static inline void\n+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)\n+{\n+\tint i;\n+\tconst int prefetch_offset = 2;\n+\n+\tfor (i = 0; i < (nb_pkts - prefetch_offset); i++) {\n+\t\trte_prefetch0(pkts[i + prefetch_offset]->cacheline1);\n+\t\tprepare_tx_pkt(pkts[i], port);\n+\t}\n+\t/* Process left packets */\n+\tfor (; i < nb_pkts; i++)\n+\t\tprepare_tx_pkt(pkts[i], port);\n+}\n+\n+/* Send burst of packets on an output interface */\n+static inline int\n+send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)\n+{\n+\tstruct rte_mbuf **m_table;\n+\tint ret;\n+\tuint16_t queueid;\n+\n+\tqueueid = qconf->tx_queue_id[port];\n+\tm_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;\n+\n+\tprepare_tx_burst(m_table, n, port);\n+\n+\tret = rte_eth_tx_burst(port, queueid, m_table, n);\n+\tif (unlikely(ret < n)) {\n+\t\tdo {\n+\t\t\trte_pktmbuf_free(m_table[ret]);\n+\t\t} while (++ret < n);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Enqueue a single packet, and send burst if queue is filled */\n+static inline int\n+send_single_packet(struct rte_mbuf *m, uint8_t port)\n+{\n+\tuint32_t lcore_id;\n+\tuint16_t len;\n+\tstruct lcore_conf *qconf;\n+\n+\tlcore_id = rte_lcore_id();\n+\n+\tqconf = &lcore_conf[lcore_id];\n+\tlen = qconf->tx_mbufs[port].len;\n+\tqconf->tx_mbufs[port].m_table[len] = m;\n+\tlen++;\n+\n+\t/* enough pkts to be sent */\n+\tif (unlikely(len == MAX_PKT_BURST)) {\n+\t\tsend_burst(qconf, MAX_PKT_BURST, port);\n+\t\tlen = 0;\n+\t}\n+\n+\tqconf->tx_mbufs[port].len = len;\n+\treturn 0;\n+}\n+\n+static inline void\n+process_pkts_inbound(struct socket_ctx *ctx, struct ipsec_traffic *traffic)\n+{\n+\tstruct sa_ctx *sa_ctx = ctx->sa_ipv4_in;\n+\tstruct sp_ctx *sp_ctx = ctx->sp_ipv4_in;\n+\tstruct rte_mbuf *m;\n+\tuint16_t idx, nb_pkts_in, i, j;\n+\tuint32_t sa_idx, res;\n+\tstruct ipsec_ctx ipsec_ctx;\n+\tstruct lcore_conf *qconf;\n+\n+\tqconf = &lcore_conf[rte_lcore_id()];\n+\tipsec_ctx.cdev = qconf->cdev;\n+\tipsec_ctx.queue = qconf->cdev_q;\n+\tipsec_ctx.sa_ctx = sa_ctx;\n+\n+\tnb_pkts_in = ipsec_inbound(&ipsec_ctx, traffic->ipsec4.pkts,\n+\t\t\ttraffic->ipsec4.num, MAX_PKT_BURST);\n+\n+\t/* SP/ACL Inbound check ipsec and ipv4 */\n+\tfor (i = 0; i < nb_pkts_in; i++) {\n+\t\tidx = traffic->ipv4.num++;\n+\t\tm = traffic->ipsec4.pkts[i];\n+\t\ttraffic->ipv4.pkts[idx] = m;\n+\t\ttraffic->ipv4.data[idx] = rte_pktmbuf_mtod_offset(m,\n+\t\t\t\tuint8_t *, offsetof(struct ip, ip_p));\n+\t}\n+\n+\trte_acl_classify((struct rte_acl_ctx *)sp_ctx, traffic->ipv4.data,\n+\t\t\ttraffic->ipv4.res, traffic->ipv4.num,\n+\t\t\tDEFAULT_MAX_CATEGORIES);\n+\n+\tj = 0;\n+\tfor (i = 0; i < traffic->ipv4.num - nb_pkts_in; i++) {\n+\t\tm = traffic->ipv4.pkts[i];\n+\t\tres = traffic->ipv4.res[i];\n+\t\tif (res & ~BYPASS) {\n+\t\t\trte_pktmbuf_free(m);\n+\t\t\tcontinue;\n+\t\t}\n+\t\ttraffic->ipv4.pkts[j++] = m;\n+\t}\n+\t/* Check return SA SPI matches pkt SPI */\n+\tfor ( ; i < traffic->ipv4.num; i++) {\n+\t\tm = traffic->ipv4.pkts[i];\n+\t\tsa_idx = traffic->ipv4.res[i] & PROTECT_MASK;\n+\t\tif (sa_idx == 0 || !inbound_sa_check(sa_ctx, m, sa_idx)) {\n+\t\t\trte_pktmbuf_free(m);\n+\t\t\tcontinue;\n+\t\t}\n+\t\ttraffic->ipv4.pkts[j++] = m;\n+\t}\n+\ttraffic->ipv4.num = j;\n+}\n+\n+static inline void\n+process_pkts_outbound(struct socket_ctx *ctx, struct ipsec_traffic 
*traffic)\n+{\n+\tstruct sa_ctx *sa_ctx = ctx->sa_ipv4_out;\n+\tstruct sp_ctx *sp_ctx = ctx->sp_ipv4_out;\n+\tstruct rte_mbuf *m;\n+\tuint16_t idx, nb_pkts_out, i, j;\n+\tuint32_t sa_idx, res;\n+\tstruct ipsec_ctx ipsec_ctx;\n+\tstruct lcore_conf *qconf;\n+\n+\tqconf = &lcore_conf[rte_lcore_id()];\n+\tipsec_ctx.cdev = qconf->cdev;\n+\tipsec_ctx.queue = qconf->cdev_q;\n+\tipsec_ctx.sa_ctx = sa_ctx;\n+\n+\trte_acl_classify((struct rte_acl_ctx *)sp_ctx, traffic->ipv4.data,\n+\t\t\ttraffic->ipv4.res, traffic->ipv4.num,\n+\t\t\tDEFAULT_MAX_CATEGORIES);\n+\tj = 0;\n+\tfor (i = 0; i < traffic->ipv4.num; i++) {\n+\t\tm = traffic->ipv4.pkts[i];\n+\t\tres = traffic->ipv4.res[i];\n+\t\tsa_idx = res & PROTECT_MASK;\n+\t\tif (res & DISCARD) {\n+\t\t\trte_pktmbuf_free(m);\n+\t\t} else if (sa_idx != 0) {\n+\t\t\ttraffic->ipsec4.res[traffic->ipsec4.num] = sa_idx;\n+\t\t\ttraffic->ipsec4.pkts[traffic->ipsec4.num++] = m;\n+\t\t} else /* BYPASS */\n+\t\t\ttraffic->ipv4.pkts[j++] = m;\n+\t}\n+\ttraffic->ipv4.num = j;\n+\n+\tnb_pkts_out = ipsec_outbound(&ipsec_ctx, traffic->ipsec4.pkts,\n+\t\t\ttraffic->ipsec4.res, traffic->ipsec4.num,\n+\t\t\tMAX_PKT_BURST);\n+\n+\tfor (i = 0; i < nb_pkts_out; i++) {\n+\t\tidx = traffic->ipv4.num++;\n+\t\tm = traffic->ipsec4.pkts[i];\n+\t\ttraffic->ipv4.pkts[idx] = m;\n+\t}\n+}\n+\n+static inline void\n+route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)\n+{\n+\tuint16_t hop[MAX_PKT_BURST * 2];\n+\tuint32_t dst_ip[MAX_PKT_BURST * 2];\n+\tuint16_t i, offset;\n+\n+\tif (nb_pkts == 0)\n+\t\treturn;\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\toffset = offsetof(struct ip, ip_dst);\n+\t\tdst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i],\n+\t\t\t\tuint32_t *, offset);\n+\t\tdst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);\n+\t}\n+\n+\trte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts);\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tif ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {\n+\t\t\trte_pktmbuf_free(pkts[i]);\n+\t\t\tcontinue;\n+\t\t}\n+\t\tsend_single_packet(pkts[i], hop[i] & 0xff);\n+\t}\n+}\n+\n+static inline void\n+process_pkts(struct socket_ctx *ctx, struct rte_mbuf **pkts,\n+\t\tuint8_t nb_pkts, uint8_t portid)\n+{\n+\tstruct ipsec_traffic traffic;\n+\n+\tprepare_traffic(pkts, &traffic, nb_pkts);\n+\n+\tif (UNPROTECTED_PORT(portid))\n+\t\tprocess_pkts_inbound(ctx, &traffic);\n+\telse\n+\t\tprocess_pkts_outbound(ctx, &traffic);\n+\n+\troute_pkts(ctx->rt_ipv4, traffic.ipv4.pkts, traffic.ipv4.num);\n+}\n+\n+static inline void\n+drain_buffers(struct lcore_conf *qconf)\n+{\n+\tstruct buffer *buf;\n+\tunsigned portid;\n+\n+\tfor (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {\n+\t\tbuf = &qconf->tx_mbufs[portid];\n+\t\tif (buf->len == 0)\n+\t\t\tcontinue;\n+\t\tsend_burst(qconf, buf->len, portid);\n+\t\tbuf->len = 0;\n+\t}\n+}\n+\n+/* main processing loop */\n+static int\n+main_loop(__attribute__((unused)) void *dummy)\n+{\n+\tstruct rte_mbuf *pkts[MAX_PKT_BURST];\n+\tunsigned lcore_id;\n+\tuint64_t prev_tsc, diff_tsc, cur_tsc;\n+\tint i, nb_rx;\n+\tuint8_t portid, queueid;\n+\tstruct lcore_conf *qconf;\n+\tint socket_id;\n+\tconst uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)\n+\t\t\t/ US_PER_S * BURST_TX_DRAIN_US;\n+\tstruct socket_ctx *ctx;\n+\tstruct lcore_rx_queue *rxql;\n+\n+\tprev_tsc = 0;\n+\tlcore_id = rte_lcore_id();\n+\tqconf = &lcore_conf[lcore_id];\n+\trxql = qconf->rx_queue_list;\n+\tsocket_id = rte_lcore_to_socket_id(lcore_id);\n+\n+\tctx = &socket_ctx[socket_id];\n+\n+\tif (qconf->nb_rx_queue == 0) {\n+\t\tRTE_LOG(INFO, 
IPSEC, \"lcore %u has nothing to do\\n\", lcore_id);\n+\t\treturn 0;\n+\t}\n+\n+\tRTE_LOG(INFO, IPSEC, \"entering main loop on lcore %u\\n\", lcore_id);\n+\n+\tfor (i = 0; i < qconf->nb_rx_queue; i++) {\n+\t\tportid = rxql[i].port_id;\n+\t\tqueueid = rxql[i].queue_id;\n+\t\tRTE_LOG(INFO, IPSEC,\n+\t\t\t\" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\\n\",\n+\t\t\tlcore_id, portid, queueid);\n+\t}\n+\n+\twhile (1) {\n+\t\tcur_tsc = rte_rdtsc();\n+\n+\t\t/* TX queue buffer drain */\n+\t\tdiff_tsc = cur_tsc - prev_tsc;\n+\n+\t\tif (unlikely(diff_tsc > drain_tsc)) {\n+\t\t\tdrain_buffers(qconf);\n+\t\t\tprev_tsc = cur_tsc;\n+\t\t}\n+\n+\t\t/* Read packet from RX queues */\n+\t\tfor (i = 0; i < qconf->nb_rx_queue; ++i) {\n+\t\t\tportid = rxql[i].port_id;\n+\t\t\tqueueid = rxql[i].queue_id;\n+\t\t\tnb_rx = rte_eth_rx_burst(portid, queueid,\n+\t\t\t\t\tpkts, MAX_PKT_BURST);\n+\n+\t\t\tif (nb_rx > 0)\n+\t\t\t\tprocess_pkts(ctx, pkts, nb_rx, portid);\n+\t\t}\n+\t}\n+}\n+\n+static int\n+check_params(void)\n+{\n+\tuint8_t lcore, portid, nb_ports;\n+\tuint16_t i;\n+\tint socket_id;\n+\n+\tif (lcore_params == NULL) {\n+\t\tprintf(\"Error: No port/queue/core mappings\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tnb_ports = rte_eth_dev_count();\n+\tif (nb_ports > RTE_MAX_ETHPORTS)\n+\t\tnb_ports = RTE_MAX_ETHPORTS;\n+\n+\tfor (i = 0; i < nb_lcore_params; ++i) {\n+\t\tlcore = lcore_params[i].lcore_id;\n+\t\tif (!rte_lcore_is_enabled(lcore)) {\n+\t\t\tprintf(\"error: lcore %hhu is not enabled in \"\n+\t\t\t\t\"lcore mask\\n\", lcore);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tsocket_id = rte_lcore_to_socket_id(lcore);\n+\t\tif (socket_id != 0 && numa_on == 0) {\n+\t\t\tprintf(\"warning: lcore %hhu is on socket %d \"\n+\t\t\t\t\"with numa off\\n\",\n+\t\t\t\tlcore, socket_id);\n+\t\t}\n+\t\tportid = lcore_params[i].port_id;\n+\t\tif ((enabled_port_mask & (1 << portid)) == 0) {\n+\t\t\tprintf(\"port %u is not enabled in port mask\\n\", portid);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (portid >= nb_ports) {\n+\t\t\tprintf(\"port %u is not present on the board\\n\", portid);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+static uint8_t\n+get_port_nb_rx_queues(const uint8_t port)\n+{\n+\tint queue = -1;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < nb_lcore_params; ++i) {\n+\t\tif (lcore_params[i].port_id == port &&\n+\t\t\t\tlcore_params[i].queue_id > queue)\n+\t\t\tqueue = lcore_params[i].queue_id;\n+\t}\n+\treturn (uint8_t)(++queue);\n+}\n+\n+static int\n+init_lcore_rx_queues(void)\n+{\n+\tuint16_t i, nb_rx_queue;\n+\tuint8_t lcore;\n+\n+\tfor (i = 0; i < nb_lcore_params; ++i) {\n+\t\tlcore = lcore_params[i].lcore_id;\n+\t\tnb_rx_queue = lcore_conf[lcore].nb_rx_queue;\n+\t\tif (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {\n+\t\t\tprintf(\"error: too many queues (%u) for lcore: %u\\n\",\n+\t\t\t\t\tnb_rx_queue + 1, lcore);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tlcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =\n+\t\t\tlcore_params[i].port_id;\n+\t\tlcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =\n+\t\t\tlcore_params[i].queue_id;\n+\t\tlcore_conf[lcore].nb_rx_queue++;\n+\t}\n+\treturn 0;\n+}\n+\n+/* display usage */\n+static void\n+print_usage(const char *prgname)\n+{\n+\tprintf(\"%s [EAL options] -- -p PORTMASK -P -u PORTMASK\"\n+\t\t\"  --\"OPTION_CONFIG\" (port,queue,lcore)[,(port,queue,lcore]\"\n+\t\t\" --EP0|--EP1 --cdev AESNI|QAT\\n\"\n+\t\t\"  -p PORTMASK: hexadecimal bitmask of ports to configure\\n\"\n+\t\t\"  -P : enable promiscuous mode\\n\"\n+\t\t\"  -u PORTMASK: hexadecimal bitmask of unprotected ports\\n\"\n+\t\t\"  
--\"OPTION_CONFIG\": (port,queue,lcore): \"\n+\t\t\"rx queues configuration\\n\"\n+\t\t\"  --cdev AESNI | QAT\\n\"\n+\t\t\"  --EP0: Configure as Endpoint 0\\n\"\n+\t\t\"  --EP1: Configure as Endpoint 1\\n\", prgname);\n+}\n+\n+static int\n+parse_portmask(const char *portmask)\n+{\n+\tchar *end = NULL;\n+\tunsigned long pm;\n+\n+\t/* parse hexadecimal string */\n+\tpm = strtoul(portmask, &end, 16);\n+\tif ((portmask[0] == '\\0') || (end == NULL) || (*end != '\\0'))\n+\t\treturn -1;\n+\n+\tif (pm == 0)\n+\t\treturn -1;\n+\n+\treturn pm;\n+}\n+\n+static int\n+parse_config(const char *q_arg)\n+{\n+\tchar s[256];\n+\tconst char *p, *p0 = q_arg;\n+\tchar *end;\n+\tenum fieldnames {\n+\t\tFLD_PORT = 0,\n+\t\tFLD_QUEUE,\n+\t\tFLD_LCORE,\n+\t\t_NUM_FLD\n+\t};\n+\tunsigned long int_fld[_NUM_FLD];\n+\tchar *str_fld[_NUM_FLD];\n+\tint i;\n+\tunsigned size;\n+\n+\tnb_lcore_params = 0;\n+\n+\twhile ((p = strchr(p0, '(')) != NULL) {\n+\t\t++p;\n+\t\tp0 = strchr(p, ')');\n+\t\tif (p0 == NULL)\n+\t\t\treturn -1;\n+\n+\t\tsize = p0 - p;\n+\t\tif (size >= sizeof(s))\n+\t\t\treturn -1;\n+\n+\t\tsnprintf(s, sizeof(s), \"%.*s\", size, p);\n+\t\tif (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=\n+\t\t\t\t_NUM_FLD)\n+\t\t\treturn -1;\n+\t\tfor (i = 0; i < _NUM_FLD; i++) {\n+\t\t\terrno = 0;\n+\t\t\tint_fld[i] = strtoul(str_fld[i], &end, 0);\n+\t\t\tif (errno != 0 || end == str_fld[i] || int_fld[i] > 255)\n+\t\t\t\treturn -1;\n+\t\t}\n+\t\tif (nb_lcore_params >= MAX_LCORE_PARAMS) {\n+\t\t\tprintf(\"exceeded max number of lcore params: %hu\\n\",\n+\t\t\t\tnb_lcore_params);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tlcore_params_array[nb_lcore_params].port_id =\n+\t\t\t(uint8_t)int_fld[FLD_PORT];\n+\t\tlcore_params_array[nb_lcore_params].queue_id =\n+\t\t\t(uint8_t)int_fld[FLD_QUEUE];\n+\t\tlcore_params_array[nb_lcore_params].lcore_id =\n+\t\t\t(uint8_t)int_fld[FLD_LCORE];\n+\t\t++nb_lcore_params;\n+\t}\n+\tlcore_params = lcore_params_array;\n+\treturn 0;\n+}\n+\n+#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))\n+static int\n+parse_args_long_options(struct option *lgopts, int option_index)\n+{\n+\tint ret = -1;\n+\tconst char *optname = lgopts[option_index].name;\n+\n+\tif (__STRNCMP(optname, OPTION_CONFIG)) {\n+\t\tret = parse_config(optarg);\n+\t\tif (ret)\n+\t\t\tprintf(\"invalid config\\n\");\n+\t}\n+\n+\tif (__STRNCMP(optname, OPTION_EP0)) {\n+\t\tprintf(\"endpoint 0\\n\");\n+\t\tep = 0;\n+\t\tret = 0;\n+\t}\n+\n+\tif (__STRNCMP(optname, OPTION_EP1)) {\n+\t\tprintf(\"endpoint 1\\n\");\n+\t\tep = 1;\n+\t\tret = 0;\n+\t}\n+\n+\tif (__STRNCMP(optname, OPTION_CDEV_TYPE)) {\n+\t\tif (__STRNCMP(optarg, \"AESNI\")) {\n+\t\t\tcdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;\n+\t\t\tret = 0;\n+\t\t} else if (__STRNCMP(optarg, \"QAT\")) {\n+\t\t\tcdev_type = RTE_CRYPTODEV_QAT_PMD;\n+\t\t\tret = 0;\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+#undef __STRNCMP\n+\n+static int\n+parse_args(int argc, char **argv)\n+{\n+\tint opt, ret;\n+\tchar **argvopt;\n+\tint option_index;\n+\tchar *prgname = argv[0];\n+\tstatic struct option lgopts[] = {\n+\t\t{OPTION_CONFIG, 1, 0, 0},\n+\t\t{OPTION_EP0, 0, 0, 0},\n+\t\t{OPTION_EP1, 0, 0, 0},\n+\t\t{OPTION_CDEV_TYPE, 1, 0, 0},\n+\t\t{NULL, 0, 0, 0}\n+\t};\n+\n+\targvopt = argv;\n+\n+\twhile ((opt = getopt_long(argc, argvopt, \"p:Pu:\",\n+\t\t\t\tlgopts, &option_index)) != EOF) {\n+\n+\t\tswitch (opt) {\n+\t\tcase 'p':\n+\t\t\tenabled_port_mask = parse_portmask(optarg);\n+\t\t\tif (enabled_port_mask == 0) {\n+\t\t\t\tprintf(\"invalid 
portmask\\n\");\n+\t\t\t\tprint_usage(prgname);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase 'P':\n+\t\t\tprintf(\"Promiscuous mode selected\\n\");\n+\t\t\tpromiscuous_on = 1;\n+\t\t\tbreak;\n+\t\tcase 'u':\n+\t\t\tunprotected_port_mask = parse_portmask(optarg);\n+\t\t\tif (unprotected_port_mask == 0) {\n+\t\t\t\tprintf(\"invalid unprotected portmask\\n\");\n+\t\t\t\tprint_usage(prgname);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase 0:\n+\t\t\tif (parse_args_long_options(lgopts, option_index)) {\n+\t\t\t\tprint_usage(prgname);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tprint_usage(prgname);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (optind >= 0)\n+\t\targv[optind-1] = prgname;\n+\n+\tret = optind-1;\n+\toptind = 0; /* reset getopt lib */\n+\treturn ret;\n+}\n+\n+static void\n+print_ethaddr(const char *name, const struct ether_addr *eth_addr)\n+{\n+\tchar buf[ETHER_ADDR_FMT_SIZE];\n+\tether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);\n+\tprintf(\"%s%s\", name, buf);\n+}\n+\n+/* Check the link status of all ports in up to 9s, and print them finally */\n+static void\n+check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)\n+{\n+#define CHECK_INTERVAL 100 /* 100ms */\n+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */\n+\tuint8_t portid, count, all_ports_up, print_flag = 0;\n+\tstruct rte_eth_link link;\n+\n+\tprintf(\"\\nChecking link status\");\n+\tfflush(stdout);\n+\tfor (count = 0; count <= MAX_CHECK_TIME; count++) {\n+\t\tall_ports_up = 1;\n+\t\tfor (portid = 0; portid < port_num; portid++) {\n+\t\t\tif ((port_mask & (1 << portid)) == 0)\n+\t\t\t\tcontinue;\n+\t\t\tmemset(&link, 0, sizeof(link));\n+\t\t\trte_eth_link_get_nowait(portid, &link);\n+\t\t\t/* print link status if flag set */\n+\t\t\tif (print_flag == 1) {\n+\t\t\t\tif (link.link_status)\n+\t\t\t\t\tprintf(\"Port %d Link Up - speed %u \"\n+\t\t\t\t\t\t\"Mbps - %s\\n\", (uint8_t)portid,\n+\t\t\t\t\t\t(unsigned)link.link_speed,\n+\t\t\t\t(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?\n+\t\t\t\t\t(\"full-duplex\") : (\"half-duplex\\n\"));\n+\t\t\t\telse\n+\t\t\t\t\tprintf(\"Port %d Link Down\\n\",\n+\t\t\t\t\t\t(uint8_t)portid);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\t/* clear all_ports_up flag if any link down */\n+\t\t\tif (link.link_status == 0) {\n+\t\t\t\tall_ports_up = 0;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t\t/* after finally printing all link status, get out */\n+\t\tif (print_flag == 1)\n+\t\t\tbreak;\n+\n+\t\tif (all_ports_up == 0) {\n+\t\t\tprintf(\".\");\n+\t\t\tfflush(stdout);\n+\t\t\trte_delay_ms(CHECK_INTERVAL);\n+\t\t}\n+\n+\t\t/* set the print_flag if all ports up or timeout */\n+\t\tif (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {\n+\t\t\tprint_flag = 1;\n+\t\t\tprintf(\"done\\n\");\n+\t\t}\n+\t}\n+}\n+\n+static uint16_t\n+find_next_cdev(unsigned cdev_type, uint16_t start_cdev_id)\n+{\n+\tuint16_t cdev_id, cnt;\n+\tstruct rte_cryptodev_info info;\n+\n+\tcnt = rte_cryptodev_count();\n+\tfor (cdev_id = start_cdev_id; cdev_id < cnt; cdev_id++) {\n+\t\trte_cryptodev_info_get(cdev_id, &info);\n+\t\tif (info.dev_type == cdev_type)\n+\t\t\treturn cdev_id;\n+\t}\n+\n+\treturn -1;\n+}\n+\n+static uint16_t\n+find_cdev_socket(int socket_id, unsigned cdev_type)\n+{\n+\tuint16_t cdev_id, cnt;\n+\tstruct rte_cryptodev_info info;\n+\tint cdev_socket;\n+\n+\tcnt = rte_cryptodev_count();\n+\tfor (cdev_id = 0; cdev_id < cnt; cdev_id++) {\n+\t\trte_cryptodev_info_get(cdev_id, &info);\n+\t\tcdev_socket = rte_cryptodev_socket_id(cdev_id);\n+\t\tif 
((info.dev_type == cdev_type) && (cdev_socket == socket_id))\n+\t\t\treturn cdev_id;\n+\t}\n+\n+\treturn -1;\n+}\n+\n+static int\n+cryptodevs_init(void)\n+{\n+\tstruct rte_cryptodev_config dev_conf;\n+\tstruct rte_cryptodev_qp_conf qp_conf;\n+\tstruct lcore_conf *qconf;\n+\tuint16_t cnt, lcore_id, cdev_id;\n+\tint ret, i, socket_id;\n+\n+\tif (cdev_type == RTE_CRYPTODEV_QAT_PMD) {\n+\t\tcnt = rte_cryptodev_count_devtype(RTE_CRYPTODEV_QAT_PMD);\n+\t\tprintf(\"Found %u QAT devices\\n\", cnt);\n+\t\tif (cnt < nb_lcores)\n+\t\t\trte_panic(\"Not enough QAT devices detected, \"\n+\t\t\t\t\t\"need %u (1 per core), found %d\\n\",\n+\t\t\t\t\tnb_lcores, cnt);\n+\t} else if (cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {\n+\t\tprintf(\"Initializing %u AESNI vdevs\\n\", rte_lcore_count());\n+\t\tfor (i = 0; i < (int)rte_lcore_count(); i++) {\n+\t\t\tret = rte_eal_vdev_init(\n+\t\t\t\t\tCRYPTODEV_NAME_AESNI_MB_PMD,\n+\t\t\t\t\tNULL);\n+\t\t\tif (ret < 0)\n+\t\t\t\trte_panic(\"Failed to create AESNI-MB vdev\\n\");\n+\t\t}\n+\t} else\n+\t\trte_panic(\"Need to set cryptodev type option --cdev\\n\");\n+\n+\tcdev_id = 0;\n+\tfor (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {\n+\t\tif (rte_lcore_is_enabled(lcore_id) == 0)\n+\t\t\tcontinue;\n+\n+\t\tsocket_id = rte_lcore_to_socket_id(lcore_id);\n+\t\tcdev_id = find_next_cdev(cdev_type, cdev_id);\n+\t\tqconf = &lcore_conf[lcore_id];\n+\t\tqconf->cdev = cdev_id;\n+\t\tqconf->cdev_q = 0;\n+\t\tif (cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {\n+\t\t\tdev_conf.socket_id = socket_id;\n+\t\t\tprintf(\"Initializing AESNI-MB device %u socket %u\\n\",\n+\t\t\t\t\tcdev_id, socket_id);\n+\t\t} else {\n+\t\t\tdev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);\n+\t\t\tprintf(\"Initialising QAT device %u\\n\", cdev_id);\n+\t\t}\n+\n+\t\tdev_conf.nb_queue_pairs = 1;\n+\t\tdev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;\n+\t\tdev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;\n+\n+\t\tif (rte_cryptodev_configure(cdev_id, &dev_conf))\n+\t\t\trte_panic(\"Failed to initialize crypodev %u\\n\",\n+\t\t\t\t\tcdev_id);\n+\n+\t\tqp_conf.nb_descriptors = CDEV_MP_NB_OBJS;\n+\t\tif (rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,\n+\t\t\t\t\tdev_conf.socket_id))\n+\t\t\trte_panic(\"Failed to setup queue %u for cdev_id %u\\n\",\n+\t\t\t\t\t0, cdev_id);\n+\t\tcdev_id++;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+port_init(uint8_t portid)\n+{\n+\tstruct rte_eth_dev_info dev_info;\n+\tstruct rte_eth_txconf *txconf;\n+\tuint16_t nb_tx_queue, nb_rx_queue;\n+\tuint16_t tx_queueid, rx_queueid, queue, lcore_id;\n+\tint ret, socket_id;\n+\tstruct lcore_conf *qconf;\n+\tstruct ether_addr ethaddr;\n+\n+\trte_eth_dev_info_get(portid, &dev_info);\n+\n+\tprintf(\"Configuring device port %u:\\n\", portid);\n+\n+\trte_eth_macaddr_get(portid, &ethaddr);\n+\tethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);\n+\tprint_ethaddr(\"Address: \", &ethaddr);\n+\tprintf(\"\\n\");\n+\n+\tnb_rx_queue = get_port_nb_rx_queues(portid);\n+\tnb_tx_queue = nb_lcores;\n+\n+\tif (nb_rx_queue > dev_info.max_rx_queues)\n+\t\trte_exit(EXIT_FAILURE, \"Error: queue %u not available \"\n+\t\t\t\t\"(max rx queue is %u)\\n\",\n+\t\t\t\tnb_rx_queue, dev_info.max_rx_queues);\n+\n+\tif (nb_tx_queue > dev_info.max_tx_queues)\n+\t\trte_exit(EXIT_FAILURE, \"Error: queue %u not available \"\n+\t\t\t\t\"(max tx queue is %u)\\n\",\n+\t\t\t\tnb_tx_queue, dev_info.max_tx_queues);\n+\n+\tprintf(\"Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\\n\",\n+\t\t\tnb_rx_queue, nb_tx_queue);\n+\n+\tret = 
rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,\n+\t\t\t&port_conf);\n+\tif (ret < 0)\n+\t\trte_exit(EXIT_FAILURE, \"Cannot configure device: \"\n+\t\t\t\t\"err=%d, port=%d\\n\", ret, portid);\n+\n+\t/* init one TX queue per lcore */\n+\ttx_queueid = 0;\n+\tfor (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {\n+\t\tif (rte_lcore_is_enabled(lcore_id) == 0)\n+\t\t\tcontinue;\n+\n+\t\tif (numa_on)\n+\t\t\tsocket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);\n+\t\telse\n+\t\t\tsocket_id = 0;\n+\n+\t\t/* init TX queue */\n+\t\tprintf(\"Setup txq=%u,%d,%d\\n\", lcore_id, tx_queueid, socket_id);\n+\n+\t\ttxconf = &dev_info.default_txconf;\n+\t\ttxconf->txq_flags = 0;\n+\n+\t\tret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,\n+\t\t\t\tsocket_id, txconf);\n+\t\tif (ret < 0)\n+\t\t\trte_exit(EXIT_FAILURE, \"rte_eth_tx_queue_setup: \"\n+\t\t\t\t\t\"err=%d, port=%d\\n\", ret, portid);\n+\n+\t\tqconf = &lcore_conf[lcore_id];\n+\t\tqconf->tx_queue_id[portid] = tx_queueid;\n+\t\ttx_queueid++;\n+\n+\t\t/* init RX queues */\n+\t\tfor (queue = 0; queue < qconf->nb_rx_queue; ++queue) {\n+\t\t\tif (portid != qconf->rx_queue_list[queue].port_id)\n+\t\t\t\tcontinue;\n+\n+\t\t\trx_queueid = qconf->rx_queue_list[queue].queue_id;\n+\n+\t\t\tprintf(\"Setup rxq=%d,%d,%d\\n\", portid, rx_queueid,\n+\t\t\t\t\tsocket_id);\n+\n+\t\t\tret = rte_eth_rx_queue_setup(portid, rx_queueid,\n+\t\t\t\t\tnb_rxd,\tsocket_id, NULL,\n+\t\t\t\t\tsocket_ctx[socket_id].mbuf_pool);\n+\t\t\tif (ret < 0)\n+\t\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t\t\"rte_eth_rx_queue_setup: err=%d, \"\n+\t\t\t\t\t\"port=%d\\n\", ret, portid);\n+\t\t}\n+\t}\n+\tprintf(\"\\n\");\n+}\n+\n+static void\n+pool_init(struct socket_ctx *ctx, int socket_id, unsigned nb_mbuf)\n+{\n+\tchar s[64];\n+\n+\tsnprintf(s, sizeof(s), \"mbuf_pool_%d\", socket_id);\n+\tctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,\n+\t\t\tMEMPOOL_CACHE_SIZE, ipsec_metadata_size(),\n+\t\t\tRTE_MBUF_DEFAULT_BUF_SIZE,\n+\t\t\tsocket_id);\n+\tif (ctx->mbuf_pool == NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Cannot init mbuf pool on socket %d\\n\",\n+\t\t\t\tsocket_id);\n+\telse\n+\t\tprintf(\"Allocated mbuf pool on socket %d\\n\", socket_id);\n+}\n+\n+int\n+main(int argc, char **argv)\n+{\n+\tint ret;\n+\tunsigned lcore_id, nb_ports;\n+\tuint8_t portid, socket_id;\n+\n+\t/* init EAL */\n+\tret = rte_eal_init(argc, argv);\n+\tif (ret < 0)\n+\t\trte_exit(EXIT_FAILURE, \"Invalid EAL parameters\\n\");\n+\targc -= ret;\n+\targv += ret;\n+\n+\t/* parse application arguments (after the EAL ones) */\n+\tret = parse_args(argc, argv);\n+\tif (ret < 0)\n+\t\trte_exit(EXIT_FAILURE, \"Invalid parameters\\n\");\n+\n+\tif (ep < 0)\n+\t\trte_exit(EXIT_FAILURE, \"need to choose either EP0 or EP1\\n\");\n+\n+\tif ((unprotected_port_mask & enabled_port_mask) !=\n+\t\t\tunprotected_port_mask)\n+\t\trte_exit(EXIT_FAILURE, \"Invalid unprotected portmask 0x%x\\n\",\n+\t\t\t\tunprotected_port_mask);\n+\n+\tnb_ports = rte_eth_dev_count();\n+\tif (nb_ports > RTE_MAX_ETHPORTS)\n+\t\tnb_ports = RTE_MAX_ETHPORTS;\n+\n+\tif (check_params() < 0)\n+\t\trte_exit(EXIT_FAILURE, \"check_params failed\\n\");\n+\n+\tret = init_lcore_rx_queues();\n+\tif (ret < 0)\n+\t\trte_exit(EXIT_FAILURE, \"init_lcore_rx_queues failed\\n\");\n+\n+\tnb_lcores = rte_lcore_count();\n+\n+\tcryptodevs_init();\n+\n+\t/* Replicate each contex per socket */\n+\tfor (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {\n+\t\tif (rte_lcore_is_enabled(lcore_id) == 0)\n+\t\t\tcontinue;\n+\n+\t\tif (numa_on)\n+\t\t\tsocket_id = 
(uint8_t)rte_lcore_to_socket_id(lcore_id);\n+\t\telse\n+\t\t\tsocket_id = 0;\n+\n+\t\tif (socket_ctx[socket_id].mbuf_pool)\n+\t\t\tcontinue;\n+\n+\t\tsa_init(&socket_ctx[socket_id], socket_id, ep,\n+\t\t\t\tfind_cdev_socket(socket_id, cdev_type));\n+\n+\t\tsp_init(&socket_ctx[socket_id], socket_id, ep);\n+\n+\t\trt_init(&socket_ctx[socket_id], socket_id, ep);\n+\n+\t\tpool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);\n+\t}\n+\n+\tfor (portid = 0; portid < nb_ports; portid++) {\n+\t\tif ((enabled_port_mask & (1 << portid)) == 0)\n+\t\t\tcontinue;\n+\n+\t\tport_init(portid);\n+\t}\n+\n+\t/* start ports */\n+\tfor (portid = 0; portid < nb_ports; portid++) {\n+\t\tif ((enabled_port_mask & (1 << portid)) == 0)\n+\t\t\tcontinue;\n+\n+\t\t/* Start device */\n+\t\tret = rte_eth_dev_start(portid);\n+\t\tif (ret < 0)\n+\t\t\trte_exit(EXIT_FAILURE, \"rte_eth_dev_start: \"\n+\t\t\t\t\t\"err=%d, port=%d\\n\", ret, portid);\n+\t\t/*\n+\t\t * If enabled, put device in promiscuous mode.\n+\t\t * This allows IO forwarding mode to forward packets\n+\t\t * to itself through 2 cross-connected  ports of the\n+\t\t * target machine.\n+\t\t */\n+\t\tif (promiscuous_on)\n+\t\t\trte_eth_promiscuous_enable(portid);\n+\t}\n+\n+\tcheck_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);\n+\n+\t/* launch per-lcore init on every lcore */\n+\trte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);\n+\tRTE_LCORE_FOREACH_SLAVE(lcore_id) {\n+\t\tif (rte_eal_wait_lcore(lcore_id) < 0)\n+\t\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\ndiff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c\nnew file mode 100644\nindex 0000000..83e3326\n--- /dev/null\n+++ b/examples/ipsec-secgw/ipsec.c\n@@ -0,0 +1,138 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <netinet/in.h>\n+#include <netinet/ip.h>\n+\n+#include <rte_branch_prediction.h>\n+#include <rte_log.h>\n+#include <rte_crypto.h>\n+#include <rte_cryptodev.h>\n+#include <rte_mbuf.h>\n+\n+#include \"ipsec.h\"\n+\n+static inline uint16_t\n+ipsec_processing(uint8_t cdev_id, uint16_t qp_id, struct rte_mbuf *pkts[],\n+\t\tstruct ipsec_sa *sas[], uint16_t nb_pkts, uint16_t max_pkts)\n+{\n+\tint ret, i, j;\n+\tstruct ipsec_mbuf_metadata *priv;\n+\tstruct rte_mbuf_offload *ol;\n+\tstruct ipsec_sa *sa;\n+\n+\tj = 0;\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\trte_prefetch0(sas[i]);\n+\t\trte_prefetch0(pkts[i]);\n+\n+\t\tpriv = get_priv(pkts[i]);\n+\t\tol = &priv->ol;\n+\t\tsa = sas[i];\n+\t\tpriv->sa = sa;\n+\n+\t\tIPSEC_ASSERT(sa != NULL);\n+\n+\t\t__rte_pktmbuf_offload_reset(ol, RTE_PKTMBUF_OL_CRYPTO);\n+\n+\t\trte_crypto_op_attach_session(&ol->op.crypto,\n+\t\t\t\tsa->crypto_session);\n+\n+\t\tpkts[i]->offload_ops = ol;\n+\n+\t\tret = sa->pre_crypto(pkts[i], sa, &ol->op.crypto);\n+\t\tif (unlikely(ret))\n+\t\t\trte_pktmbuf_free(pkts[i]);\n+\t\telse\n+\t\t\tpkts[j++] = pkts[i];\n+\t}\n+\tnb_pkts = j;\n+\n+\tret = rte_cryptodev_enqueue_burst(cdev_id, qp_id, pkts, nb_pkts);\n+\tif (ret < nb_pkts) {\n+\t\tIPSEC_LOG(DEBUG, IPSEC, \"Cryptodev %u queue %u:\"\n+\t\t\t\t\" enqueued %u packets (out of %u)\\n\",\n+\t\t\t\t cdev_id, qp_id, ret, nb_pkts);\n+\t\tfor (i = ret; i < nb_pkts; i++)\n+\t\t\trte_pktmbuf_free(pkts[i]);\n+\t}\n+\n+\tnb_pkts = rte_cryptodev_dequeue_burst(cdev_id, qp_id, pkts, max_pkts);\n+\tif ((nb_pkts != 0) && (nb_pkts < max_pkts))\n+\t\tIPSEC_LOG(DEBUG, IPSEC, \"Cryptodev %u queue %u:\"\n+\t\t\t\t\" dequeued %u packets (out of %u)\\n\",\n+\t\t\t\t cdev_id, qp_id, nb_pkts, max_pkts);\n+\n+\tj = 0;\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\trte_prefetch0(pkts[i]);\n+\n+\t\tpriv = get_priv(pkts[i]);\n+\t\tol = &priv->ol;\n+\t\tsa = priv->sa;\n+\t\trte_prefetch0(sa);\n+\n+\t\tIPSEC_ASSERT(sa != NULL);\n+\n+\t\tret = sa->post_crypto(pkts[i], sa, &ol->op.crypto);\n+\t\tif (unlikely(ret))\n+\t\t\trte_pktmbuf_free(pkts[i]);\n+\t\telse\n+\t\t\tpkts[j++] = pkts[i];\n+\t}\n+\n+\t/* return packets */\n+\treturn j;\n+}\n+\n+uint16_t\n+ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],\n+\t\tuint16_t nb_pkts, uint16_t len)\n+{\n+\tstruct ipsec_sa *sas[nb_pkts];\n+\n+\tinbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);\n+\n+\treturn ipsec_processing(ctx->cdev, ctx->queue, pkts, sas, nb_pkts, len);\n+}\n+\n+uint16_t\n+ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],\n+\t\tuint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)\n+{\n+\tstruct ipsec_sa *sas[nb_pkts];\n+\n+\toutbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);\n+\n+\treturn ipsec_processing(ctx->cdev, ctx->queue, pkts, sas, nb_pkts, len);\n+}\ndiff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h\nnew file mode 100644\nindex 0000000..abf9d5f\n--- /dev/null\n+++ b/examples/ipsec-secgw/ipsec.h\n@@ -0,0 +1,184 @@\n+/*-\n+ *   
BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef __IPSEC_H__\n+#define __IPSEC_H__\n+\n+#include <stdint.h>\n+#include <netinet/in.h>\n+#include <netinet/ip.h>\n+\n+#include <rte_mbuf_offload.h>\n+#include <rte_byteorder.h>\n+#include <rte_ip.h>\n+\n+#define RTE_LOGTYPE_IPSEC       RTE_LOGTYPE_USER1\n+#define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2\n+#define RTE_LOGTYPE_IPSEC_IPIP  RTE_LOGTYPE_USER3\n+\n+#ifdef IPSEC_DEBUG\n+#define IPSEC_ASSERT(exp)                                            \\\n+if (!(exp)) {                                                        \\\n+\trte_panic(\"line%d\\tassert \\\"\" #exp \"\\\" failed\\n\", __LINE__); \\\n+}\n+\n+#define IPSEC_LOG RTE_LOG\n+#else\n+#define IPSEC_ASSERT(exp) do {} while (0)\n+#define IPSEC_LOG(...) 
do {} while (0)\n+#endif /* IPSEC_DEBUG */\n+\n+#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */\n+\n+#define uint32_t_to_char(ip, a, b, c, d) do {\\\n+\t\t*a = (unsigned char)(ip >> 24 & 0xff);\\\n+\t\t*b = (unsigned char)(ip >> 16 & 0xff);\\\n+\t\t*c = (unsigned char)(ip >> 8 & 0xff);\\\n+\t\t*d = (unsigned char)(ip & 0xff);\\\n+\t} while (0)\n+\n+#define DEFAULT_MAX_CATEGORIES\t1\n+\n+#define IPSEC_SA_MAX_ENTRIES (64) /* must be power of 2, max 2 power 30 */\n+#define SPI2IDX(spi) (spi & (IPSEC_SA_MAX_ENTRIES - 1))\n+#define INVALID_SPI (0)\n+\n+#define DISCARD (0x80000000)\n+#define BYPASS (0x40000000)\n+#define PROTECT_MASK (0x3fffffff)\n+#define PROTECT(sa_idx) (SPI2IDX(sa_idx) & PROTECT_MASK) /* SA idx 30 bits */\n+\n+#define IPSEC_XFORM_MAX 2\n+\n+struct rte_crypto_xform;\n+struct ipsec_xform;\n+struct rte_cryptodev_session;\n+struct rte_mbuf;\n+\n+struct replay {\n+\tuint32_t seq_h;\n+};\n+\n+struct lifetime {\n+\tuint64_t bytes;\n+\tuint64_t seconds;\n+};\n+\n+struct ipsec_sa;\n+\n+typedef int (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa,\n+\t\tstruct rte_crypto_op *cop);\n+\n+struct ipsec_sa {\n+\tuint32_t spi;\n+\tuint32_t seq;\n+\tuint32_t src;\n+\tuint32_t dst;\n+\tstruct rte_cryptodev_session *crypto_session;\n+\tstruct rte_crypto_xform *xforms;\n+\tenum rte_crypto_cipher_algorithm cipher_algo;\n+\tenum rte_crypto_auth_algorithm auth_algo;\n+\tuint16_t digest_len;\n+\tuint16_t iv_len;\n+\tuint16_t block_size;\n+\tipsec_xform_fn pre_crypto;\n+\tipsec_xform_fn post_crypto;\n+\tuint32_t flags;\n+\t/* does not apply if no automated SA management (IKE) support */\n+\tstruct lifetime current;\n+\tstruct lifetime soft;\n+\tstruct lifetime hard;\n+\tstruct replay replay;\n+} __rte_cache_aligned;\n+\n+struct ipsec_mbuf_metadata {\n+\tstruct ipsec_sa *sa;\n+\tstruct rte_mbuf_offload ol;\n+};\n+\n+struct ipsec_ctx {\n+\tuint16_t cdev;\n+\tuint16_t queue;\n+\tstruct sa_ctx *sa_ctx;\n+};\n+\n+struct socket_ctx {\n+\tstruct sa_ctx *sa_ipv4_in;\n+\tstruct sa_ctx *sa_ipv4_out;\n+\tstruct sp_ctx *sp_ipv4_in;\n+\tstruct sp_ctx *sp_ipv4_out;\n+\tstruct rt_ctx *rt_ipv4;\n+\tstruct rte_mempool *mbuf_pool;\n+};\n+\n+uint16_t\n+ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],\n+\t\tuint16_t nb_pkts, uint16_t len);\n+\n+uint16_t\n+ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],\n+\t\tuint32_t sa_idx[], uint16_t nb_pkts, uint16_t len);\n+\n+static inline uint16_t\n+ipsec_metadata_size(void)\n+{\n+\treturn sizeof(struct ipsec_mbuf_metadata);\n+}\n+\n+static inline struct ipsec_mbuf_metadata *\n+get_priv(struct rte_mbuf *m)\n+{\n+\treturn RTE_PTR_ADD(m, sizeof(struct rte_mbuf));\n+}\n+\n+int\n+inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx);\n+\n+void\n+inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],\n+\t\tstruct ipsec_sa *sa[], uint16_t nb_pkts);\n+\n+void\n+outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],\n+\t\tstruct ipsec_sa *sa[], uint16_t nb_pkts);\n+\n+void\n+sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep);\n+\n+void\n+sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep, uint16_t cdev_id);\n+\n+void\n+rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep);\n+\n+#endif /* __IPSEC_H__ */\ndiff --git a/examples/ipsec-secgw/rt.c b/examples/ipsec-secgw/rt.c\nnew file mode 100644\nindex 0000000..82064b2\n--- /dev/null\n+++ b/examples/ipsec-secgw/rt.c\n@@ -0,0 +1,131 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Routing Table (RT)\n+ */\n+#include <rte_lpm.h>\n+#include <rte_errno.h>\n+\n+#include \"ipsec.h\"\n+\n+#define RT_IPV4_MAX_RULES         64\n+\n+struct ipv4_route {\n+\tuint32_t ip;\n+\tuint8_t  depth;\n+\tuint8_t  if_out;\n+};\n+\n+/* In the default routing table we have:\n+ * ep0 protected ports 0 and 1, and unprotected ports 2 and 3.\n+ */\n+static struct ipv4_route rt_ipv4_ep0[] = {\n+\t{ IPv4(172, 16, 2, 5), 32, 0 },\n+\t{ IPv4(172, 16, 2, 6), 32, 0 },\n+\t{ IPv4(172, 16, 2, 7), 32, 1 },\n+\t{ IPv4(172, 16, 2, 8), 32, 1 },\n+\n+\t{ IPv4(192, 168, 115, 0), 24, 2 },\n+\t{ IPv4(192, 168, 116, 0), 24, 2 },\n+\t{ IPv4(192, 168, 117, 0), 24, 3 },\n+\t{ IPv4(192, 168, 118, 0), 24, 3 }\n+};\n+\n+/* In the default routing table we have:\n+ * ep1 protected ports 0 and 1, and unprotected ports 2 and 3.\n+ */\n+static struct ipv4_route rt_ipv4_ep1[] = {\n+\t{ IPv4(172, 16, 1, 5), 32, 2 },\n+\t{ IPv4(172, 16, 1, 6), 32, 2 },\n+\t{ IPv4(172, 16, 1, 7), 32, 3 },\n+\t{ IPv4(172, 16, 1, 8), 32, 3 },\n+\n+\t{ IPv4(192, 168, 105, 0), 24, 0 },\n+\t{ IPv4(192, 168, 106, 0), 24, 0 },\n+\t{ IPv4(192, 168, 107, 0), 24, 1 },\n+\t{ IPv4(192, 168, 108, 0), 24, 1 }\n+};\n+\n+void\n+rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep)\n+{\n+\tchar name[PATH_MAX];\n+\tunsigned i;\n+\tint ret;\n+\tstruct rte_lpm *lpm;\n+\tstruct ipv4_route *rt;\n+\tchar a, b, c, d;\n+\tunsigned nb_routes;\n+\n+\tif (ctx == NULL)\n+\t\trte_exit(EXIT_FAILURE, \"NULL context.\\n\");\n+\n+\tif (ctx->rt_ipv4 != NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Routing Table for socket %u already \"\n+\t\t\t\"initialized\\n\", socket_id);\n+\n+\tprintf(\"Creating Routing Table (RT) context with %u max routes\\n\",\n+\t\t\tRT_IPV4_MAX_RULES);\n+\n+\tif (ep == 0) {\n+\t\trt = rt_ipv4_ep0;\n+\t\tnb_routes = RTE_DIM(rt_ipv4_ep0);\n+\t} else if (ep == 1) {\n+\t\trt = 
rt_ipv4_ep1;\n+\t\tnb_routes = RTE_DIM(rt_ipv4_ep1);\n+\t} else\n+\t\trte_exit(EXIT_FAILURE, \"Invalid EP value %u. Only 0 or 1 \"\n+\t\t\t\"supported.\\n\", ep);\n+\n+\t/* create the LPM table */\n+\tsnprintf(name, sizeof(name), \"%s_%u\", \"rt_ipv4\", socket_id);\n+\tlpm = rte_lpm_create(name, socket_id, RT_IPV4_MAX_RULES, 0);\n+\tif (lpm == NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Unable to create LPM table \"\n+\t\t\t\"on socket %d\\n\", socket_id);\n+\n+\t/* populate the LPM table */\n+\tfor (i = 0; i < nb_routes; i++) {\n+\t\tret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].if_out);\n+\t\tif (ret < 0)\n+\t\t\trte_exit(EXIT_FAILURE, \"Unable to add entry num %u to \"\n+\t\t\t\t\"LPM table on socket %d\\n\", i, socket_id);\n+\n+\t\tuint32_t_to_char(rt[i].ip, &a, &b, &c, &d);\n+\t\tprintf(\"LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\\n\",\n+\t\t\t\ta, b, c, d, rt[i].depth, rt[i].if_out);\n+\t}\n+\n+\tctx->rt_ipv4 = (struct rt_ctx *)lpm;\n+}\ndiff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c\nnew file mode 100644\nindex 0000000..5ea7592\n--- /dev/null\n+++ b/examples/ipsec-secgw/sa.c\n@@ -0,0 +1,391 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Security Associations\n+ */\n+#include <netinet/ip.h>\n+\n+#include <rte_memzone.h>\n+#include <rte_crypto.h>\n+#include <rte_cryptodev.h>\n+#include <rte_byteorder.h>\n+#include <rte_errno.h>\n+\n+#include \"ipsec.h\"\n+#include \"esp.h\"\n+\n+/* SAs EP0 Outbound */\n+const struct ipsec_sa sa_ep0_out[] = {\n+\t{ 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_outbound_pre_crypto,\n+\t\tesp4_tunnel_outbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_outbound_pre_crypto,\n+\t\tesp4_tunnel_outbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_outbound_pre_crypto,\n+\t\tesp4_tunnel_outbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_outbound_pre_crypto,\n+\t\tesp4_tunnel_outbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } }\n+};\n+\n+/* SAs EP0 Inbound */\n+const struct ipsec_sa sa_ep0_in[] = {\n+\t{ 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_inbound_pre_crypto,\n+\t\tesp4_tunnel_inbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_inbound_pre_crypto,\n+\t\tesp4_tunnel_inbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_inbound_pre_crypto,\n+\t\tesp4_tunnel_inbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_inbound_pre_crypto,\n+\t\tesp4_tunnel_inbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } }\n+};\n+\n+/* SAs EP1 Outbound */\n+const struct ipsec_sa sa_ep1_out[] = {\n+\t{ 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_outbound_pre_crypto,\n+\t\tesp4_tunnel_outbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 6, 0, IPv4(172, 16, 2, 6), 
IPv4(172, 16, 1, 6),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_outbound_pre_crypto,\n+\t\tesp4_tunnel_outbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_outbound_pre_crypto,\n+\t\tesp4_tunnel_outbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_outbound_pre_crypto,\n+\t\tesp4_tunnel_outbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } }\n+};\n+\n+/* SAs EP1 Inbound */\n+const struct ipsec_sa sa_ep1_in[] = {\n+\t{ 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_inbound_pre_crypto,\n+\t\tesp4_tunnel_inbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_inbound_pre_crypto,\n+\t\tesp4_tunnel_inbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_inbound_pre_crypto,\n+\t\tesp4_tunnel_inbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } },\n+\t{ 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),\n+\t\tNULL, NULL,\n+\t\tRTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t12, 16, 16,\n+\t\tesp4_tunnel_inbound_pre_crypto,\n+\t\tesp4_tunnel_inbound_post_crypto,\n+\t\t0, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0 } }\n+};\n+\n+static uint8_t cipher_key[256] = \"sixteenbytes key\";\n+\n+/* AES CBC xform */\n+const struct rte_crypto_xform aescbc_enc_xf = {\n+\tNULL,\n+\tRTE_CRYPTO_XFORM_CIPHER,\n+\t.cipher = { RTE_CRYPTO_CIPHER_OP_ENCRYPT, RTE_CRYPTO_CIPHER_AES_CBC,\n+\t\t.key = { cipher_key, 0, 16 } }\n+};\n+\n+const struct rte_crypto_xform aescbc_dec_xf = {\n+\tNULL,\n+\tRTE_CRYPTO_XFORM_CIPHER,\n+\t.cipher = { RTE_CRYPTO_CIPHER_OP_DECRYPT, RTE_CRYPTO_CIPHER_AES_CBC,\n+\t\t.key = { cipher_key, 0, 16 } }\n+};\n+\n+static uint8_t auth_key[256] = \"twentybytes hash key\";\n+\n+/* SHA1 HMAC xform */\n+const struct rte_crypto_xform sha1hmac_gen_xf = {\n+\tNULL,\n+\tRTE_CRYPTO_XFORM_AUTH,\n+\t.auth = { RTE_CRYPTO_AUTH_OP_GENERATE, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t.key = { auth_key, 0, 20 }, 12, 0 }\n+};\n+\n+const struct rte_crypto_xform sha1hmac_verify_xf = {\n+\tNULL,\n+\tRTE_CRYPTO_XFORM_AUTH,\n+\t.auth = { RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_AUTH_SHA1_HMAC,\n+\t\t.key = { auth_key, 0, 20 }, 12, 0 }\n+};\n+\n+struct sa_ctx {\n+\tstruct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];\n+\tstruct {\n+\t\tstruct rte_crypto_xform a;\n+\t\tstruct rte_crypto_xform b;\n+\t} xf[IPSEC_SA_MAX_ENTRIES];\n+};\n+\n+static struct sa_ctx *\n+sa_ipv4_create(const char *name, int socket_id)\n+{\n+\tchar s[PATH_MAX];\n+\tstruct sa_ctx *sa_ctx;\n+\tunsigned mz_size;\n+\tconst struct rte_memzone *mz;\n+\n+\tsnprintf(s, sizeof(s), \"%s_%u\", name, socket_id);\n+\n+\t/* Create SA array table */\n+\tprintf(\"Creating SA context with %u maximum entries\\n\",\n+\t\t\tIPSEC_SA_MAX_ENTRIES);\n+\n+\tmz_size = 
sizeof(struct sa_ctx);\n+\tmz = rte_memzone_reserve(s, mz_size, socket_id,\n+\t\t\tRTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);\n+\tif (mz == NULL) {\n+\t\tprintf(\"Failed to allocate SA DB memory\\n\");\n+\t\trte_errno = -ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\tsa_ctx = (struct sa_ctx *)mz->addr;\n+\n+\treturn sa_ctx;\n+}\n+\n+static int\n+sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],\n+\t\tunsigned nb_entries, uint16_t cdev_id, unsigned inbound)\n+{\n+\tstruct ipsec_sa *sa;\n+\tunsigned i, idx;\n+\n+\tfor (i = 0; i < nb_entries; i++) {\n+\t\tidx = SPI2IDX(entries[i].spi);\n+\t\tsa = &sa_ctx->sa[idx];\n+\t\tif (sa->spi != 0) {\n+\t\t\tprintf(\"Index %u already in use by SPI %u\\n\",\n+\t\t\t\t\tidx, sa->spi);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\t*sa = entries[i];\n+\t\tsa->src = rte_cpu_to_be_32(sa->src);\n+\t\tsa->dst = rte_cpu_to_be_32(sa->dst);\n+\t\tif (inbound) {\n+\t\t\tsa_ctx->xf[idx].a = sha1hmac_verify_xf;\n+\t\t\tsa_ctx->xf[idx].b = aescbc_dec_xf;\n+\t\t} else { /* outbound */\n+\t\t\tsa_ctx->xf[idx].a = aescbc_enc_xf;\n+\t\t\tsa_ctx->xf[idx].b = sha1hmac_gen_xf;\n+\t\t}\n+\t\tsa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;\n+\t\tsa_ctx->xf[idx].b.next = NULL;\n+\t\tsa->xforms = &sa_ctx->xf[idx].a;\n+\n+\t\tsa->crypto_session = rte_cryptodev_session_create(cdev_id,\n+\t\t\t\tsa->xforms);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],\n+\t\tunsigned nb_entries, uint16_t cdev_id)\n+{\n+\treturn sa_add_rules(sa_ctx, entries, nb_entries, cdev_id, 0);\n+}\n+\n+static inline int\n+sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],\n+\t\tunsigned nb_entries, uint16_t cdev_id)\n+{\n+\treturn sa_add_rules(sa_ctx, entries, nb_entries, cdev_id, 1);\n+}\n+\n+void\n+sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep, uint16_t cdev_id)\n+{\n+\tconst struct ipsec_sa *sa_out_entries, *sa_in_entries;\n+\tunsigned nb_out_entries, nb_in_entries;\n+\tconst char *name;\n+\n+\tif (ctx == NULL)\n+\t\trte_exit(EXIT_FAILURE, \"NULL context.\\n\");\n+\n+\tif (ctx->sa_ipv4_in != NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Inbound SA DB for socket %u already \"\n+\t\t\t\t\"initialized\\n\", socket_id);\n+\n+\tif (ctx->sa_ipv4_out != NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Outbound SA DB for socket %u already \"\n+\t\t\t\t\"initialized\\n\", socket_id);\n+\n+\tif (ep == 0) {\n+\t\tsa_out_entries = sa_ep0_out;\n+\t\tnb_out_entries = RTE_DIM(sa_ep0_out);\n+\t\tsa_in_entries = sa_ep0_in;\n+\t\tnb_in_entries = RTE_DIM(sa_ep0_in);\n+\t} else if (ep == 1) {\n+\t\tsa_out_entries = sa_ep1_out;\n+\t\tnb_out_entries = RTE_DIM(sa_ep1_out);\n+\t\tsa_in_entries = sa_ep1_in;\n+\t\tnb_in_entries = RTE_DIM(sa_ep1_in);\n+\t} else\n+\t\trte_exit(EXIT_FAILURE, \"Invalid EP value %u. 
\"\n+\t\t\t\t\"Only 0 or 1 supported.\\n\", ep);\n+\n+\tname = \"sa_ipv4_in\";\n+\tctx->sa_ipv4_in = sa_ipv4_create(name, socket_id);\n+\tif (ctx->sa_ipv4_in == NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Error [%d] creating SA context %s \"\n+\t\t\t\t\"in socket %d\\n\", rte_errno, name, socket_id);\n+\n+\tname = \"sa_ipv4_out\";\n+\tctx->sa_ipv4_out = sa_ipv4_create(name, socket_id);\n+\tif (ctx->sa_ipv4_out == NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Error [%d] creating SA context %s \"\n+\t\t\t\t\"in socket %d\\n\", rte_errno, name, socket_id);\n+\n+\tsa_in_add_rules(ctx->sa_ipv4_in, sa_in_entries,\n+\t\t\tnb_in_entries, cdev_id);\n+\n+\tsa_out_add_rules(ctx->sa_ipv4_out, sa_out_entries,\n+\t\t\tnb_out_entries, cdev_id);\n+}\n+\n+int\n+inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)\n+{\n+\tstruct ipsec_mbuf_metadata *priv;\n+\n+\tpriv = RTE_PTR_ADD(m, sizeof(struct rte_mbuf));\n+\n+\treturn (sa_ctx->sa[sa_idx].spi == priv->sa->spi);\n+}\n+\n+void\n+inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],\n+\t\tstruct ipsec_sa *sa[], uint16_t nb_pkts)\n+{\n+\tunsigned i;\n+\tuint32_t *src, spi;\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tspi = rte_pktmbuf_mtod_offset(pkts[i], struct esp_hdr *,\n+\t\t\t\tsizeof(struct ip))->spi;\n+\t\tif (spi == INVALID_SPI)\n+\t\t\tcontinue;\n+\n+\t\tsa[i] = &sa_ctx->sa[SPI2IDX(spi)];\n+\t\tif (spi != sa[i]->spi) {\n+\t\t\tsa[i] = NULL;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tsrc = rte_pktmbuf_mtod_offset(pkts[i], uint32_t *,\n+\t\t\t\toffsetof(struct ip, ip_src));\n+\t\tif ((sa[i]->src != *src) || (sa[i]->dst != *(src + 1)))\n+\t\t\tsa[i] = NULL;\n+\t}\n+}\n+\n+void\n+outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],\n+\t\tstruct ipsec_sa *sa[], uint16_t nb_pkts)\n+{\n+\tunsigned i;\n+\n+\tfor (i = 0; i < nb_pkts; i++)\n+\t\tsa[i] = &sa_ctx->sa[sa_idx[i]];\n+}\ndiff --git a/examples/ipsec-secgw/sp.c b/examples/ipsec-secgw/sp.c\nnew file mode 100644\nindex 0000000..219e478\n--- /dev/null\n+++ b/examples/ipsec-secgw/sp.c\n@@ -0,0 +1,324 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Security Policies\n+ */\n+#include <netinet/ip.h>\n+\n+#include <rte_acl.h>\n+\n+#include \"ipsec.h\"\n+\n+#define MAX_ACL_RULE_NUM\t1000\n+\n+/*\n+ * Rule and trace formats definitions.\n+ */\n+enum {\n+\tPROTO_FIELD_IPV4,\n+\tSRC_FIELD_IPV4,\n+\tDST_FIELD_IPV4,\n+\tSRCP_FIELD_IPV4,\n+\tDSTP_FIELD_IPV4,\n+\tNUM_FIELDS_IPV4\n+};\n+\n+/*\n+ * That effectively defines order of IPV4 classifications:\n+ *  - PROTO\n+ *  - SRC IP ADDRESS\n+ *  - DST IP ADDRESS\n+ *  - PORTS (SRC and DST)\n+ */\n+enum {\n+\tRTE_ACL_IPV4_PROTO,\n+\tRTE_ACL_IPV4_SRC,\n+\tRTE_ACL_IPV4_DST,\n+\tRTE_ACL_IPV4_PORTS,\n+\tRTE_ACL_IPV4_NUM\n+};\n+\n+struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {\n+\t{\n+\t.type = RTE_ACL_FIELD_TYPE_BITMASK,\n+\t.size = sizeof(uint8_t),\n+\t.field_index = PROTO_FIELD_IPV4,\n+\t.input_index = RTE_ACL_IPV4_PROTO,\n+\t.offset = 0,\n+\t},\n+\t{\n+\t.type = RTE_ACL_FIELD_TYPE_MASK,\n+\t.size = sizeof(uint32_t),\n+\t.field_index = SRC_FIELD_IPV4,\n+\t.input_index = RTE_ACL_IPV4_SRC,\n+\t.offset = offsetof(struct ip, ip_src) -\toffsetof(struct ip, ip_p)\n+\t},\n+\t{\n+\t.type = RTE_ACL_FIELD_TYPE_MASK,\n+\t.size = sizeof(uint32_t),\n+\t.field_index = DST_FIELD_IPV4,\n+\t.input_index = RTE_ACL_IPV4_DST,\n+\t.offset = offsetof(struct ip, ip_dst) - offsetof(struct ip, ip_p)\n+\t},\n+\t{\n+\t.type = RTE_ACL_FIELD_TYPE_RANGE,\n+\t.size = sizeof(uint16_t),\n+\t.field_index = SRCP_FIELD_IPV4,\n+\t.input_index = RTE_ACL_IPV4_PORTS,\n+\t.offset = sizeof(struct ip) - offsetof(struct ip, ip_p)\n+\t},\n+\t{\n+\t.type = RTE_ACL_FIELD_TYPE_RANGE,\n+\t.size = sizeof(uint16_t),\n+\t.field_index = DSTP_FIELD_IPV4,\n+\t.input_index = RTE_ACL_IPV4_PORTS,\n+\t.offset = sizeof(struct ip) - offsetof(struct ip, ip_p) +\n+\t\tsizeof(uint16_t)\n+\t},\n+};\n+\n+RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ipv4_defs));\n+\n+const struct acl4_rules acl4_rules_in[] = {\n+\t{\n+\t.data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},\n+\t/* destination IPv4 */\n+\t.field[2] = {.value.u32 = IPv4(192, 168, 105, 0),\n+\t\t\t\t.mask_range.u32 = 24,},\n+\t/* source port */\n+\t.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},\n+\t/* destination port */\n+\t.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}\n+\t},\n+\t{\n+\t.data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2},\n+\t/* destination IPv4 */\n+\t.field[2] = {.value.u32 = IPv4(192, 168, 106, 0),\n+\t\t\t\t.mask_range.u32 = 24,},\n+\t/* source port */\n+\t.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},\n+\t/* destination port */\n+\t.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}\n+\t},\n+\t{\n+\t.data = {.userdata = PROTECT(7), .category_mask = 1, .priority = 3},\n+\t/* destination IPv4 */\n+\t.field[2] = {.value.u32 = IPv4(192, 168, 107, 0),\n+\t\t\t\t.mask_range.u32 = 24,},\n+\t/* source port */\n+\t.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},\n+\t/* destination port */\n+\t.field[4] = {.value.u16 = 0, .mask_range.u16 = 
0xffff,}\n+\t},\n+\t{\n+\t.data = {.userdata = PROTECT(8), .category_mask = 1, .priority = 4},\n+\t/* destination IPv4 */\n+\t.field[2] = {.value.u32 = IPv4(192, 168, 108, 0),\n+\t\t\t\t.mask_range.u32 = 24,},\n+\t/* source port */\n+\t.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},\n+\t/* destination port */\n+\t.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}\n+\t}\n+};\n+\n+const struct acl4_rules acl4_rules_out[] = {\n+\t{\n+\t.data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},\n+\t/* destination IPv4 */\n+\t.field[2] = {.value.u32 = IPv4(192, 168, 115, 0),\n+\t\t\t\t.mask_range.u32 = 24,},\n+\t/* source port */\n+\t.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},\n+\t/* destination port */\n+\t.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}\n+\t},\n+\t{\n+\t.data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2},\n+\t/* destination IPv4 */\n+\t.field[2] = {.value.u32 = IPv4(192, 168, 116, 0),\n+\t\t\t\t.mask_range.u32 = 24,},\n+\t/* source port */\n+\t.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},\n+\t/* destination port */\n+\t.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}\n+\t},\n+\t{\n+\t.data = {.userdata = PROTECT(7), .category_mask = 1, .priority = 3},\n+\t/* destination IPv4 */\n+\t.field[2] = {.value.u32 = IPv4(192, 168, 117, 0),\n+\t\t\t\t.mask_range.u32 = 24,},\n+\t/* source port */\n+\t.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},\n+\t/* destination port */\n+\t.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}\n+\t},\n+\t{\n+\t.data = {.userdata = PROTECT(8), .category_mask = 1, .priority = 4},\n+\t/* destination IPv4 */\n+\t.field[2] = {.value.u32 = IPv4(192, 168, 118, 0),\n+\t\t\t\t.mask_range.u32 = 24,},\n+\t/* source port */\n+\t.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},\n+\t/* destination port */\n+\t.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}\n+\t}\n+};\n+\n+static void\n+print_one_ipv4_rule(const struct acl4_rules *rule, int extra)\n+{\n+\tunsigned char a, b, c, d;\n+\n+\tuint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32,\n+\t\t\t&a, &b, &c, &d);\n+\tprintf(\"%hhu.%hhu.%hhu.%hhu/%u \", a, b, c, d,\n+\t\t\trule->field[SRC_FIELD_IPV4].mask_range.u32);\n+\tuint32_t_to_char(rule->field[DST_FIELD_IPV4].value.u32,\n+\t\t\t&a, &b, &c, &d);\n+\tprintf(\"%hhu.%hhu.%hhu.%hhu/%u \", a, b, c, d,\n+\t\t\trule->field[DST_FIELD_IPV4].mask_range.u32);\n+\tprintf(\"%hu : %hu %hu : %hu 0x%hhx/0x%hhx \",\n+\t\trule->field[SRCP_FIELD_IPV4].value.u16,\n+\t\trule->field[SRCP_FIELD_IPV4].mask_range.u16,\n+\t\trule->field[DSTP_FIELD_IPV4].value.u16,\n+\t\trule->field[DSTP_FIELD_IPV4].mask_range.u16,\n+\t\trule->field[PROTO_FIELD_IPV4].value.u8,\n+\t\trule->field[PROTO_FIELD_IPV4].mask_range.u8);\n+\tif (extra)\n+\t\tprintf(\"0x%x-0x%x-0x%x \",\n+\t\t\trule->data.category_mask,\n+\t\t\trule->data.priority,\n+\t\t\trule->data.userdata);\n+}\n+\n+static inline void\n+dump_ipv4_rules(const struct acl4_rules *rule, int num, int extra)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < num; i++, rule++) {\n+\t\tprintf(\"\\t%d:\", i + 1);\n+\t\tprint_one_ipv4_rule(rule, extra);\n+\t\tprintf(\"\\n\");\n+\t}\n+}\n+\n+static struct rte_acl_ctx *\n+acl4_init(const char *name, int socketid, const struct acl4_rules *rules,\n+\t\tunsigned rules_nb)\n+{\n+\tchar s[PATH_MAX];\n+\tstruct rte_acl_param acl_param;\n+\tstruct rte_acl_config acl_build_param;\n+\tstruct rte_acl_ctx *ctx;\n+\n+\tprintf(\"Creating SP context with %u max rules\\n\", MAX_ACL_RULE_NUM);\n+\n+\tmemset(&acl_param, 0, 
sizeof(acl_param));\n+\n+\t/* Create ACL contexts */\n+\tsnprintf(s, sizeof(s), \"%s_%d\", name, socketid);\n+\n+\tprintf(\"IPv4 %s entries [%u]:\\n\", s, rules_nb);\n+\tdump_ipv4_rules(rules, rules_nb, 1);\n+\n+\tacl_param.name = s;\n+\tacl_param.socket_id = socketid;\n+\tacl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ipv4_defs));\n+\tacl_param.max_rule_num = MAX_ACL_RULE_NUM;\n+\n+\tctx = rte_acl_create(&acl_param);\n+\tif (ctx == NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Failed to create ACL context\\n\");\n+\n+\tif (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules,\n+\t\t\t\trules_nb) < 0)\n+\t\trte_exit(EXIT_FAILURE, \"add rules failed\\n\");\n+\n+\t/* Perform builds */\n+\tmemset(&acl_build_param, 0, sizeof(acl_build_param));\n+\n+\tacl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;\n+\tacl_build_param.num_fields = rules_nb;\n+\tmemcpy(&acl_build_param.defs, ipv4_defs, sizeof(ipv4_defs));\n+\n+\tif (rte_acl_build(ctx, &acl_build_param) != 0)\n+\t\trte_exit(EXIT_FAILURE, \"Failed to build ACL trie\\n\");\n+\n+\trte_acl_dump(ctx);\n+\n+\treturn ctx;\n+}\n+\n+void\n+sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep)\n+{\n+\tconst char *name;\n+\tconst struct acl4_rules *rules_out, *rules_in;\n+\tunsigned nb_out_rules, nb_in_rules;\n+\n+\tif (ctx == NULL)\n+\t\trte_exit(EXIT_FAILURE, \"NULL context.\\n\");\n+\n+\tif (ctx->sp_ipv4_in != NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Inbound SP DB for socket %u already \"\n+\t\t\t\t\"initialized\\n\", socket_id);\n+\n+\tif (ctx->sp_ipv4_out != NULL)\n+\t\trte_exit(EXIT_FAILURE, \"Outbound SP DB for socket %u already \"\n+\t\t\t\t\"initialized\\n\", socket_id);\n+\n+\tif (ep == 0) {\n+\t\trules_out = acl4_rules_in;\n+\t\tnb_out_rules = RTE_DIM(acl4_rules_in);\n+\t\trules_in = acl4_rules_out;\n+\t\tnb_in_rules = RTE_DIM(acl4_rules_out);\n+\t} else if (ep == 1) {\n+\t\trules_out = acl4_rules_out;\n+\t\tnb_out_rules = RTE_DIM(acl4_rules_out);\n+\t\trules_in = acl4_rules_in;\n+\t\tnb_in_rules = RTE_DIM(acl4_rules_in);\n+\t} else\n+\t\trte_exit(EXIT_FAILURE, \"Invalid EP value %u. \"\n+\t\t\t\t\"Only 0 or 1 supported.\\n\", ep);\n+\n+\tname = \"sp_ipv4_in\";\n+\tctx->sp_ipv4_in = (struct sp_ctx *)acl4_init(name, socket_id,\n+\t\t\trules_in, nb_in_rules);\n+\n+\tname = \"sp_ipv4_out\";\n+\tctx->sp_ipv4_out = (struct sp_ctx *)acl4_init(name, socket_id,\n+\t\t\trules_out, nb_out_rules);\n+}\n",
    "prefixes": [
        "dpdk-dev"
    ]
}