get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

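These operations can be exercised with any HTTP client. As a minimal sketch (not part of the API page itself), the read operation could look like this with Python's requests library; the endpoint and field names match the captured response below:

    # Sketch: read a patch through the Patchwork REST API with `requests`.
    import requests

    BASE = "http://patches.dpdk.org/api"

    resp = requests.get(f"{BASE}/patches/76931/", timeout=30)
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])    # "[v2,2/4] ethdev: tunnel offload model"
    print(patch["state"])   # "superseded"
    print(patch["mbox"])    # raw mbox URL, suitable for `git am`

The captured exchange below shows the equivalent request and its full response: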
GET /api/patches/76931/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 76931,
    "url": "http://patches.dpdk.org/api/patches/76931/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200908201552.14423-3-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200908201552.14423-3-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200908201552.14423-3-getelson@nvidia.com",
    "date": "2020-09-08T20:15:49",
    "name": "[v2,2/4] ethdev: tunnel offload model",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "aa2e5bc34eb7276c64e24611c0a5a63316a76a35",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200908201552.14423-3-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 12033,
            "url": "http://patches.dpdk.org/api/series/12033/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12033",
            "date": "2020-09-08T20:15:47",
            "name": "Tunnel Offload API",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/12033/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/76931/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/76931/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B0A8DA04B1;\n\tTue,  8 Sep 2020 22:16:48 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1D9F01C0D4;\n\tTue,  8 Sep 2020 22:16:40 +0200 (CEST)",
            "from hqnvemgate26.nvidia.com (hqnvemgate26.nvidia.com\n [216.228.121.65]) by dpdk.org (Postfix) with ESMTP id D87DA1C0CC\n for <dev@dpdk.org>; Tue,  8 Sep 2020 22:16:37 +0200 (CEST)",
            "from hqpgpgate102.nvidia.com (Not Verified[216.228.121.13]) by\n hqnvemgate26.nvidia.com (using TLS: TLSv1.2, DES-CBC3-SHA)\n id <B5f57e6970003>; Tue, 08 Sep 2020 13:16:23 -0700",
            "from hqmail.nvidia.com ([172.20.161.6])\n by hqpgpgate102.nvidia.com (PGP Universal service);\n Tue, 08 Sep 2020 13:16:37 -0700",
            "from nvidia.com (10.124.1.5) by HQMAIL107.nvidia.com (172.20.187.13)\n with Microsoft SMTP Server (TLS) id 15.0.1473.3;\n Tue, 8 Sep 2020 20:16:18 +0000"
        ],
        "X-PGP-Universal": "processed;\n by hqpgpgate102.nvidia.com on Tue, 08 Sep 2020 13:16:37 -0700",
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<matan@nvidia.com>, <rasland@nvidia.com>, <orika@nvidia.com>, \"Eli\n Britstein\" <elibr@mellanox.com>, Ori Kam <orika@mellanox.com>, John McNamara\n <john.mcnamara@intel.com>, Marko Kovacevic <marko.kovacevic@intel.com>, \"Ray\n Kinsella\" <mdr@ashroe.eu>, Neil Horman <nhorman@tuxdriver.com>, \"Thomas\n Monjalon\" <thomas@monjalon.net>, Ferruh Yigit <ferruh.yigit@intel.com>,\n Andrew Rybchenko <arybchenko@solarflare.com>",
        "Date": "Tue, 8 Sep 2020 23:15:49 +0300",
        "Message-ID": "<20200908201552.14423-3-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20200908201552.14423-1-getelson@nvidia.com>",
        "References": "<20200625160348.26220-1-getelson@mellanox.com>\n <20200908201552.14423-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"UTF-8\"",
        "Content-Transfer-Encoding": "quoted-printable",
        "X-Originating-IP": "[10.124.1.5]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nvidia.com; s=n1;\n t=1599596183; bh=qAayyp434N8yQtGUZoZ+aRSSCpRxpvlR695OuJxpt3o=;\n h=X-PGP-Universal:From:To:CC:Subject:Date:Message-ID:X-Mailer:\n In-Reply-To:References:MIME-Version:Content-Type:\n Content-Transfer-Encoding:X-Originating-IP:X-ClientProxiedBy;\n b=iY6qkfA82H39wrRPzPivx4BiTlOVuVwMNggN66IDSMycWTMxY8I8KXBkY1oy2dDZw\n VvvoiLpCrW1pXskkkmP7ziMe7C/smOvnOGf0oD8b2/E6uZ3klD8JZc1X0QqaGvGJHd\n 1JFHtz81FVrfPp4tQmQNZGTuQg1TzHwvbbD2wA6V3EaWfOxK0KGNf0qYnfvl0ROOd/\n rfsHKGWSPQIbSjIt5B6f695gCYowsv233BTwYg5R5dUA4ne+P/ZKrg97IOah//s0sO\n 2abPVC7HLfzW7CzvQTF4rVt4Rgywrp09MCxBOdt8mKXGtNcUG8NenHiSParUdnfpT4\n mP+czWviWqcAw==",
        "Subject": "[dpdk-dev] [PATCH v2 2/4] ethdev: tunnel offload model",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Eli Britstein <elibr@mellanox.com>\n\nRte_flow API provides the building blocks for vendor agnostic flow\nclassification offloads.  The rte_flow match and action primitives are\nfine grained, thus enabling DPDK applications the flexibility to\noffload network stacks and complex pipelines.\n\nApplications wishing to offload complex data structures (e.g. tunnel\nvirtual ports) are required to use the rte_flow primitives, such as\ngroup, meta, mark, tag and others to model their high level objects.\n\nThe hardware model design for high level software objects is not\ntrivial.  Furthermore, an optimal design is often vendor specific.\n\nThe goal of this API is to provide applications with the hardware\noffload model for common high level software objects which is optimal\nin regards to the underlying hardware.\n\nTunnel ports are the first of such objects.\n\nTunnel ports\n------------\nIngress processing of tunneled traffic requires the classification of\nthe tunnel type followed by a decap action.\n\nIn software, once a packet is decapsulated the in_port field is\nchanged to a virtual port representing the tunnel type. The outer\nheader fields are stored as packet metadata members and may be matched\nby proceeding flows.\n\nOpenvswitch, for example, uses two flows:\n1. classification flow - setting the virtual port representing the\ntunnel type For example: match on udp port 4789\nactions=tnl_pop(vxlan_vport)\n2. steering flow according to outer and inner header matches match on\nin_port=vxlan_vport and outer/inner header matches actions=forward to\np ort X The benefits of multi-flow tables are described in [1].\n\nOffloading tunnel ports\n-----------------------\nTunnel ports introduce a new stateless field that can be matched on.\nCurrently the rte_flow library provides an API to encap, decap and\nmatch on tunnel headers. However, there is no rte_flow primitive to\nset and match tunnel virtual ports.\n\nThere are several possible hardware models for offloading virtual\ntunnel port flows including, but not limited to, the following:\n1. Setting the virtual port on a hw register using the\nrte_flow_action_mark/ rte_flow_action_tag/rte_flow_set_meta objects.\n2. Mapping a virtual port to an rte_flow group\n3. Avoiding the need to match on transient objects by merging\nmulti-table flows to a single rte_flow rule.\n\nEvery approach has its pros and cons.  The preferred approach should\ntake into account the entire system architecture and is very often\nvendor specific.\n\nThe proposed rte_flow_tunnel_decap_set helper function (drafted below)\nis designed to provide a common, vendor agnostic, API for setting the\nvirtual port value.  The helper API enables PMD implementations to\nreturn vendor specific combination of rte_flow actions realizing the\nvendor's hardware model for setting a tunnel port.  Applications may\nappend the list of actions returned from the helper function when\ncreating an rte_flow rule in hardware.\n\nSimilarly, the rte_flow_tunnel_match helper (drafted below)\nallows for multiple hardware implementations to return a list of\nfte_flow items.\n\nMiss handling\n-------------\nPackets going through multiple rte_flow groups are exposed to hw\nmisses due to partial packet processing. 
In such cases, the software\nshould continue the packet's processing from the point where the\nhardware missed.\n\nWe propose a generic rte_flow_restore structure providing the state\nthat was stored in hardware when the packet missed.\n\nCurrently, the structure will provide the tunnel state of the packet\nthat missed, namely:\n1. The group id that missed\n2. The tunnel port that missed\n3. Tunnel information that was stored in memory (due to decap action).\nIn the future, we may add additional fields as more state may be\nstored in the device memory (e.g. ct_state).\n\nApplications may query the state via a new\nrte_flow_tunnel_get_restore_info(mbuf) API, thus allowing\na vendor specific implementation.\n\nVXLAN Code example:\nAssume application needs to do inner NAT on VXLAN packet.\nThe first  rule in group 0:\n\nflow create <port id> ingress group 0\n  pattern eth / ipv4 / udp dst is 4789 / vxlan / end\n  actions {pmd actions} / jump group 3 / end\n\nFirst VXLAN packet that arrives matches the rule in group 0 and jumps\nto group 3 In group 3 the packet will miss since there is no flow to\nmatch and will be uploaded to application.  Application  will call\nrte_flow_get_restore_info() to get the packet outer header.\nApplication will insert a new rule in group 3 to match outer and inner\nheaders:\n\nflow create <port id> ingress group 3\n  pattern {pmd items} / eth / ipv4 dst is 172.10.10.1 /\n          udp dst 4789 / vxlan vni is 10 /\n          ipv4 dst is 184.1.2.3 / end\n  actions  set_ipv4_dst  186.1.1.1 / queue index 3 / end\n\nResulting of rules will be that VXLAN packet with vni=10, outer IPv4\ndst=172.10.10.1 and inner IPv4 dst=184.1.2.3 will be received decaped\non queue 3 with IPv4 dst=186.1.1.1\n\nNote: Packet in group 3 is considered decaped. All actions in that\ngroup will be done on header that was inner before decap. Application\nmay specify outer header to be matched on.  It's PMD responsibility to\ntranslate these items to outer metadata.\n\nAPI usage:\n/**\n * 1. Initiate RTE flow tunnel object\n */\nconst struct rte_flow_tunnel tunnel = {\n  .type = RTE_FLOW_ITEM_TYPE_VXLAN,\n  .tun_id = 10,\n}\n\n/**\n * 2. Obtain PMD tunnel actions\n *\n * pmd_actions is an intermediate variable application uses to\n * compile actions array\n */\nstruct rte_flow_action **pmd_actions;\nrte_flow_tunnel_decap_and_set(&tunnel, &pmd_actions,\n                              &num_pmd_actions, &error);\n\n/**\n * 3. offload the first  rule\n * matching on VXLAN traffic and jumps to group 3\n * (implicitly decaps packet)\n */\napp_actions  =   jump group 3\nrule_items = app_items;  /** eth / ipv4 / udp / vxlan  */\nrule_actions = { pmd_actions, app_actions };\nattr.group = 0;\nflow_1 = rte_flow_create(port_id, &attr,\n                         rule_items, rule_actions, &error);\n/**\n  * 4. after flow creation application does not need to keep tunnel\n  * action resources.\n  */\nrte_flow_tunnel_action_release(port_id, pmd_actions,\n                               num_pmd_actions);\n\n/**\n  * 5. After partially offloaded packet miss because there was no\n  * matching rule handle miss on group 3\n  */\nstruct rte_flow_restore_info info;\nrte_flow_get_restore_info(port_id, mbuf, &info, &error);\n\n/**\n * 6. 
Offload NAT rule:\n */\napp_items = { eth / ipv4 dst is 172.10.10.1 / udp dst 4789 /\n            vxlan vni is 10 / ipv4 dst is 184.1.2.3 }\napp_actions = { set_ipv4_dst 186.1.1.1 / queue index 3 }\n\nrte_flow_tunnel_match(&info.tunnel, &pmd_items,\n                      &num_pmd_items,  &error);\nrule_items = {pmd_items, app_items};\nrule_actions = app_actions;\nattr.group = info.group_id;\nflow_2 = rte_flow_create(port_id, &attr,\n                         rule_items, rule_actions, &error);\n\n/**\n * 7. Release PMD items after rule creation\n */\nrte_flow_tunnel_item_release(port_id, pmd_items, num_pmd_items);\n\nReferences\n1. https://mails.dpdk.org/archives/dev/2020-June/index.html\n\nSigned-off-by: Eli Britstein <elibr@mellanox.com>\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Ori Kam <orika@nvidia.com>\n---\nv2:\n* Update commit log\n---\n doc/guides/prog_guide/rte_flow.rst       | 105 ++++++++++++\n lib/librte_ethdev/rte_ethdev_version.map |   5 +\n lib/librte_ethdev/rte_flow.c             | 112 +++++++++++++\n lib/librte_ethdev/rte_flow.h             | 195 +++++++++++++++++++++++\n lib/librte_ethdev/rte_flow_driver.h      |  32 ++++\n 5 files changed, 449 insertions(+)",
    "diff": "diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst\nindex 3e5cd1e0d8..827ea0ca76 100644\n--- a/doc/guides/prog_guide/rte_flow.rst\n+++ b/doc/guides/prog_guide/rte_flow.rst\n@@ -3018,6 +3018,111 @@ operations include:\n - Duplication of a complete flow rule description.\n - Pattern item or action name retrieval.\n \n+Tunneled traffic offload\n+~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+Provide software application with unified rules model for tunneled traffic\n+regardless underlying hardware.\n+\n+ - The model introduces a concept of a virtual tunnel port (VTP).\n+ - The model uses VTP to offload ingress tunneled network traffic \n+   with RTE flow rules.\n+ - The model is implemented as set of helper functions. Each PMD\n+   implements VTP offload according to underlying hardware offload\n+   capabilities.  Applications must query PMD for VTP flow\n+   items / actions before using in creation of a VTP flow rule.\n+\n+The model components:\n+\n+- Virtual Tunnel Port (VTP) is a stateless software object that\n+  describes tunneled network traffic.  VTP object usually contains\n+  descriptions of outer headers, tunnel headers and inner headers.\n+- Tunnel Steering flow Rule (TSR) detects tunneled packets and\n+  delegates them to tunnel processing infrastructure, implemented\n+  in PMD for optimal hardware utilization, for further processing.\n+- Tunnel Matching flow Rule (TMR) verifies packet configuration and\n+  runs offload actions in case of a match.\n+\n+Application actions:\n+\n+1 Initialize VTP object according to tunnel network parameters.\n+\n+2 Create TSR flow rule.\n+\n+2.1 Query PMD for VTP actions. Application can query for VTP actions more than once.\n+\n+  .. code-block:: c\n+\n+    int\n+    rte_flow_tunnel_decap_set(uint16_t port_id,\n+                              struct rte_flow_tunnel *tunnel,\n+                              struct rte_flow_action **pmd_actions,\n+                              uint32_t *num_of_pmd_actions,\n+                              struct rte_flow_error *error);\n+\n+2.2 Integrate PMD actions into TSR actions list.\n+\n+2.3 Create TSR flow rule.\n+\n+    .. code-block:: console\n+\n+      flow create <port> group 0 match {tunnel items} / end actions {PMD actions} / {App actions} / end\n+\n+3 Create TMR flow rule.\n+\n+3.1 Query PMD for VTP items. Application can query for VTP items more than once.\n+\n+    .. code-block:: c\n+\n+      int\n+      rte_flow_tunnel_match(uint16_t port_id,\n+                            struct rte_flow_tunnel *tunnel,\n+                            struct rte_flow_item **pmd_items,\n+                            uint32_t *num_of_pmd_items,\n+                            struct rte_flow_error *error);\n+\n+3.2 Integrate PMD items into TMR items list.\n+\n+3.3 Create TMR flow rule.\n+\n+    .. code-block:: console\n+\n+      flow create <port> group 0 match {PMD items} / {APP items} / end actions {offload actions} / end\n+\n+The model provides helper function call to restore packets that miss\n+tunnel TMR rules to its original state:\n+\n+.. 
code-block:: c\n+\n+  int\n+  rte_flow_get_restore_info(uint16_t port_id,\n+                            struct rte_mbuf *mbuf,\n+                            struct rte_flow_restore_info *info,\n+                            struct rte_flow_error *error);\n+\n+rte_tunnel object filled by the call inside\n+``rte_flow_restore_info *info parameter`` can be used by the application\n+to create new TMR rule for that tunnel.\n+\n+The model requirements:\n+\n+Software application must initialize\n+rte_tunnel object with tunnel parameters before calling\n+rte_flow_tunnel_decap_set() & rte_flow_tunnel_match().\n+\n+PMD actions array obtained in rte_flow_tunnel_decap_set() must be\n+released by application with rte_flow_action_release() call.\n+Application can release the actionsfter TSR rule was created.\n+\n+PMD items array obtained with rte_flow_tunnel_match() must be released\n+by application with rte_flow_item_release() call.  Application can\n+release the items after rule was created. However, if the application\n+needs to create additional TMR rule for the same tunnel it will need\n+to obtain PMD items again.\n+\n+Application cannot destroy rte_tunnel object before it releases all\n+PMD actions & PMD items referencing that tunnel.\n+\n Caveats\n -------\n \ndiff --git a/lib/librte_ethdev/rte_ethdev_version.map b/lib/librte_ethdev/rte_ethdev_version.map\nindex 1212a17d32..8bb6b99d4a 100644\n--- a/lib/librte_ethdev/rte_ethdev_version.map\n+++ b/lib/librte_ethdev/rte_ethdev_version.map\n@@ -241,6 +241,11 @@ EXPERIMENTAL {\n \t__rte_ethdev_trace_rx_burst;\n \t__rte_ethdev_trace_tx_burst;\n \trte_flow_get_aged_flows;\n+\trte_flow_tunnel_decap_set;\n+\trte_flow_tunnel_match;\n+\trte_flow_tunnel_get_restore_info;\n+\trte_flow_tunnel_action_decap_release;\n+\trte_flow_tunnel_item_release;\n };\n \n INTERNAL {\ndiff --git a/lib/librte_ethdev/rte_flow.c b/lib/librte_ethdev/rte_flow.c\nindex 9905426bc9..23e364f337 100644\n--- a/lib/librte_ethdev/rte_flow.c\n+++ b/lib/librte_ethdev/rte_flow.c\n@@ -1269,3 +1269,115 @@ rte_flow_get_aged_flows(uint16_t port_id, void **contexts,\n \t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t  NULL, rte_strerror(ENOTSUP));\n }\n+\n+int\n+rte_flow_tunnel_decap_set(uint16_t port_id,\n+\t\t\t  struct rte_flow_tunnel *tunnel,\n+\t\t\t  struct rte_flow_action **actions,\n+\t\t\t  uint32_t *num_of_actions,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->tunnel_decap_set)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->tunnel_decap_set(dev, tunnel, actions,\n+\t\t\t\t\t\t      num_of_actions, error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\n+\n+int\n+rte_flow_tunnel_match(uint16_t port_id,\n+\t\t      struct rte_flow_tunnel *tunnel,\n+\t\t      struct rte_flow_item **items,\n+\t\t      uint32_t *num_of_items,\n+\t\t      struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->tunnel_match)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->tunnel_match(dev, tunnel, items,\n+\t\t\t\t\t\t  num_of_items, error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t 
 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\n+\n+int\n+rte_flow_tunnel_get_restore_info(uint16_t port_id,\n+\t\t\t\t struct rte_mbuf *m,\n+\t\t\t\t struct rte_flow_restore_info *restore_info,\n+\t\t\t\t struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->get_restore_info)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->get_restore_info(dev, m, restore_info,\n+\t\t\t\t\t\t      error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\n+\n+int\n+rte_flow_tunnel_action_decap_release(uint16_t port_id,\n+\t\t\t\t     struct rte_flow_action *actions,\n+\t\t\t\t     uint32_t num_of_actions,\n+\t\t\t\t     struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->action_release)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->action_release(dev, actions,\n+\t\t\t\t\t\t    num_of_actions, error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\n+\n+int\n+rte_flow_tunnel_item_release(uint16_t port_id,\n+\t\t\t     struct rte_flow_item *items,\n+\t\t\t     uint32_t num_of_items,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->item_release)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->item_release(dev, items,\n+\t\t\t\t\t\t  num_of_items, error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\ndiff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h\nindex da8bfa5489..d485fb2f77 100644\n--- a/lib/librte_ethdev/rte_flow.h\n+++ b/lib/librte_ethdev/rte_flow.h\n@@ -3357,6 +3357,201 @@ int\n rte_flow_get_aged_flows(uint16_t port_id, void **contexts,\n \t\t\tuint32_t nb_contexts, struct rte_flow_error *error);\n \n+/* Tunnel has a type and the key information. */\n+struct rte_flow_tunnel {\n+\t/**\n+\t * Tunnel type, for example RTE_FLOW_ITEM_TYPE_VXLAN,\n+\t * RTE_FLOW_ITEM_TYPE_NVGRE etc.\n+\t */\n+\tenum rte_flow_item_type\ttype;\n+\tuint64_t tun_id; /**< Tunnel identification. */\n+\n+\tRTE_STD_C11\n+\tunion {\n+\t\tstruct {\n+\t\t\trte_be32_t src_addr; /**< IPv4 source address. */\n+\t\t\trte_be32_t dst_addr; /**< IPv4 destination address. */\n+\t\t} ipv4;\n+\t\tstruct {\n+\t\t\tuint8_t src_addr[16]; /**< IPv6 source address. */\n+\t\t\tuint8_t dst_addr[16]; /**< IPv6 destination address. */\n+\t\t} ipv6;\n+\t};\n+\trte_be16_t tp_src; /**< Tunnel port source. */\n+\trte_be16_t tp_dst; /**< Tunnel port destination. */\n+\tuint16_t   tun_flags; /**< Tunnel flags. */\n+\n+\tbool       is_ipv6; /**< True for valid IPv6 fields. Otherwise IPv4. */\n+\n+\t/**\n+\t * following members required to restore packet\n+\t * after miss\n+\t */\n+\tuint8_t    tos; /**< TOS for IPv4, TC for IPv6. */\n+\tuint8_t    ttl; /**< TTL for IPv4, HL for IPv6. 
*/\n+\tuint32_t label; /**< Flow Label for IPv6. */\n+};\n+\n+/**\n+ * Indicate that the packet has a tunnel.\n+ */\n+#define RTE_FLOW_RESTORE_INFO_TUNNEL  (1ULL << 0)\n+\n+/**\n+ * Indicate that the packet has a non decapsulated tunnel header.\n+ */\n+#define RTE_FLOW_RESTORE_INFO_ENCAPSULATED  (1ULL << 1)\n+\n+/**\n+ * Indicate that the packet has a group_id.\n+ */\n+#define RTE_FLOW_RESTORE_INFO_GROUP_ID  (1ULL << 2)\n+\n+/**\n+ * Restore information structure to communicate the current packet processing\n+ * state when some of the processing pipeline is done in hardware and should\n+ * continue in software.\n+ */\n+struct rte_flow_restore_info {\n+\t/**\n+\t * Bitwise flags (RTE_FLOW_RESTORE_INFO_*) to indicate validation of\n+\t * other fields in struct rte_flow_restore_info.\n+\t */\n+\tuint64_t flags;\n+\tuint32_t group_id; /**< Group ID where packed missed */\n+\tstruct rte_flow_tunnel tunnel; /**< Tunnel information. */\n+};\n+\n+/**\n+ * Allocate an array of actions to be used in rte_flow_create, to implement\n+ * tunnel-decap-set for the given tunnel.\n+ * Sample usage:\n+ *   actions vxlan_decap / tunnel-decap-set(tunnel properties) /\n+ *            jump group 0 / end\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] tunnel\n+ *   Tunnel properties.\n+ * @param[out] actions\n+ *   Array of actions to be allocated by the PMD. This array should be\n+ *   concatenated with the actions array provided to rte_flow_create.\n+ * @param[out] num_of_actions\n+ *   Number of actions allocated.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_decap_set(uint16_t port_id,\n+\t\t\t  struct rte_flow_tunnel *tunnel,\n+\t\t\t  struct rte_flow_action **actions,\n+\t\t\t  uint32_t *num_of_actions,\n+\t\t\t  struct rte_flow_error *error);\n+\n+/**\n+ * Allocate an array of items to be used in rte_flow_create, to implement\n+ * tunnel-match for the given tunnel.\n+ * Sample usage:\n+ *   pattern tunnel-match(tunnel properties) / outer-header-matches /\n+ *           inner-header-matches / end\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] tunnel\n+ *   Tunnel properties.\n+ * @param[out] items\n+ *   Array of items to be allocated by the PMD. This array should be\n+ *   concatenated with the items array provided to rte_flow_create.\n+ * @param[out] num_of_items\n+ *   Number of items allocated.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_match(uint16_t port_id,\n+\t\t      struct rte_flow_tunnel *tunnel,\n+\t\t      struct rte_flow_item **items,\n+\t\t      uint32_t *num_of_items,\n+\t\t      struct rte_flow_error *error);\n+\n+/**\n+ * Populate the current packet processing state, if exists, for the given mbuf.\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] m\n+ *   Mbuf struct.\n+ * @param[out] info\n+ *   Restore information. Upon success contains the HW state.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. 
PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_get_restore_info(uint16_t port_id,\n+\t\t\t\t struct rte_mbuf *m,\n+\t\t\t\t struct rte_flow_restore_info *info,\n+\t\t\t\t struct rte_flow_error *error);\n+\n+/**\n+ * Release the action array as allocated by rte_flow_tunnel_decap_set.\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] actions\n+ *   Array of actions to be released.\n+ * @param[in] num_of_actions\n+ *   Number of elements in actions array.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_action_decap_release(uint16_t port_id,\n+\t\t\t\t     struct rte_flow_action *actions,\n+\t\t\t\t     uint32_t num_of_actions,\n+\t\t\t\t     struct rte_flow_error *error);\n+\n+/**\n+ * Release the item array as allocated by rte_flow_tunnel_match.\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] items\n+ *   Array of items to be released.\n+ * @param[in] num_of_items\n+ *   Number of elements in item array.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_item_release(uint16_t port_id,\n+\t\t\t     struct rte_flow_item *items,\n+\t\t\t     uint32_t num_of_items,\n+\t\t\t     struct rte_flow_error *error);\n #ifdef __cplusplus\n }\n #endif\ndiff --git a/lib/librte_ethdev/rte_flow_driver.h b/lib/librte_ethdev/rte_flow_driver.h\nindex 881cc469b7..ad1d7a2cdc 100644\n--- a/lib/librte_ethdev/rte_flow_driver.h\n+++ b/lib/librte_ethdev/rte_flow_driver.h\n@@ -107,6 +107,38 @@ struct rte_flow_ops {\n \t\t void **context,\n \t\t uint32_t nb_contexts,\n \t\t struct rte_flow_error *err);\n+\t/** See rte_flow_tunnel_decap_set() */\n+\tint (*tunnel_decap_set)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_flow_tunnel *tunnel,\n+\t\t struct rte_flow_action **pmd_actions,\n+\t\t uint32_t *num_of_actions,\n+\t\t struct rte_flow_error *err);\n+\t/** See rte_flow_tunnel_match() */\n+\tint (*tunnel_match)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_flow_tunnel *tunnel,\n+\t\t struct rte_flow_item **pmd_items,\n+\t\t uint32_t *num_of_items,\n+\t\t struct rte_flow_error *err);\n+\t/** See rte_flow_get_rte_flow_restore_info() */\n+\tint (*get_restore_info)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_mbuf *m,\n+\t\t struct rte_flow_restore_info *info,\n+\t\t struct rte_flow_error *err);\n+\t/** See rte_flow_action_tunnel_decap_release() */\n+\tint (*action_release)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_flow_action *pmd_actions,\n+\t\t uint32_t num_of_actions,\n+\t\t struct rte_flow_error *err);\n+\t/** See rte_flow_item_release() */\n+\tint (*item_release)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_flow_item *pmd_items,\n+\t\t uint32_t num_of_items,\n+\t\t struct rte_flow_error *err);\n };\n \n /**\n",
    "prefixes": [
        "v2",
        "2/4"
    ]
}
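
The patch and put operations above require authentication: Patchwork issues per-user API tokens and accepts them in an Authorization header. A minimal sketch of a partial update with requests follows; the token value and target state are placeholders, and whether the server accepts the change depends on the account's maintainer rights for the project:

    # Sketch: update this patch via the "patch:" operation (partial update).
    # Assumes a valid Patchwork API token; values below are placeholders.
    import requests

    BASE = "http://patches.dpdk.org/api"
    TOKEN = "0123456789abcdef"  # placeholder: issued per user by the Patchwork server

    resp = requests.patch(
        f"{BASE}/patches/76931/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "superseded"},  # send only the fields being changed
        timeout=30,
    )
    resp.raise_for_status()
    print(resp.json()["state"])

PUT works the same way but conventionally replaces the whole writable representation, while PATCH, as above, sends only the fields being changed.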