get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are modified).

put:
Update a patch.

GET /api/patches/81090/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 81090,
    "url": "http://patches.dpdk.org/api/patches/81090/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201016103400.21311-3-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201016103400.21311-3-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201016103400.21311-3-getelson@nvidia.com",
    "date": "2020-10-16T10:33:59",
    "name": "[v7,2/3] ethdev: tunnel offload model",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "bcecd19c9b0df58462e89afda07b308d59845441",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201016103400.21311-3-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 13056,
            "url": "http://patches.dpdk.org/api/series/13056/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13056",
            "date": "2020-10-16T10:33:57",
            "name": "Tunnel Offload API",
            "version": 7,
            "mbox": "http://patches.dpdk.org/series/13056/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/81090/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/81090/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id EF22AA04DB;\n\tFri, 16 Oct 2020 12:35:23 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1A7F91ED1A;\n\tFri, 16 Oct 2020 12:34:47 +0200 (CEST)",
            "from hqnvemgate25.nvidia.com (hqnvemgate25.nvidia.com\n [216.228.121.64]) by dpdk.org (Postfix) with ESMTP id 67ACB1ED31\n for <dev@dpdk.org>; Fri, 16 Oct 2020 12:34:44 +0200 (CEST)",
            "from hqmail.nvidia.com (Not Verified[216.228.121.13]) by\n hqnvemgate25.nvidia.com (using TLS: TLSv1.2, AES256-SHA)\n id <B5f8977170000>; Fri, 16 Oct 2020 03:33:59 -0700",
            "from nvidia.com (10.124.1.5) by HQMAIL107.nvidia.com (172.20.187.13)\n with Microsoft SMTP Server (TLS) id 15.0.1473.3;\n Fri, 16 Oct 2020 10:34:22 +0000"
        ],
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,\n <elibr@nvidia.com>, <ozsh@nvidia.com>, <ajit.khaparde@broadcom.com>,\n <asafp@nvidia.com>, Eli Britstein <elibr@mellanox.com>, Ori Kam\n <orika@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>, \"Ray\n Kinsella\" <mdr@ashroe.eu>, Neil Horman <nhorman@tuxdriver.com>, \"Thomas\n Monjalon\" <thomas@monjalon.net>, Ferruh Yigit <ferruh.yigit@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>",
        "Date": "Fri, 16 Oct 2020 13:33:59 +0300",
        "Message-ID": "<20201016103400.21311-3-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.28.0",
        "In-Reply-To": "<20201016103400.21311-1-getelson@nvidia.com>",
        "References": "<20200625160348.26220-1-getelson@mellanox.com>\n <20201016103400.21311-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "quoted-printable",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.124.1.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nvidia.com; s=n1;\n t=1602844439; bh=oSXRYBwIhS9fp2114mmrtBud5Y14qrfgtTH3SH0OPjw=;\n h=From:To:CC:Subject:Date:Message-ID:X-Mailer:In-Reply-To:\n References:MIME-Version:Content-Transfer-Encoding:Content-Type:\n X-Originating-IP:X-ClientProxiedBy;\n b=YeaWfCD33sEHwDd8tqg/EhzDwuDBGSu+2cafW0nerQGOMBK/GiejfDp4gRufHcBBY\n sSjd0sUIXZmeiA+J3wj5zJibyrEV6pz8j8uDwXsG2CRwdFpfxSM2ibatvMEK90CiAy\n zjRJ9Q7PVWH3DebrSMi/+vxuLLOpyfGN+q0Z9BHb+O5FC3ZOMQPxK8CMpXADdtUs7i\n 0lT0BHWFkw/qSdelL5mdzuCpaV1RW0V1tSoN9HVwpu10qiHvKtvIMejvg6zDYXp5WR\n D5y+W51TzjcvdCHYGKgWzgMzceWDuexG+opWjAJWMWJ5TEH/LzlO/C1eSifdTkBQ68\n lVZNPKOHPqSrQ==",
        "Subject": "[dpdk-dev] [PATCH v7 2/3] ethdev: tunnel offload model",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Eli Britstein <elibr@mellanox.com>\n\nrte_flow API provides the building blocks for vendor-agnostic flow\nclassification offloads. The rte_flow \"patterns\" and \"actions\"\nprimitives are fine-grained, thus enabling DPDK applications the\nflexibility to offload network stacks and complex pipelines.\nApplications wishing to offload tunneled traffic are required to use\nthe rte_flow primitives, such as group, meta, mark, tag, and others to\nmodel their high-level objects.  The hardware model design for\nhigh-level software objects is not trivial.  Furthermore, an optimal\ndesign is often vendor-specific.\n\nWhen hardware offloads tunneled traffic in multi-group logic,\npartially offloaded packets may arrive to the application after they\nwere modified in hardware. In this case, the application may need to\nrestore the original packet headers. Consider the following sequence:\nThe application decaps a packet in one group and jumps to a second\ngroup where it tries to match on a 5-tuple, that will miss and send\nthe packet to the application. In this case, the application does not\nreceive the original packet but a modified one. Also, in this case,\nthe application cannot match on the outer header fields, such as VXLAN\nvni and 5-tuple.\n\nThere are several possible ways to use rte_flow \"patterns\" and\n\"actions\" to resolve the issues above. For example:\n1 Mapping headers to a hardware registers using the\nrte_flow_action_mark/rte_flow_action_tag/rte_flow_set_meta objects.\n2 Apply the decap only at the last offload stage after all the\n\"patterns\" were matched and the packet will be fully offloaded.\nEvery approach has its pros and cons and is highly dependent on the\nhardware vendor.  For example, some hardware may have a limited number\nof registers while other hardware could not support inner actions and\nmust decap before accessing inner headers.\n\nThe tunnel offload model resolves these issues. 
The model goals are:\n1 Provide a unified application API to offload tunneled traffic that\nis capable to match on outer headers after decap.\n2 Allow the application to restore the outer header of partially\noffloaded packets.\n\nThe tunnel offload model does not introduce new elements to the\nexisting RTE flow model and is implemented as a set of helper\nfunctions.\n\nFor the application to work with the tunnel offload API it\nhas to adjust flow rules in multi-table tunnel offload in the\nfollowing way:\n1 Remove explicit call to decap action and replace it with PMD actions\nobtained from rte_flow_tunnel_decap_and_set() helper.\n2 Add PMD items obtained from rte_flow_tunnel_match() helper to all\nother rules in the tunnel offload sequence.\n\nVXLAN Code example:\n\nAssume application needs to do inner NAT on the VXLAN packet.\nThe first  rule in group 0:\n\nflow create <port id> ingress group 0\n  pattern eth / ipv4 / udp dst is 4789 / vxlan / end\n  actions {pmd actions} / jump group 3 / end\n\nThe first VXLAN packet that arrives matches the rule in group 0 and\njumps to group 3.  In group 3 the packet will miss since there is no\nflow to match and will be sent to the application.  Application  will\ncall rte_flow_get_restore_info() to get the packet outer header.\n\nApplication will insert a new rule in group 3 to match outer and inner\nheaders:\n\nflow create <port id> ingress group 3\n  pattern {pmd items} / eth / ipv4 dst is 172.10.10.1 /\n          udp dst 4789 / vxlan vni is 10 /\n          ipv4 dst is 184.1.2.3 / end\n  actions  set_ipv4_dst  186.1.1.1 / queue index 3 / end\n\nResulting of the rules will be that VXLAN packet with vni=10, outer\nIPv4 dst=172.10.10.1 and inner IPv4 dst=184.1.2.3 will be received\ndecapped on queue 3 with IPv4 dst=186.1.1.1\n\nNote: The packet in group 3 is considered decapped. All actions in\nthat group will be done on the header that was inner before decap. The\napplication may specify an outer header to be matched on.  
It's PMD\nresponsibility to translate these items to outer metadata.\n\nAPI usage:\n\n/**\n * 1. Initiate RTE flow tunnel object\n */\nconst struct rte_flow_tunnel tunnel = {\n  .type = RTE_FLOW_ITEM_TYPE_VXLAN,\n  .tun_id = 10,\n}\n\n/**\n * 2. Obtain PMD tunnel actions\n *\n * pmd_actions is an intermediate variable application uses to\n * compile actions array\n */\nstruct rte_flow_action **pmd_actions;\nrte_flow_tunnel_decap_and_set(&tunnel, &pmd_actions,\n                              &num_pmd_actions, &error);\n/**\n * 3. offload the first  rule\n * matching on VXLAN traffic and jumps to group 3\n * (implicitly decaps packet)\n */\napp_actions  =   jump group 3\nrule_items = app_items;  /** eth / ipv4 / udp / vxlan  */\nrule_actions = { pmd_actions, app_actions };\nattr.group = 0;\nflow_1 = rte_flow_create(port_id, &attr,\n                         rule_items, rule_actions, &error);\n\n/**\n  * 4. after flow creation application does not need to keep the\n  * tunnel action resources.\n  */\nrte_flow_tunnel_action_release(port_id, pmd_actions,\n                               num_pmd_actions);\n/**\n  * 5. After partially offloaded packet miss because there was no\n  * matching rule handle miss on group 3\n  */\nstruct rte_flow_restore_info info;\nrte_flow_get_restore_info(port_id, mbuf, &info, &error);\n\n/**\n * 6. Offload NAT rule:\n */\napp_items = { eth / ipv4 dst is 172.10.10.1 / udp dst 4789 /\n            vxlan vni is 10 / ipv4 dst is 184.1.2.3 }\napp_actions = { set_ipv4_dst 186.1.1.1 / queue index 3 }\n\nrte_flow_tunnel_match(&info.tunnel, &pmd_items,\n                      &num_pmd_items,  &error);\nrule_items = {pmd_items, app_items};\nrule_actions = app_actions;\nattr.group = info.group_id;\nflow_2 = rte_flow_create(port_id, &attr,\n                         rule_items, rule_actions, &error);\n\n/**\n * 7. 
Release PMD items after rule creation\n */\nrte_flow_tunnel_item_release(port_id,\n                             pmd_items, num_pmd_items);\n\nReferences\n1. https://mails.dpdk.org/archives/dev/2020-June/index.html\n\nSigned-off-by: Eli Britstein <elibr@mellanox.com>\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Ori Kam <orika@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n\n---\nv5:\n* rebase to next-net\n\nv6:\n* update the patch comment\n* update tunnel offload section in rte_flow.rst\n---\n doc/guides/prog_guide/rte_flow.rst       |  78 +++++++++\n doc/guides/rel_notes/release_20_11.rst   |   5 +\n lib/librte_ethdev/rte_ethdev_version.map |   5 +\n lib/librte_ethdev/rte_flow.c             | 112 +++++++++++++\n lib/librte_ethdev/rte_flow.h             | 195 +++++++++++++++++++++++\n lib/librte_ethdev/rte_flow_driver.h      |  32 ++++\n 6 files changed, 427 insertions(+)",
    "diff": "diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst\nindex 7fb5ec9059..8dc048c6f4 100644\n--- a/doc/guides/prog_guide/rte_flow.rst\n+++ b/doc/guides/prog_guide/rte_flow.rst\n@@ -3131,6 +3131,84 @@ operations include:\n - Duplication of a complete flow rule description.\n - Pattern item or action name retrieval.\n \n+Tunneled traffic offload\n+~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+rte_flow API provides the building blocks for vendor-agnostic flow\n+classification offloads. The rte_flow \"patterns\" and \"actions\"\n+primitives are fine-grained, thus enabling DPDK applications the\n+flexibility to offload network stacks and complex pipelines.\n+Applications wishing to offload tunneled traffic are required to use\n+the rte_flow primitives, such as group, meta, mark, tag, and others to\n+model their high-level objects.  The hardware model design for\n+high-level software objects is not trivial.  Furthermore, an optimal\n+design is often vendor-specific.\n+\n+When hardware offloads tunneled traffic in multi-group logic,\n+partially offloaded packets may arrive to the application after they\n+were modified in hardware. In this case, the application may need to\n+restore the original packet headers. Consider the following sequence:\n+The application decaps a packet in one group and jumps to a second\n+group where it tries to match on a 5-tuple, that will miss and send\n+the packet to the application. In this case, the application does not\n+receive the original packet but a modified one. Also, in this case,\n+the application cannot match on the outer header fields, such as VXLAN\n+vni and 5-tuple.\n+\n+There are several possible ways to use rte_flow \"patterns\" and\n+\"actions\" to resolve the issues above. 
For example:\n+\n+1 Mapping headers to a hardware registers using the\n+rte_flow_action_mark/rte_flow_action_tag/rte_flow_set_meta objects.\n+\n+2 Apply the decap only at the last offload stage after all the\n+\"patterns\" were matched and the packet will be fully offloaded.\n+\n+Every approach has its pros and cons and is highly dependent on the\n+hardware vendor.  For example, some hardware may have a limited number\n+of registers while other hardware could not support inner actions and\n+must decap before accessing inner headers.\n+\n+The tunnel offload model resolves these issues. The model goals are:\n+\n+1 Provide a unified application API to offload tunneled traffic that\n+is capable to match on outer headers after decap.\n+\n+2 Allow the application to restore the outer header of partially\n+offloaded packets.\n+\n+The tunnel offload model does not introduce new elements to the\n+existing RTE flow model and is implemented as a set of helper\n+functions.\n+\n+For the application to work with the tunnel offload API it\n+has to adjust flow rules in multi-table tunnel offload in the\n+following way:\n+\n+1 Remove explicit call to decap action and replace it with PMD actions\n+obtained from rte_flow_tunnel_decap_and_set() helper.\n+\n+2 Add PMD items obtained from rte_flow_tunnel_match() helper to all\n+other rules in the tunnel offload sequence.\n+\n+The model requirements:\n+\n+Software application must initialize\n+rte_tunnel object with tunnel parameters before calling\n+rte_flow_tunnel_decap_set() & rte_flow_tunnel_match().\n+\n+PMD actions array obtained in rte_flow_tunnel_decap_set() must be\n+released by application with rte_flow_action_release() call.\n+\n+PMD items array obtained with rte_flow_tunnel_match() must be released\n+by application with rte_flow_item_release() call.  Application can\n+release PMD items and actions after rule was created. 
However, if the\n+application needs to create additional rule for the same tunnel it\n+will need to obtain PMD items again.\n+\n+Application cannot destroy rte_tunnel object before it releases all\n+PMD actions & PMD items referencing that tunnel.\n+\n Caveats\n -------\n \ndiff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst\nindex 9155b468d6..f125ce79dd 100644\n--- a/doc/guides/rel_notes/release_20_11.rst\n+++ b/doc/guides/rel_notes/release_20_11.rst\n@@ -121,6 +121,11 @@ New Features\n   * Flow rule verification was updated to accept private PMD\n     items and actions.\n \n+* **Added generic API to offload tunneled traffic and restore missed packet.**\n+\n+  * Added a new hardware independent helper API to RTE flow library that\n+    offloads tunneled traffic and restores missed packets.\n+\n * **Updated Cisco enic driver.**\n \n   * Added support for VF representors with single-queue Tx/Rx and flow API\ndiff --git a/lib/librte_ethdev/rte_ethdev_version.map b/lib/librte_ethdev/rte_ethdev_version.map\nindex f64c379ac2..8ddda2547f 100644\n--- a/lib/librte_ethdev/rte_ethdev_version.map\n+++ b/lib/librte_ethdev/rte_ethdev_version.map\n@@ -239,6 +239,11 @@ EXPERIMENTAL {\n \trte_flow_shared_action_destroy;\n \trte_flow_shared_action_query;\n \trte_flow_shared_action_update;\n+\trte_flow_tunnel_decap_set;\n+\trte_flow_tunnel_match;\n+\trte_flow_get_restore_info;\n+\trte_flow_tunnel_action_decap_release;\n+\trte_flow_tunnel_item_release;\n };\n \n INTERNAL {\ndiff --git a/lib/librte_ethdev/rte_flow.c b/lib/librte_ethdev/rte_flow.c\nindex b74ea5593a..380c5cae2c 100644\n--- a/lib/librte_ethdev/rte_flow.c\n+++ b/lib/librte_ethdev/rte_flow.c\n@@ -1143,3 +1143,115 @@ rte_flow_shared_action_query(uint16_t port_id,\n \t\t\t\t       data, error);\n \treturn flow_err(port_id, ret, error);\n }\n+\n+int\n+rte_flow_tunnel_decap_set(uint16_t port_id,\n+\t\t\t  struct rte_flow_tunnel *tunnel,\n+\t\t\t  struct rte_flow_action 
**actions,\n+\t\t\t  uint32_t *num_of_actions,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->tunnel_decap_set)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->tunnel_decap_set(dev, tunnel, actions,\n+\t\t\t\t\t\t      num_of_actions, error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\n+\n+int\n+rte_flow_tunnel_match(uint16_t port_id,\n+\t\t      struct rte_flow_tunnel *tunnel,\n+\t\t      struct rte_flow_item **items,\n+\t\t      uint32_t *num_of_items,\n+\t\t      struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->tunnel_match)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->tunnel_match(dev, tunnel, items,\n+\t\t\t\t\t\t  num_of_items, error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\n+\n+int\n+rte_flow_get_restore_info(uint16_t port_id,\n+\t\t\t  struct rte_mbuf *m,\n+\t\t\t  struct rte_flow_restore_info *restore_info,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->get_restore_info)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->get_restore_info(dev, m, restore_info,\n+\t\t\t\t\t\t      error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, 
rte_strerror(ENOTSUP));\n+}\n+\n+int\n+rte_flow_tunnel_action_decap_release(uint16_t port_id,\n+\t\t\t\t     struct rte_flow_action *actions,\n+\t\t\t\t     uint32_t num_of_actions,\n+\t\t\t\t     struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->action_release)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->action_release(dev, actions,\n+\t\t\t\t\t\t    num_of_actions, error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\n+\n+int\n+rte_flow_tunnel_item_release(uint16_t port_id,\n+\t\t\t     struct rte_flow_item *items,\n+\t\t\t     uint32_t num_of_items,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);\n+\n+\tif (unlikely(!ops))\n+\t\treturn -rte_errno;\n+\tif (likely(!!ops->item_release)) {\n+\t\treturn flow_err(port_id,\n+\t\t\t\tops->item_release(dev, items,\n+\t\t\t\t\t\t  num_of_items, error),\n+\t\t\t\terror);\n+\t}\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, rte_strerror(ENOTSUP));\n+}\ndiff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h\nindex 48395284b5..a8eac4deb8 100644\n--- a/lib/librte_ethdev/rte_flow.h\n+++ b/lib/librte_ethdev/rte_flow.h\n@@ -3620,6 +3620,201 @@ rte_flow_shared_action_query(uint16_t port_id,\n \t\t\t     void *data,\n \t\t\t     struct rte_flow_error *error);\n \n+/* Tunnel has a type and the key information. 
*/\n+struct rte_flow_tunnel {\n+\t/**\n+\t * Tunnel type, for example RTE_FLOW_ITEM_TYPE_VXLAN,\n+\t * RTE_FLOW_ITEM_TYPE_NVGRE etc.\n+\t */\n+\tenum rte_flow_item_type\ttype;\n+\tuint64_t tun_id; /**< Tunnel identification. */\n+\n+\tRTE_STD_C11\n+\tunion {\n+\t\tstruct {\n+\t\t\trte_be32_t src_addr; /**< IPv4 source address. */\n+\t\t\trte_be32_t dst_addr; /**< IPv4 destination address. */\n+\t\t} ipv4;\n+\t\tstruct {\n+\t\t\tuint8_t src_addr[16]; /**< IPv6 source address. */\n+\t\t\tuint8_t dst_addr[16]; /**< IPv6 destination address. */\n+\t\t} ipv6;\n+\t};\n+\trte_be16_t tp_src; /**< Tunnel port source. */\n+\trte_be16_t tp_dst; /**< Tunnel port destination. */\n+\tuint16_t   tun_flags; /**< Tunnel flags. */\n+\n+\tbool       is_ipv6; /**< True for valid IPv6 fields. Otherwise IPv4. */\n+\n+\t/**\n+\t * the following members are required to restore packet\n+\t * after miss\n+\t */\n+\tuint8_t    tos; /**< TOS for IPv4, TC for IPv6. */\n+\tuint8_t    ttl; /**< TTL for IPv4, HL for IPv6. */\n+\tuint32_t label; /**< Flow Label for IPv6. */\n+};\n+\n+/**\n+ * Indicate that the packet has a tunnel.\n+ */\n+#define RTE_FLOW_RESTORE_INFO_TUNNEL  (1ULL << 0)\n+\n+/**\n+ * Indicate that the packet has a non decapsulated tunnel header.\n+ */\n+#define RTE_FLOW_RESTORE_INFO_ENCAPSULATED  (1ULL << 1)\n+\n+/**\n+ * Indicate that the packet has a group_id.\n+ */\n+#define RTE_FLOW_RESTORE_INFO_GROUP_ID  (1ULL << 2)\n+\n+/**\n+ * Restore information structure to communicate the current packet processing\n+ * state when some of the processing pipeline is done in hardware and should\n+ * continue in software.\n+ */\n+struct rte_flow_restore_info {\n+\t/**\n+\t * Bitwise flags (RTE_FLOW_RESTORE_INFO_*) to indicate validation of\n+\t * other fields in struct rte_flow_restore_info.\n+\t */\n+\tuint64_t flags;\n+\tuint32_t group_id; /**< Group ID where packed missed */\n+\tstruct rte_flow_tunnel tunnel; /**< Tunnel information. 
*/\n+};\n+\n+/**\n+ * Allocate an array of actions to be used in rte_flow_create, to implement\n+ * tunnel-decap-set for the given tunnel.\n+ * Sample usage:\n+ *   actions vxlan_decap / tunnel-decap-set(tunnel properties) /\n+ *            jump group 0 / end\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] tunnel\n+ *   Tunnel properties.\n+ * @param[out] actions\n+ *   Array of actions to be allocated by the PMD. This array should be\n+ *   concatenated with the actions array provided to rte_flow_create.\n+ * @param[out] num_of_actions\n+ *   Number of actions allocated.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_decap_set(uint16_t port_id,\n+\t\t\t  struct rte_flow_tunnel *tunnel,\n+\t\t\t  struct rte_flow_action **actions,\n+\t\t\t  uint32_t *num_of_actions,\n+\t\t\t  struct rte_flow_error *error);\n+\n+/**\n+ * Allocate an array of items to be used in rte_flow_create, to implement\n+ * tunnel-match for the given tunnel.\n+ * Sample usage:\n+ *   pattern tunnel-match(tunnel properties) / outer-header-matches /\n+ *           inner-header-matches / end\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] tunnel\n+ *   Tunnel properties.\n+ * @param[out] items\n+ *   Array of items to be allocated by the PMD. This array should be\n+ *   concatenated with the items array provided to rte_flow_create.\n+ * @param[out] num_of_items\n+ *   Number of items allocated.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. 
PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_match(uint16_t port_id,\n+\t\t      struct rte_flow_tunnel *tunnel,\n+\t\t      struct rte_flow_item **items,\n+\t\t      uint32_t *num_of_items,\n+\t\t      struct rte_flow_error *error);\n+\n+/**\n+ * Populate the current packet processing state, if exists, for the given mbuf.\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] m\n+ *   Mbuf struct.\n+ * @param[out] info\n+ *   Restore information. Upon success contains the HW state.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_get_restore_info(uint16_t port_id,\n+\t\t\t  struct rte_mbuf *m,\n+\t\t\t  struct rte_flow_restore_info *info,\n+\t\t\t  struct rte_flow_error *error);\n+\n+/**\n+ * Release the action array as allocated by rte_flow_tunnel_decap_set.\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] actions\n+ *   Array of actions to be released.\n+ * @param[in] num_of_actions\n+ *   Number of elements in actions array.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. 
PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_action_decap_release(uint16_t port_id,\n+\t\t\t\t     struct rte_flow_action *actions,\n+\t\t\t\t     uint32_t num_of_actions,\n+\t\t\t\t     struct rte_flow_error *error);\n+\n+/**\n+ * Release the item array as allocated by rte_flow_tunnel_match.\n+ *\n+ * @param port_id\n+ *   Port identifier of Ethernet device.\n+ * @param[in] items\n+ *   Array of items to be released.\n+ * @param[in] num_of_items\n+ *   Number of elements in item array.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. PMDs initialize this\n+ *   structure in case of error only.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+__rte_experimental\n+int\n+rte_flow_tunnel_item_release(uint16_t port_id,\n+\t\t\t     struct rte_flow_item *items,\n+\t\t\t     uint32_t num_of_items,\n+\t\t\t     struct rte_flow_error *error);\n #ifdef __cplusplus\n }\n #endif\ndiff --git a/lib/librte_ethdev/rte_flow_driver.h b/lib/librte_ethdev/rte_flow_driver.h\nindex 58f56b0262..bd5ffc0bb1 100644\n--- a/lib/librte_ethdev/rte_flow_driver.h\n+++ b/lib/librte_ethdev/rte_flow_driver.h\n@@ -131,6 +131,38 @@ struct rte_flow_ops {\n \t\t const struct rte_flow_shared_action *shared_action,\n \t\t void *data,\n \t\t struct rte_flow_error *error);\n+\t/** See rte_flow_tunnel_decap_set() */\n+\tint (*tunnel_decap_set)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_flow_tunnel *tunnel,\n+\t\t struct rte_flow_action **pmd_actions,\n+\t\t uint32_t *num_of_actions,\n+\t\t struct rte_flow_error *err);\n+\t/** See rte_flow_tunnel_match() */\n+\tint (*tunnel_match)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_flow_tunnel *tunnel,\n+\t\t struct rte_flow_item **pmd_items,\n+\t\t uint32_t *num_of_items,\n+\t\t struct rte_flow_error *err);\n+\t/** 
See rte_flow_get_rte_flow_restore_info() */\n+\tint (*get_restore_info)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_mbuf *m,\n+\t\t struct rte_flow_restore_info *info,\n+\t\t struct rte_flow_error *err);\n+\t/** See rte_flow_action_tunnel_decap_release() */\n+\tint (*action_release)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_flow_action *pmd_actions,\n+\t\t uint32_t num_of_actions,\n+\t\t struct rte_flow_error *err);\n+\t/** See rte_flow_item_release() */\n+\tint (*item_release)\n+\t\t(struct rte_eth_dev *dev,\n+\t\t struct rte_flow_item *pmd_items,\n+\t\t uint32_t num_of_items,\n+\t\t struct rte_flow_error *err);\n };\n \n /**\n",
    "prefixes": [
        "v7",
        "2/3"
    ]
}