get:
Show a patch.

patch:
Update a patch (partial update: only the supplied fields are changed).

put:
Update a patch (full update).
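
The read-only view below can also be fetched programmatically. A minimal sketch in Python using the third-party requests library; the HTTPS scheme and the plain-JSON response without "?format=api" (which only selects the browsable renderer) are assumptions, not something shown on this page:

import requests

# Sketch: fetch this patch as JSON (reads need no authentication).
resp = requests.get(
    "https://patches.dpdk.org/api/patches/92134/",
    headers={"Accept": "application/json"},
)
resp.raise_for_status()

patch = resp.json()
# These keys all appear in the response reproduced below.
print(patch["name"], patch["state"], patch["check"])

The actual request and response recorded for this patch follow.
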

GET /api/patches/92134/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 92134,
    "url": "http://patches.dpdk.org/api/patches/92134/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210425155722.32477-1-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210425155722.32477-1-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210425155722.32477-1-getelson@nvidia.com",
    "date": "2021-04-25T15:57:21",
    "name": "[v2,1/2] net/mlx5: fix tunnel offload private items location",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "42c9f6a49d8ba76109d78628308bd2938603a540",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210425155722.32477-1-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 16660,
            "url": "http://patches.dpdk.org/api/series/16660/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=16660",
            "date": "2021-04-25T15:57:21",
            "name": "[v2,1/2] net/mlx5: fix tunnel offload private items location",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/16660/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/92134/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/92134/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 882A5A0548;\n\tSun, 25 Apr 2021 17:57:45 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 6CE8E4113E;\n\tSun, 25 Apr 2021 17:57:45 +0200 (CEST)",
            "from NAM12-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam12on2052.outbound.protection.outlook.com [40.107.244.52])\n by mails.dpdk.org (Postfix) with ESMTP id 9004041139;\n Sun, 25 Apr 2021 17:57:44 +0200 (CEST)",
            "from DS7PR03CA0091.namprd03.prod.outlook.com (2603:10b6:5:3b7::6) by\n BY5PR12MB3922.namprd12.prod.outlook.com (2603:10b6:a03:195::30) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4065.25; Sun, 25 Apr\n 2021 15:57:42 +0000",
            "from DM6NAM11FT018.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:3b7:cafe::50) by DS7PR03CA0091.outlook.office365.com\n (2603:10b6:5:3b7::6) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4065.21 via Frontend\n Transport; Sun, 25 Apr 2021 15:57:42 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT018.mail.protection.outlook.com (10.13.172.110) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4065.21 via Frontend Transport; Sun, 25 Apr 2021 15:57:41 +0000",
            "from nvidia.com (172.20.145.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Sun, 25 Apr\n 2021 15:57:39 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=hvH7wbmi3WXO36vjT/+RO6vo58/3byqKR7nWgcTZxCekvAC0uPKlxUWS1s+w5ceHFpmfNeyXQQ/pURnFcQUNFWX1tmxKsvc0n7PZFrUQcUpTnXMweGhhKo38zonBaTzdjGO0TOBs0e5hgUET43V+z7XPrtrsPslD9Fic+MjAaQgmxMpRZWil4N4ZQO6JP6ZJafHYTgBTFfUHq44qvIUqZH5+qTgan/2GxShTFC8O6br0HRnX33phvDFHbIex/m753D6kx6Ph8ZC7E5ZwDuRnjO6CTsNcLBrSACdsRbQn4Ybf4XcFvI5XySf99vinBNgUUbZIMrjriItur4ABLSQ09g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=gVnr34nPFceLyihFGFa4YIMPVvnU+LabOE5jLmitla8=;\n b=E5M1Mm0IJ1z3FAWBFvbWscqB/MrtJKzlTRuPHUZllru+28raPET+3MrNns2+BHTYD/LdFsfsxlxL/1u7u7WpyFRYjrC6ynBppPBPjm2/0RpKNbQ5D0N4c25r5sxnev8VTIhBXH6+4j1BHBoQ2gY52NGaXe65YXbjC5A2PsLw3jgebW1d0ewMfZqXk9MIFofYaJClIF7tIJKVFhEHDSOv0tGPP/hBAmAHB8W8WyR6FF7KJBPBrMRMl7atlKolP7ubU4mE1fy+3nPe/lUe/KfqfPHM5QTJV/lVbvBNK1E5qBbyl+u2hL0DaXU3WjUY3jzNhNask5GR+U4qucVejI9mRQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=gVnr34nPFceLyihFGFa4YIMPVvnU+LabOE5jLmitla8=;\n b=EEMSJzSz4NKtLO7MtNDgd/jZNComfBY9D1/0xZeGxa62QKqeZeAAiapkS65tfwFKXH98VVL1S1QCDGqZPcT9M6cZSCRgX7b7CXedH/MF4agwoNXZi7PCiEOi9M/v5Je4KPRQwW7rgEv4P5s8jprXkJDKJ0vTqHr05G/lJKjQ/m7Ta5/hlhCVGGu4PwSJRH6zoje4GdWiSWe3MJdp/BN+esAOEZQo+Gea2wDHPR4YjYH45FOxcE7FC+k+u+f8Hi14Yc7L8OnVqD3mL1PEo2cl9nC7VbOVHJ8T8F7+Z532biG3Z/+v53lKaefZQwZ0hPtIsDocuOd0pfTgjAbAZ3SXcQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,\n <ferruh.yigit@intel.com>, <stable@dpdk.org>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>, Shahaf Shuler <shahafs@nvidia.com>",
        "Date": "Sun, 25 Apr 2021 18:57:21 +0300",
        "Message-ID": "<20210425155722.32477-1-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20210419130204.24348-1-getelson@nvidia.com>",
        "References": "<20210419130204.24348-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.145.6]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "23815201-1d14-4a5b-9f6b-08d90802dbca",
        "X-MS-TrafficTypeDiagnostic": "BY5PR12MB3922:",
        "X-Microsoft-Antispam-PRVS": "\n <BY5PR12MB3922B8C9A84BC093134D3627A5439@BY5PR12MB3922.namprd12.prod.outlook.com>",
        "X-MS-Exchange-Transport-Forked": "True",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:8882;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n XbsM9ADVfnP7kvBrL6eGXZgLwqGGbqMItpW3ksisK9ZOuOxarUWGo6UbZ2703dlygk/3trsKYd/ccRQQ3w8JhP34YCDrvIOu5QCXn/vRBg9IoTbI1oV+Hbl9236oElvWdWS7Qoafo7PnL4OgwHgq+pwlOBrxSLff7D6j5/EfaL/GVqoEhFbj/44PxDPq4wZs6mnKmFAd1L5lAhaSVfACdngD8mQ/qk4NAJagYYqSWh3S+SCFX8s+xOqJb5cqGSzkwB/7g+mwF6C9e8a4UlRB5Q5akTakvBJrmhxYdH/Knlt2D+fxnlpN2uaW8lyUQNPxjxYwTBud5lAWudcJQ6ZtlXbEaUf0fKBE4yNOjVK62uKDsCq1IFG05FDMKpkFR5Z0Ky0aA6HjGaOB7+zwTlb/b9oz5EOSOOPnQwwGl4MmOWdyin9oG+2WrsIMSH7YnRH1YFYTOQVxPOAjUAp3WoZUcvEgoHSepT9SX/VUu3t/gtgF3hnWZpI5MdaM3miMHBJeht3MInRceanrSAyjyD454aOr91EeP1mknE9nuJxS1B3szrbUMNA5seyzmRlfuAduQTMWEQk1rFap8GXEJD7f/8tS2WtGnHFIEHZqoSq0hytVte1CjcZ+rEmdDklW8jYN/Z/SvX60Yb4mzyXLANu8EVRURi682kZaZFNVbErBqM5zR5a1BmyyD7OzXUGe8Nl1",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(39860400002)(376002)(136003)(396003)(346002)(36840700001)(46966006)(8936002)(336012)(36860700001)(30864003)(478600001)(426003)(8676002)(2616005)(4326008)(1076003)(107886003)(16526019)(186003)(36756003)(70206006)(83380400001)(5660300002)(26005)(6666004)(86362001)(47076005)(54906003)(36906005)(70586007)(316002)(82310400003)(356005)(2906002)(7696005)(6916009)(7636003)(82740400003)(6286002)(55016002)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "25 Apr 2021 15:57:41.8632 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 23815201-1d14-4a5b-9f6b-08d90802dbca",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT018.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY5PR12MB3922",
        "Subject": "[dpdk-dev] [PATCH v2 1/2] net/mlx5: fix tunnel offload private\n items location",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Tunnel offload API requires application to query PMD for specific flow\nitems and actions. Application uses these PMD specific elements to\nbuild flow rules according to the tunnel offload model.\nThe model does not restrict private elements location in a flow rule,\nbut the current MLX5 PMD implementation expects that tunnel offload\nrule will begin with PMD specific elements.\nThe patch removes that placement limitation in MLX5 PMD.\n\nCc: stable@dpdk.org\n\nFixes: 4ec6360de37d (\"net/mlx5: implement tunnel offload\")\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c    | 48 ++++++++++++++++++---------\n drivers/net/mlx5/mlx5_flow.h    | 44 ++++++++++++++-----------\n drivers/net/mlx5/mlx5_flow_dv.c | 58 +++++++++++++++++++--------------\n 3 files changed, 90 insertions(+), 60 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 84463074a5..fcc82ce9d4 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -51,6 +51,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n \t\t\t     const struct rte_flow_attr *attr,\n \t\t\t     const struct rte_flow_action *app_actions,\n \t\t\t     uint32_t flow_idx,\n+\t\t\t     const struct mlx5_flow_tunnel *tunnel,\n \t\t\t     struct tunnel_default_miss_ctx *ctx,\n \t\t\t     struct rte_flow_error *error);\n static struct mlx5_flow_tunnel *\n@@ -5463,22 +5464,14 @@ flow_create_split_outer(struct rte_eth_dev *dev,\n \treturn ret;\n }\n \n-static struct mlx5_flow_tunnel *\n-flow_tunnel_from_rule(struct rte_eth_dev *dev,\n-\t\t      const struct rte_flow_attr *attr,\n-\t\t      const struct rte_flow_item items[],\n-\t\t      const struct rte_flow_action actions[])\n+static inline struct mlx5_flow_tunnel *\n+flow_tunnel_from_rule(const struct mlx5_flow *flow)\n {\n \tstruct mlx5_flow_tunnel *tunnel;\n \n #pragma GCC diagnostic push\n #pragma GCC diagnostic ignored \"-Wcast-qual\"\n-\tif (is_flow_tunnel_match_rule(dev, attr, items, actions))\n-\t\ttunnel = (struct mlx5_flow_tunnel *)items[0].spec;\n-\telse if (is_flow_tunnel_steer_rule(dev, attr, items, actions))\n-\t\ttunnel = (struct mlx5_flow_tunnel *)actions[0].conf;\n-\telse\n-\t\ttunnel = NULL;\n+\ttunnel = (typeof(tunnel))flow->tunnel;\n #pragma GCC diagnostic pop\n \n \treturn tunnel;\n@@ -5672,12 +5665,11 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \t\t\t\t\t      error);\n \t\tif (ret < 0)\n \t\t\tgoto error;\n-\t\tif (is_flow_tunnel_steer_rule(dev, attr,\n-\t\t\t\t\t      buf->entry[i].pattern,\n-\t\t\t\t\t      p_actions_rx)) {\n+\t\tif (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {\n \t\t\tret = flow_tunnel_add_default_miss(dev, flow, attr,\n \t\t\t\t\t\t\t   p_actions_rx,\n \t\t\t\t\t\t\t   idx,\n+\t\t\t\t\t\t\t   wks->flows[0].tunnel,\n \t\t\t\t\t\t\t   &default_miss_ctx,\n \t\t\t\t\t\t\t   error);\n \t\t\tif (ret < 0) {\n@@ -5741,7 +5733,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \t}\n \tflow_rxq_flags_set(dev, flow);\n \trte_free(translated_actions);\n-\ttunnel = flow_tunnel_from_rule(dev, attr, items, actions);\n+\ttunnel = flow_tunnel_from_rule(wks->flows);\n \tif (tunnel) {\n \t\tflow->tunnel = 1;\n \t\tflow->tunnel_id = tunnel->tunnel_id;\n@@ -7459,6 +7451,28 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)\n \treturn ret;\n }\n \n+const struct mlx5_flow_tunnel *\n+mlx5_get_tof(const struct rte_flow_item *item,\n+\t     const struct rte_flow_action *action,\n+\t     enum mlx5_tof_rule_type *rule_type)\n+{\n+\tfor (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {\n+\t\tif (item->type == (typeof(item->type))\n+\t\t\t\t  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {\n+\t\t\t*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;\n+\t\t\treturn flow_items_to_tunnel(item);\n+\t\t}\n+\t}\n+\tfor (; action->conf != RTE_FLOW_ACTION_TYPE_END; action++) {\n+\t\tif (action->type == (typeof(action->type))\n+\t\t\t\t    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {\n+\t\t\t*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;\n+\t\t\treturn flow_actions_to_tunnel(action);\n+\t\t}\n+\t}\n+\treturn NULL;\n+}\n+\n /**\n  * tunnel offload functionalilty is defined for DV environment only\n  */\n@@ -7489,13 +7503,13 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n \t\t\t     const struct rte_flow_attr *attr,\n \t\t\t     const struct rte_flow_action *app_actions,\n 
\t\t\t     uint32_t flow_idx,\n+\t\t\t     const struct mlx5_flow_tunnel *tunnel,\n \t\t\t     struct tunnel_default_miss_ctx *ctx,\n \t\t\t     struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_flow *dev_flow;\n \tstruct rte_flow_attr miss_attr = *attr;\n-\tconst struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;\n \tconst struct rte_flow_item miss_items[2] = {\n \t\t{\n \t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n@@ -7581,6 +7595,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n \tdev_flow->flow = flow;\n \tdev_flow->external = true;\n \tdev_flow->tunnel = tunnel;\n+\tdev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;\n \t/* Subflow object was created, we must include one in the list. */\n \tSILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,\n \t\t      dev_flow->handle, next);\n@@ -8192,6 +8207,7 @@ flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,\n \t\t\t     __rte_unused const struct rte_flow_attr *attr,\n \t\t\t     __rte_unused const struct rte_flow_action *actions,\n \t\t\t     __rte_unused uint32_t flow_idx,\n+\t\t\t     __rte_unused const struct mlx5_flow_tunnel *tunnel,\n \t\t\t     __rte_unused struct tunnel_default_miss_ctx *ctx,\n \t\t\t     __rte_unused struct rte_flow_error *error)\n {\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex ec673c29ab..61f40adc25 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -783,6 +783,16 @@ struct mlx5_flow_verbs_workspace {\n /** Maximal number of device sub-flows supported. */\n #define MLX5_NUM_MAX_DEV_FLOWS 32\n \n+/**\n+ * tunnel offload rules type\n+ */\n+enum mlx5_tof_rule_type {\n+\tMLX5_TUNNEL_OFFLOAD_NONE = 0,\n+\tMLX5_TUNNEL_OFFLOAD_SET_RULE,\n+\tMLX5_TUNNEL_OFFLOAD_MATCH_RULE,\n+\tMLX5_TUNNEL_OFFLOAD_MISS_RULE,\n+};\n+\n /** Device flow structure. */\n __extension__\n struct mlx5_flow {\n@@ -818,6 +828,7 @@ struct mlx5_flow {\n \tstruct mlx5_flow_handle *handle;\n \tuint32_t handle_idx; /* Index of the mlx5 flow handle memory. */\n \tconst struct mlx5_flow_tunnel *tunnel;\n+\tenum mlx5_tof_rule_type tof_type;\n };\n \n /* Flow meter state. 
*/\n@@ -1029,10 +1040,10 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)\n }\n \n static inline bool\n-is_tunnel_offload_active(struct rte_eth_dev *dev)\n+is_tunnel_offload_active(const struct rte_eth_dev *dev)\n {\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct mlx5_priv *priv = dev->data->dev_private;\n \treturn !!priv->config.dv_miss_info;\n #else\n \tRTE_SET_USED(dev);\n@@ -1041,23 +1052,15 @@ is_tunnel_offload_active(struct rte_eth_dev *dev)\n }\n \n static inline bool\n-is_flow_tunnel_match_rule(__rte_unused struct rte_eth_dev *dev,\n-\t\t\t  __rte_unused const struct rte_flow_attr *attr,\n-\t\t\t  __rte_unused const struct rte_flow_item items[],\n-\t\t\t  __rte_unused const struct rte_flow_action actions[])\n+is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)\n {\n-\treturn (items[0].type == (typeof(items[0].type))\n-\t\t\t\t MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL);\n+\treturn tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;\n }\n \n static inline bool\n-is_flow_tunnel_steer_rule(__rte_unused struct rte_eth_dev *dev,\n-\t\t\t  __rte_unused const struct rte_flow_attr *attr,\n-\t\t\t  __rte_unused const struct rte_flow_item items[],\n-\t\t\t  __rte_unused const struct rte_flow_action actions[])\n+is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)\n {\n-\treturn (actions[0].type == (typeof(actions[0].type))\n-\t\t\t\t   MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET);\n+\treturn tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;\n }\n \n static inline const struct mlx5_flow_tunnel *\n@@ -1299,11 +1302,10 @@ struct flow_grp_info {\n \n static inline bool\n tunnel_use_standard_attr_group_translate\n-\t\t    (struct rte_eth_dev *dev,\n-\t\t     const struct mlx5_flow_tunnel *tunnel,\n+\t\t    (const struct rte_eth_dev *dev,\n \t\t     const struct rte_flow_attr *attr,\n-\t\t     const struct rte_flow_item items[],\n-\t\t     const struct rte_flow_action actions[])\n+\t\t     const struct mlx5_flow_tunnel *tunnel,\n+\t\t     enum mlx5_tof_rule_type tof_rule_type)\n {\n \tbool verdict;\n \n@@ -1319,7 +1321,7 @@ tunnel_use_standard_attr_group_translate\n \t\t * method\n \t\t */\n \t\tverdict = !attr->group &&\n-\t\t\t  is_flow_tunnel_steer_rule(dev, attr, items, actions);\n+\t\t\t  is_flow_tunnel_steer_rule(tof_rule_type);\n \t} else {\n \t\t/*\n \t\t * non-tunnel group translation uses standard method for\n@@ -1580,6 +1582,10 @@ int mlx5_flow_os_init_workspace_once(void);\n void *mlx5_flow_os_get_specific_workspace(void);\n int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);\n void mlx5_flow_os_release_workspace(void);\n+const struct mlx5_flow_tunnel *\n+mlx5_get_tof(const struct rte_flow_item *items,\n+\t     const struct rte_flow_action *actions,\n+\t     enum mlx5_tof_rule_type *rule_type);\n \n \n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex e65cc13bd6..3b16f75743 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -6100,32 +6100,33 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \tuint32_t rw_act_num = 0;\n \tuint64_t is_root;\n \tconst struct mlx5_flow_tunnel *tunnel;\n+\tenum mlx5_tof_rule_type tof_rule_type;\n \tstruct flow_grp_info grp_info = {\n \t\t.external = !!external,\n \t\t.transfer = !!attr->transfer,\n \t\t.fdb_def_rule = !!priv->fdb_def_rule,\n+\t\t.std_tbl_fix = true,\n \t};\n \tconst struct rte_eth_hairpin_conf *conf;\n \n \tif (items == NULL)\n 
\t\treturn -1;\n-\tif (is_flow_tunnel_match_rule(dev, attr, items, actions)) {\n-\t\ttunnel = flow_items_to_tunnel(items);\n-\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |\n-\t\t\t\tMLX5_FLOW_ACTION_DECAP;\n-\t} else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {\n-\t\ttunnel = flow_actions_to_tunnel(actions);\n-\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;\n-\t} else {\n-\t\ttunnel = NULL;\n+\ttunnel = is_tunnel_offload_active(dev) ?\n+\t\t mlx5_get_tof(items, actions, &tof_rule_type) : NULL;\n+\tif (tunnel) {\n+\t\tif (priv->representor)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, ENOTSUP,\n+\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t NULL, \"decap not supported for VF representor\");\n+\t\tif (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;\n+\t\telse if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |\n+\t\t\t\t\tMLX5_FLOW_ACTION_DECAP;\n+\t\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n+\t\t\t\t\t(dev, attr, tunnel, tof_rule_type);\n \t}\n-\tif (tunnel && priv->representor)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t\t  \"decap not supported \"\n-\t\t\t\t\t  \"for VF representor\");\n-\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n-\t\t\t\t(dev, tunnel, attr, items, actions);\n \tret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);\n \tif (ret < 0)\n \t\treturn ret;\n@@ -10909,13 +10910,14 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \tint tmp_actions_n = 0;\n \tuint32_t table;\n \tint ret = 0;\n-\tconst struct mlx5_flow_tunnel *tunnel;\n+\tconst struct mlx5_flow_tunnel *tunnel = NULL;\n \tstruct flow_grp_info grp_info = {\n \t\t.external = !!dev_flow->external,\n \t\t.transfer = !!attr->transfer,\n \t\t.fdb_def_rule = !!priv->fdb_def_rule,\n \t\t.skip_scale = dev_flow->skip_scale &\n \t\t\t(1 << MLX5_SCALE_FLOW_GROUP_BIT),\n+\t\t.std_tbl_fix = true,\n \t};\n \n \tif (!wks)\n@@ -10930,15 +10932,21 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\t\t\t\t   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n \t/* update normal path action resource into last index of array */\n \tsample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];\n-\ttunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?\n-\t\t flow_items_to_tunnel(items) :\n-\t\t is_flow_tunnel_steer_rule(dev, attr, items, actions) ?\n-\t\t flow_actions_to_tunnel(actions) :\n-\t\t dev_flow->tunnel ? dev_flow->tunnel : NULL;\n+\tif (is_tunnel_offload_active(dev)) {\n+\t\tif (dev_flow->tunnel) {\n+\t\t\tRTE_VERIFY(dev_flow->tof_type ==\n+\t\t\t\t   MLX5_TUNNEL_OFFLOAD_MISS_RULE);\n+\t\t\ttunnel = dev_flow->tunnel;\n+\t\t} else {\n+\t\t\ttunnel = mlx5_get_tof(items, actions,\n+\t\t\t\t\t      &dev_flow->tof_type);\n+\t\t\tdev_flow->tunnel = tunnel;\n+\t\t}\n+\t\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n+\t\t\t\t\t(dev, attr, tunnel, dev_flow->tof_type);\n+\t}\n \tmhdr_res->ft_type = attr->egress ? 
MLX5DV_FLOW_TABLE_TYPE_NIC_TX :\n \t\t\t\t\t   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n-\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n-\t\t\t\t(dev, tunnel, attr, items, actions);\n \tret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,\n \t\t\t\t       &grp_info, error);\n \tif (ret)\n@@ -10948,7 +10956,7 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\tmhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;\n \t/* number of actions must be set to 0 in case of dirty stack. */\n \tmhdr_res->actions_num = 0;\n-\tif (is_flow_tunnel_match_rule(dev, attr, items, actions)) {\n+\tif (is_flow_tunnel_match_rule(dev_flow->tof_type)) {\n \t\t/*\n \t\t * do not add decap action if match rule drops packet\n \t\t * HW rejects rules with decap & drop\n",
    "prefixes": [
        "v2",
        "1/2"
    ]
}
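
The Allow header above also lists PUT and PATCH; updates require an authenticated maintainer account. A minimal sketch, assuming Patchwork's token authentication and that "state" and "archived" (both visible in the response above) are the writable fields; the token value and the target state slug are placeholders, not taken from this page:

import requests

API_TOKEN = "0123456789abcdef"  # placeholder: a real per-user Patchwork API token is required

# Partial update via PATCH: only the supplied fields are changed.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/92134/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted", "archived": False},  # state slugs are defined per instance
)
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])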