get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
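
Below is a minimal sketch of driving this endpoint from Python with the requests library. The patch ID and the "state" field come from the sample response shown further down; the token value, and the assumption that write requests need a maintainer API token sent as "Authorization: Token ...", are illustrative and not documented on this page.

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical token, replace with a real one

# GET /api/patches/{id}/ returns the JSON object shown below.
patch = requests.get(f"{BASE}/patches/91752/").json()
print(patch["name"], "->", patch["state"])

# PATCH performs a partial update; here only the state field is changed.
resp = requests.patch(
    f"{BASE}/patches/91752/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()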

GET /api/patches/91752/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 91752,
    "url": "http://patches.dpdk.org/api/patches/91752/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210419130204.24348-1-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210419130204.24348-1-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210419130204.24348-1-getelson@nvidia.com",
    "date": "2021-04-19T13:02:03",
    "name": "[1/2] net/mlx5: fix tunnel offload private items location",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "42c9f6a49d8ba76109d78628308bd2938603a540",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210419130204.24348-1-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 16491,
            "url": "http://patches.dpdk.org/api/series/16491/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=16491",
            "date": "2021-04-19T13:02:03",
            "name": "[1/2] net/mlx5: fix tunnel offload private items location",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/16491/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/91752/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/91752/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 38B94A0524;\n\tMon, 19 Apr 2021 15:02:27 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id BDFAB41283;\n\tMon, 19 Apr 2021 15:02:26 +0200 (CEST)",
            "from NAM10-BN7-obe.outbound.protection.outlook.com\n (mail-bn7nam10on2060.outbound.protection.outlook.com [40.107.92.60])\n by mails.dpdk.org (Postfix) with ESMTP id 5E8F44123B;\n Mon, 19 Apr 2021 15:02:25 +0200 (CEST)",
            "from MWHPR19CA0022.namprd19.prod.outlook.com (2603:10b6:300:d4::32)\n by BYAPR12MB2840.namprd12.prod.outlook.com (2603:10b6:a03:62::32)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4042.18; Mon, 19 Apr\n 2021 13:02:23 +0000",
            "from CO1NAM11FT046.eop-nam11.prod.protection.outlook.com\n (2603:10b6:300:d4:cafe::a6) by MWHPR19CA0022.outlook.office365.com\n (2603:10b6:300:d4::32) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4042.16 via Frontend\n Transport; Mon, 19 Apr 2021 13:02:23 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n CO1NAM11FT046.mail.protection.outlook.com (10.13.174.203) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4042.16 via Frontend Transport; Mon, 19 Apr 2021 13:02:22 +0000",
            "from nvidia.com (172.20.145.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Mon, 19 Apr\n 2021 13:02:19 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=nl/18wTTlczio6bn7qVQ55K37AJ6r71lS4CHXmGxJoLOsKou5ICWdMuse6Yxub4Sfdshrpwxt7K3GEj0XDcIv4U93fU2y+neWASJ0FgS24P1kHY2O+/z5KFHgerut3v3uVtIq/WsENDT4t/tRd21l/TIKQA6bQyseyiP4govrkdK6ko+kzTaiEphC+zYVkiqex9jy1qy082a80YTafWKQoucp04/ujoSTIPr04jIfk7fZZgkJ+ws5YWHpCSrlxcQ1U8kP68E1fmcCUDtzklJlOqaxGRVMSLZS5npNeP2OWlBRHuEbQUwiS9RGYymi2katHxD6oxeKhhbRQcuZTW8Kw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=UAvXVboLBWh/EgzuGeAOTW95L8iJQ8WTcpPu0UV1sN8=;\n b=mdq/KLPdu4A+uW9t6IJeAbr7FJxo5LnSIDsNI19ikxPiuljvSwF1ML1/JR2HYXK7mv9z8c9/vp+f4tdlvo1GBNSErlbAnwCBn+u9djCF83WlC8AceVxNILMtEbGMBGcHw/Sv91pY/+ioPmjre9dZUoLc9y+OPwLXA5AQu3dNSMI5VcMuArocXit+VkdFq/tP5GaENc4IiRqwlxzV4+bKbRVN6pihDXh6Zw//8I8x6bWT1GOo4v6MQow6WgW2OJaDyUyhR5p1chZLfE+QFQUIXC4D7x8egkP3vEMKyGXnuHC/FTHB335YYDXYDKmaibP0q4ZZfgRRe2sX08NEzaOpeg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=UAvXVboLBWh/EgzuGeAOTW95L8iJQ8WTcpPu0UV1sN8=;\n b=XW19EmXP4X5QryOdySJA3tAj2rMuIzBg1ykMG0HMxVqqwf9bA9GzNAMyNlhmK4YZTLvph4m40ysOW/jCwW18F19/KFuVpNba+sq7L5T0GOy/UEmPGls44NwsrAPkYiPBtfQsJLzCGfSn5xdOgbMMfqxsfGxIr3vtDf18KFrh7NFTYPz/w7zQ+l+bYG4+TI890YI+gNVH17y72Dk03c7Ztrfsf9HnMJaiLqRZ+LwrjhqP/uipFBYp2xgyD+lcCvxeyvooIU996hc7/55CSv38tGjweC9K1/qn4QbMGLQNhRDTL+ANY+2ep4TV6/FX5VJvQNfmAYgLAcXONkAgwoYjkg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,\n <stable@dpdk.org>, Viacheslav Ovsiienko <viacheslavo@mellanox.com>, \"Shahaf\n Shuler\" <shahafs@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Mon, 19 Apr 2021 16:02:03 +0300",
        "Message-ID": "<20210419130204.24348-1-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.145.6]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "4f2f4e74-e338-43f3-937d-08d903335f84",
        "X-MS-TrafficTypeDiagnostic": "BYAPR12MB2840:",
        "X-Microsoft-Antispam-PRVS": "\n <BYAPR12MB2840D060928ECC920C5D3C69A5499@BYAPR12MB2840.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:7691;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n f3KfqLxTGjr1TpCXnGLlC07DFrpiFsRQ7u5vCNZ89h79lc1hsa3Qc+HRCl+I0h+RvwFRDrjmHRzL5I1iiwDOMuUZ3eHs539iMyhJzTKSH1lbqJuzkoubZPRg57ewp73kg0qydYsZ2zX8zpNyKKET0UOe1YsevPc5usJ+GX8GrkYjGBZHu8RVLn5pmKSliZNKMhEL/SRJoEkao3EJQXxzc9glRfrRla8v89ieaza8u5y3p0u+GKWYlCRTNGSQOyhmy0dxXF0REUZPBVYFdbiDxswuWMlj2nm5q/j+llh51mn+d2lNoOhj/wed7PnRIl3jAgAsuTfN42f3V0Q94S4PsfzYsJnA8MHp7eyTJv/I0GoIdgftZXP4bbgMtBi+jyLKpZckTnYWY4PJhfNd7ye4khJ4xpewzESeqMkBX99yrNVSLD0uYEF7QyXiiBWcpMpzGJG7Dxze7QaA+SgPgW6CHqqtvvlIaabNS9dXBa1oWg9Wz/lve2LuI4HdPzyd6CRDqYmwEMAbPFmgsI6Md21mvJUQTSzUnap4NfLp2v34lOG1mWsGorOUKI583tk236SiHmntv7M3ZIU5gsNgI41cg3H9DzNfkgi3NUjF8P64ow01jsdO0F9/X/KFCpcDY3O7cl4w5TltA5s9WtEWE3/pyeHtGcYjBvj/6vqawQvWO8hA7mrfy1rS3C0ewqfCIVDz",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(346002)(376002)(136003)(396003)(39860400002)(46966006)(36840700001)(6916009)(7696005)(336012)(2616005)(426003)(86362001)(82740400003)(54906003)(478600001)(6286002)(83380400001)(356005)(7636003)(82310400003)(4326008)(70206006)(450100002)(107886003)(70586007)(8676002)(26005)(16526019)(186003)(36860700001)(8936002)(2906002)(1076003)(30864003)(47076005)(55016002)(5660300002)(316002)(6666004)(36906005)(36756003)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "19 Apr 2021 13:02:22.9365 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 4f2f4e74-e338-43f3-937d-08d903335f84",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT046.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BYAPR12MB2840",
        "Subject": "[dpdk-dev] [PATCH 1/2] net/mlx5: fix tunnel offload private items\n location",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Flow rules used in tunnel offload model require application to query\nPMD for private flow elements and explicitly add these elements to\nflow rule.\nTunnel offload model does not restrict private elements location in\na flow rule.\nThe patch fixes MLX5 PMD requirement to place private tunnel offload\nPMD flow elements before general flow items in a rule.\n\nFixes: 4ec6360de37d (\"net/mlx5: implement tunnel offload\")\n\nCc: stable@dpdk.org\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>\n---\n drivers/net/mlx5/mlx5_flow.c    | 48 ++++++++++++++++++---------\n drivers/net/mlx5/mlx5_flow.h    | 44 ++++++++++++++-----------\n drivers/net/mlx5/mlx5_flow_dv.c | 58 +++++++++++++++++++--------------\n 3 files changed, 90 insertions(+), 60 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex c347f8130..67973d73d 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -50,6 +50,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n \t\t\t     const struct rte_flow_attr *attr,\n \t\t\t     const struct rte_flow_action *app_actions,\n \t\t\t     uint32_t flow_idx,\n+\t\t\t     const struct mlx5_flow_tunnel *tunnel,\n \t\t\t     struct tunnel_default_miss_ctx *ctx,\n \t\t\t     struct rte_flow_error *error);\n static struct mlx5_flow_tunnel *\n@@ -5478,22 +5479,14 @@ flow_create_split_outer(struct rte_eth_dev *dev,\n \treturn ret;\n }\n \n-static struct mlx5_flow_tunnel *\n-flow_tunnel_from_rule(struct rte_eth_dev *dev,\n-\t\t      const struct rte_flow_attr *attr,\n-\t\t      const struct rte_flow_item items[],\n-\t\t      const struct rte_flow_action actions[])\n+static inline struct mlx5_flow_tunnel *\n+flow_tunnel_from_rule(const struct mlx5_flow *flow)\n {\n \tstruct mlx5_flow_tunnel *tunnel;\n \n #pragma GCC diagnostic push\n #pragma GCC diagnostic ignored \"-Wcast-qual\"\n-\tif (is_flow_tunnel_match_rule(dev, attr, items, actions))\n-\t\ttunnel = (struct mlx5_flow_tunnel *)items[0].spec;\n-\telse if (is_flow_tunnel_steer_rule(dev, attr, items, actions))\n-\t\ttunnel = (struct mlx5_flow_tunnel *)actions[0].conf;\n-\telse\n-\t\ttunnel = NULL;\n+\ttunnel = (typeof(tunnel))flow->tunnel;\n #pragma GCC diagnostic pop\n \n \treturn tunnel;\n@@ -5687,12 +5680,11 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \t\t\t\t\t      error);\n \t\tif (ret < 0)\n \t\t\tgoto error;\n-\t\tif (is_flow_tunnel_steer_rule(dev, attr,\n-\t\t\t\t\t      buf->entry[i].pattern,\n-\t\t\t\t\t      p_actions_rx)) {\n+\t\tif (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {\n \t\t\tret = flow_tunnel_add_default_miss(dev, flow, attr,\n \t\t\t\t\t\t\t   p_actions_rx,\n \t\t\t\t\t\t\t   idx,\n+\t\t\t\t\t\t\t   wks->flows[0].tunnel,\n \t\t\t\t\t\t\t   &default_miss_ctx,\n \t\t\t\t\t\t\t   error);\n \t\t\tif (ret < 0) {\n@@ -5756,7 +5748,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \t}\n \tflow_rxq_flags_set(dev, flow);\n \trte_free(translated_actions);\n-\ttunnel = flow_tunnel_from_rule(dev, attr, items, actions);\n+\ttunnel = flow_tunnel_from_rule(wks->flows);\n \tif (tunnel) {\n \t\tflow->tunnel = 1;\n \t\tflow->tunnel_id = tunnel->tunnel_id;\n@@ -7471,6 +7463,28 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)\n \treturn ret;\n }\n \n+const struct mlx5_flow_tunnel *\n+mlx5_get_tof(const struct rte_flow_item *item,\n+\t     const struct rte_flow_action *action,\n+\t     enum mlx5_tof_rule_type *rule_type)\n+{\n+\tfor (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {\n+\t\tif (item->type == (typeof(item->type))\n+\t\t\t\t  MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {\n+\t\t\t*rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;\n+\t\t\treturn flow_items_to_tunnel(item);\n+\t\t}\n+\t}\n+\tfor (; action->conf != RTE_FLOW_ACTION_TYPE_END; action++) {\n+\t\tif (action->type == (typeof(action->type))\n+\t\t\t\t    MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {\n+\t\t\t*rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;\n+\t\t\treturn flow_actions_to_tunnel(action);\n+\t\t}\n+\t}\n+\treturn NULL;\n+}\n+\n /**\n  * tunnel offload functionalilty is defined for DV environment only\n  */\n@@ -7501,13 +7515,13 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n \t\t\t     const struct rte_flow_attr *attr,\n \t\t\t     const struct rte_flow_action *app_actions,\n \t\t\t 
    uint32_t flow_idx,\n+\t\t\t     const struct mlx5_flow_tunnel *tunnel,\n \t\t\t     struct tunnel_default_miss_ctx *ctx,\n \t\t\t     struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_flow *dev_flow;\n \tstruct rte_flow_attr miss_attr = *attr;\n-\tconst struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;\n \tconst struct rte_flow_item miss_items[2] = {\n \t\t{\n \t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n@@ -7593,6 +7607,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n \tdev_flow->flow = flow;\n \tdev_flow->external = true;\n \tdev_flow->tunnel = tunnel;\n+\tdev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;\n \t/* Subflow object was created, we must include one in the list. */\n \tSILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,\n \t\t      dev_flow->handle, next);\n@@ -8204,6 +8219,7 @@ flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,\n \t\t\t     __rte_unused const struct rte_flow_attr *attr,\n \t\t\t     __rte_unused const struct rte_flow_action *actions,\n \t\t\t     __rte_unused uint32_t flow_idx,\n+\t\t\t     __rte_unused const struct mlx5_flow_tunnel *tunnel,\n \t\t\t     __rte_unused struct tunnel_default_miss_ctx *ctx,\n \t\t\t     __rte_unused struct rte_flow_error *error)\n {\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex ec673c29a..61f40adc2 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -783,6 +783,16 @@ struct mlx5_flow_verbs_workspace {\n /** Maximal number of device sub-flows supported. */\n #define MLX5_NUM_MAX_DEV_FLOWS 32\n \n+/**\n+ * tunnel offload rules type\n+ */\n+enum mlx5_tof_rule_type {\n+\tMLX5_TUNNEL_OFFLOAD_NONE = 0,\n+\tMLX5_TUNNEL_OFFLOAD_SET_RULE,\n+\tMLX5_TUNNEL_OFFLOAD_MATCH_RULE,\n+\tMLX5_TUNNEL_OFFLOAD_MISS_RULE,\n+};\n+\n /** Device flow structure. */\n __extension__\n struct mlx5_flow {\n@@ -818,6 +828,7 @@ struct mlx5_flow {\n \tstruct mlx5_flow_handle *handle;\n \tuint32_t handle_idx; /* Index of the mlx5 flow handle memory. */\n \tconst struct mlx5_flow_tunnel *tunnel;\n+\tenum mlx5_tof_rule_type tof_type;\n };\n \n /* Flow meter state. 
*/\n@@ -1029,10 +1040,10 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)\n }\n \n static inline bool\n-is_tunnel_offload_active(struct rte_eth_dev *dev)\n+is_tunnel_offload_active(const struct rte_eth_dev *dev)\n {\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct mlx5_priv *priv = dev->data->dev_private;\n \treturn !!priv->config.dv_miss_info;\n #else\n \tRTE_SET_USED(dev);\n@@ -1041,23 +1052,15 @@ is_tunnel_offload_active(struct rte_eth_dev *dev)\n }\n \n static inline bool\n-is_flow_tunnel_match_rule(__rte_unused struct rte_eth_dev *dev,\n-\t\t\t  __rte_unused const struct rte_flow_attr *attr,\n-\t\t\t  __rte_unused const struct rte_flow_item items[],\n-\t\t\t  __rte_unused const struct rte_flow_action actions[])\n+is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)\n {\n-\treturn (items[0].type == (typeof(items[0].type))\n-\t\t\t\t MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL);\n+\treturn tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;\n }\n \n static inline bool\n-is_flow_tunnel_steer_rule(__rte_unused struct rte_eth_dev *dev,\n-\t\t\t  __rte_unused const struct rte_flow_attr *attr,\n-\t\t\t  __rte_unused const struct rte_flow_item items[],\n-\t\t\t  __rte_unused const struct rte_flow_action actions[])\n+is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)\n {\n-\treturn (actions[0].type == (typeof(actions[0].type))\n-\t\t\t\t   MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET);\n+\treturn tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;\n }\n \n static inline const struct mlx5_flow_tunnel *\n@@ -1299,11 +1302,10 @@ struct flow_grp_info {\n \n static inline bool\n tunnel_use_standard_attr_group_translate\n-\t\t    (struct rte_eth_dev *dev,\n-\t\t     const struct mlx5_flow_tunnel *tunnel,\n+\t\t    (const struct rte_eth_dev *dev,\n \t\t     const struct rte_flow_attr *attr,\n-\t\t     const struct rte_flow_item items[],\n-\t\t     const struct rte_flow_action actions[])\n+\t\t     const struct mlx5_flow_tunnel *tunnel,\n+\t\t     enum mlx5_tof_rule_type tof_rule_type)\n {\n \tbool verdict;\n \n@@ -1319,7 +1321,7 @@ tunnel_use_standard_attr_group_translate\n \t\t * method\n \t\t */\n \t\tverdict = !attr->group &&\n-\t\t\t  is_flow_tunnel_steer_rule(dev, attr, items, actions);\n+\t\t\t  is_flow_tunnel_steer_rule(tof_rule_type);\n \t} else {\n \t\t/*\n \t\t * non-tunnel group translation uses standard method for\n@@ -1580,6 +1582,10 @@ int mlx5_flow_os_init_workspace_once(void);\n void *mlx5_flow_os_get_specific_workspace(void);\n int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);\n void mlx5_flow_os_release_workspace(void);\n+const struct mlx5_flow_tunnel *\n+mlx5_get_tof(const struct rte_flow_item *items,\n+\t     const struct rte_flow_action *actions,\n+\t     enum mlx5_tof_rule_type *rule_type);\n \n \n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 533dadf07..c9fe4aa8c 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -6099,32 +6099,33 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \tuint32_t rw_act_num = 0;\n \tuint64_t is_root;\n \tconst struct mlx5_flow_tunnel *tunnel;\n+\tenum mlx5_tof_rule_type tof_rule_type;\n \tstruct flow_grp_info grp_info = {\n \t\t.external = !!external,\n \t\t.transfer = !!attr->transfer,\n \t\t.fdb_def_rule = !!priv->fdb_def_rule,\n+\t\t.std_tbl_fix = true,\n \t};\n \tconst struct rte_eth_hairpin_conf *conf;\n \n \tif (items == NULL)\n 
\t\treturn -1;\n-\tif (is_flow_tunnel_match_rule(dev, attr, items, actions)) {\n-\t\ttunnel = flow_items_to_tunnel(items);\n-\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |\n-\t\t\t\tMLX5_FLOW_ACTION_DECAP;\n-\t} else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {\n-\t\ttunnel = flow_actions_to_tunnel(actions);\n-\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;\n-\t} else {\n-\t\ttunnel = NULL;\n+\ttunnel = is_tunnel_offload_active(dev) ?\n+\t\t mlx5_get_tof(items, actions, &tof_rule_type) : NULL;\n+\tif (tunnel) {\n+\t\tif (priv->representor)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, ENOTSUP,\n+\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t NULL, \"decap not supported for VF representor\");\n+\t\tif (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;\n+\t\telse if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |\n+\t\t\t\t\tMLX5_FLOW_ACTION_DECAP;\n+\t\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n+\t\t\t\t\t(dev, attr, tunnel, tof_rule_type);\n \t}\n-\tif (tunnel && priv->representor)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t\t  \"decap not supported \"\n-\t\t\t\t\t  \"for VF representor\");\n-\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n-\t\t\t\t(dev, tunnel, attr, items, actions);\n \tret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);\n \tif (ret < 0)\n \t\treturn ret;\n@@ -10920,13 +10921,14 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \tint tmp_actions_n = 0;\n \tuint32_t table;\n \tint ret = 0;\n-\tconst struct mlx5_flow_tunnel *tunnel;\n+\tconst struct mlx5_flow_tunnel *tunnel = NULL;\n \tstruct flow_grp_info grp_info = {\n \t\t.external = !!dev_flow->external,\n \t\t.transfer = !!attr->transfer,\n \t\t.fdb_def_rule = !!priv->fdb_def_rule,\n \t\t.skip_scale = dev_flow->skip_scale &\n \t\t\t(1 << MLX5_SCALE_FLOW_GROUP_BIT),\n+\t\t.std_tbl_fix = true,\n \t};\n \n \tif (!wks)\n@@ -10941,15 +10943,21 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\t\t\t\t   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n \t/* update normal path action resource into last index of array */\n \tsample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];\n-\ttunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?\n-\t\t flow_items_to_tunnel(items) :\n-\t\t is_flow_tunnel_steer_rule(dev, attr, items, actions) ?\n-\t\t flow_actions_to_tunnel(actions) :\n-\t\t dev_flow->tunnel ? dev_flow->tunnel : NULL;\n+\tif (is_tunnel_offload_active(dev)) {\n+\t\tif (dev_flow->tunnel) {\n+\t\t\tRTE_VERIFY(dev_flow->tof_type ==\n+\t\t\t\t   MLX5_TUNNEL_OFFLOAD_MISS_RULE);\n+\t\t\ttunnel = dev_flow->tunnel;\n+\t\t} else {\n+\t\t\ttunnel = mlx5_get_tof(items, actions,\n+\t\t\t\t\t      &dev_flow->tof_type);\n+\t\t\tdev_flow->tunnel = tunnel;\n+\t\t}\n+\t\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n+\t\t\t\t\t(dev, attr, tunnel, dev_flow->tof_type);\n+\t}\n \tmhdr_res->ft_type = attr->egress ? 
MLX5DV_FLOW_TABLE_TYPE_NIC_TX :\n \t\t\t\t\t   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n-\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n-\t\t\t\t(dev, tunnel, attr, items, actions);\n \tret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,\n \t\t\t\t       &grp_info, error);\n \tif (ret)\n@@ -10959,7 +10967,7 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\tmhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;\n \t/* number of actions must be set to 0 in case of dirty stack. */\n \tmhdr_res->actions_num = 0;\n-\tif (is_flow_tunnel_match_rule(dev, attr, items, actions)) {\n+\tif (is_flow_tunnel_match_rule(dev_flow->tof_type)) {\n \t\t/*\n \t\t * do not add decap action if match rule drops packet\n \t\t * HW rejects rules with decap & drop\n",
    "prefixes": [
        "1/2"
    ]
}
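
The hyperlinked fields in the object above ("mbox", "checks", "comments", "series") are plain URLs, so a client can walk them directly. Below is a minimal sketch of doing so, assuming the linked API endpoints return JSON (the mbox is plain text) and that check objects carry "context" and "state" fields; the output filename is made up for illustration.

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/91752/").json()

# "mbox" points at the raw patch email, suitable for `git am`.
mbox = requests.get(patch["mbox"]).text
with open("mlx5-tunnel-offload-fix.mbox", "w") as f:  # hypothetical filename
    f.write(mbox)

# "checks" lists per-CI results; "check" above is their aggregate ("success").
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])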