get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.

GET /api/patches/133184/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 133184,
    "url": "http://patches.dpdk.org/api/patches/133184/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20231023124225.141461-11-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231023124225.141461-11-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231023124225.141461-11-getelson@nvidia.com",
    "date": "2023-10-23T12:42:25",
    "name": "[v4,10/10] net/mlx5: support indirect list METER_MARK action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4c424cca1bd29fcaed3859a9c58ca6c425c9a169",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20231023124225.141461-11-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 29953,
            "url": "http://patches.dpdk.org/api/series/29953/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29953",
            "date": "2023-10-23T12:42:15",
            "name": "net/mlx5: support indirect actions list",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/29953/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/133184/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/133184/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1E31F431E2;\n\tMon, 23 Oct 2023 14:44:17 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1F19942DA1;\n\tMon, 23 Oct 2023 14:43:39 +0200 (CEST)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2078.outbound.protection.outlook.com [40.107.93.78])\n by mails.dpdk.org (Postfix) with ESMTP id 32B4842DA1\n for <dev@dpdk.org>; Mon, 23 Oct 2023 14:43:38 +0200 (CEST)",
            "from BYAPR11CA0094.namprd11.prod.outlook.com (2603:10b6:a03:f4::35)\n by SJ0PR12MB8139.namprd12.prod.outlook.com (2603:10b6:a03:4e8::5)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6907.26; Mon, 23 Oct\n 2023 12:43:35 +0000",
            "from CO1PEPF000044F0.namprd05.prod.outlook.com\n (2603:10b6:a03:f4:cafe::27) by BYAPR11CA0094.outlook.office365.com\n (2603:10b6:a03:f4::35) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6907.33 via Frontend\n Transport; Mon, 23 Oct 2023 12:43:35 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n CO1PEPF000044F0.mail.protection.outlook.com (10.167.241.70) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.6933.15 via Frontend Transport; Mon, 23 Oct 2023 12:43:35 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Mon, 23 Oct\n 2023 05:43:26 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Mon, 23 Oct\n 2023 05:43:23 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=eq8MHUE6d+ZesV4EzBhCjjSPliPdz4EDKdM2MBLNhSWGZ0nvj+e5BawXNiNCOf4uP4GhgxuXTyEb9G3SHY4qmffrCccRW7RYQDoi79+CfDiRmrOq7oj92vp5ItwcB8TdKvFu3VRvGUba+Bg4Ja09yXH+KPvR8YzKG91o7qyhx8l48PAki0h2GwiXAr6jNZBD0u1j6c5i7J1qyLsYl+iVLfgk0ZkI8iFYkK88bTb7ZyZZNlOD/2Ym4DP0iDLmGj2nQlFdys9ifmpeO1ZfmtOJaiClqHrhRkoVYk03PK1rm8xov/4G+f49rVgnfHvV/0OA0wrvqnhkCltRYeZ7jPwluQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=92IsjRMkLdoZnpTIMgMSIhya2q0pU556PrDqsIDCz/4=;\n b=NHQYr/AVzu7xhMowoA385hFkOAf9TmI3goOz9IT2fUGgOvlpw87v0BlzHKmCv2SNNwc6T3DMrCYiE/GFsA+Of8DWq1dRhjG1yZkhLWWTy1cHSjmNHiEn5UHsgin1VdikyJvU73PnesPZeG920x+NF4GzQ6Kw+ympeTglEWb+hupGlk1Hl6vGp9YR5YNJc4tyIvjJCmlbxeE5ZVjmXXjCnbW0PbClvwm2Do1AyU2FnG9yMNo7mgNdOW/FdHb4E/HXmcJnIxB00fvLEP8EoDR5nMwd+0rmgBYJ4QiEUly/m89UasOC13LYoCvPa+WE76U1dRMqY3FAQs6G1lor81llWg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=92IsjRMkLdoZnpTIMgMSIhya2q0pU556PrDqsIDCz/4=;\n b=LUxsiwdzus4T9kADu1LoNy9umI5ghC/CXamt75F9Cv7nXY+amN2r/AbHBixv7abTxuDyOHvkZP+gQAku8+WhJA2wYEAUtRfE22+iQDpAWLs4TTovve0jT0rN1ukx+FWULm4UBFvQe4ceDxMjWSWfyrnUaaUuNH4iOyZYWGVjDuYKw3Mjn/For3QFUlCIHtKWveYkP3BQG13pzTFO1Yn9SGmTNRJdMdt2vAlqzVx6hjxp/aqJs3UJDuyZQyO2GJpKdNH99gfASkogZOadb8O6VoAFkr4IzvYlmFJVcRrmikUzkXmmcNOUjtMGSt5+gxr+d3odHlGasg40Ce7dXA+/2Q==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, =?utf-8?b?wqA=?= <mkashani@nvidia.com>,\n <rasland@nvidia.com>, Suanming Mou <suanmingm@nvidia.com>,\n Matan Azrad <matan@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>",
        "Subject": "[PATCH v4 10/10] net/mlx5: support indirect list METER_MARK action",
        "Date": "Mon, 23 Oct 2023 15:42:25 +0300",
        "Message-ID": "<20231023124225.141461-11-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20231023124225.141461-1-getelson@nvidia.com>",
        "References": "<20231017080928.30454-1-getelson@nvidia.com>\n <20231023124225.141461-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "CO1PEPF000044F0:EE_|SJ0PR12MB8139:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "f4b84f11-1103-411f-738f-08dbd3c5ac48",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n M3+9iDF6Xt4EcBhvrZP8YwHPy0evHeFFNY3dFBZlqncggwDlKDWp0izUpzaSjLiLJujmo/kqOfkWxwVOSVBNH67EPmdhHC90ZL7tASuBVF6z35nG5PttXVrypGesRbQ08QMBWc/xe4qPrBCtGnPR2434o0hZ/HdwpDJwUQ/R118oFy7xM+GJ+mjAe8IOBMYZuTZ4bQTGidVG2GDFZYlW79qG4foSoJbzY9w39LRrsGfcNQmCguTSyGdVLrPJvUI4qSD+c3MtlVX8QHPDI4hZQxJlPH7kFnJnzxSOZv7qIBtWDHes0Jzi7l3V1KoAhhpKj5e/PE8N1mt5boVINkT1lF0HWrYQVEJ51ulexjbaQ1TRQoRwYvLM7CFFDQlaiDNv7fGYIevkLKdNwECnLdudOkqgsWj9zs2T/hpoDH4qDgcq0Et913B1xdx1vvp6M+vVKNePalnjQ+vnXN6YAnJrFy062zaSrPftYwuEDs73591kytYL0PWGNM88Sv0lvLS3jiDjED/4VwL7RBmM3XLeW9fbyVVBpS6eVqiMBdOZOw5vHNYR81Lrn9c645gCRIVIj7LoEKWhv0akNwVPtM6/EBGL2SJUP+6F5RTtX300KlkjQFeC3bHc7EMCW8SgLJhdrgPvNqg/wOrRY//p60ECzC+fU2NTufMsheizs3d8rkRlNWixzUI5cHz7CI9DLf4yuLOltJtIjnyuRnlE7Hl9wUMZwwP09NaA41VYiCk/PSCjxwb4o4ARR4XCv6l72fxo",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230031)(4636009)(39860400002)(396003)(376002)(346002)(136003)(230922051799003)(451199024)(1800799009)(186009)(82310400011)(64100799003)(36840700001)(40470700004)(46966006)(6916009)(2906002)(55016003)(30864003)(40460700003)(5660300002)(8936002)(8676002)(36756003)(4326008)(40480700001)(54906003)(41300700001)(70586007)(86362001)(70206006)(478600001)(7696005)(1076003)(316002)(107886003)(426003)(26005)(2616005)(7636003)(82740400003)(336012)(36860700001)(6286002)(16526019)(356005)(83380400001)(47076005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "23 Oct 2023 12:43:35.3629 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n f4b84f11-1103-411f-738f-08dbd3c5ac48",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1PEPF000044F0.namprd05.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SJ0PR12MB8139",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Signed-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c    |  69 ++++-\n drivers/net/mlx5/mlx5_flow.h    |  70 ++++--\n drivers/net/mlx5/mlx5_flow_hw.c | 430 +++++++++++++++++++++++++++-----\n 3 files changed, 484 insertions(+), 85 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 693d1320e1..16fce9c64e 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -75,8 +75,11 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)\n \t\tswitch (e->type) {\n #ifdef HAVE_MLX5_HWS_SUPPORT\n \t\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n-\t\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);\n+\t\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);\n \t\tbreak;\n+\t\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:\n+\t\t\tmlx5_destroy_legacy_indirect(dev, e);\n+\t\t\tbreak;\n #endif\n \t\tdefault:\n \t\t\tDRV_LOG(ERR, \"invalid indirect list type\");\n@@ -1169,7 +1172,24 @@ mlx5_flow_async_action_list_handle_destroy\n \t\t\t const struct rte_flow_op_attr *op_attr,\n \t\t\t struct rte_flow_action_list_handle *action_handle,\n \t\t\t void *user_data, struct rte_flow_error *error);\n-\n+static int\n+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\t  const\n+\t\t\t\t\t  struct rte_flow_action_list_handle *handle,\n+\t\t\t\t\t  const void **update, void **query,\n+\t\t\t\t\t  enum rte_flow_query_update_mode mode,\n+\t\t\t\t\t  struct rte_flow_error *error);\n+static int\n+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\t\tuint32_t queue_id,\n+\t\t\t\t\t\tconst struct rte_flow_op_attr *attr,\n+\t\t\t\t\t\tconst struct\n+\t\t\t\t\t\trte_flow_action_list_handle *handle,\n+\t\t\t\t\t\tconst void **update,\n+\t\t\t\t\t\tvoid **query,\n+\t\t\t\t\t\tenum rte_flow_query_update_mode mode,\n+\t\t\t\t\t\tvoid *user_data,\n+\t\t\t\t\t\tstruct rte_flow_error *error);\n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n \t.create = mlx5_flow_create,\n@@ -1219,6 +1239,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t\tmlx5_flow_async_action_list_handle_create,\n \t.async_action_list_handle_destroy =\n 
\t\tmlx5_flow_async_action_list_handle_destroy,\n+\t.action_list_handle_query_update =\n+\t\tmlx5_flow_action_list_handle_query_update,\n+\t.async_action_list_handle_query_update =\n+\t\tmlx5_flow_async_action_list_handle_query_update,\n };\n \n /* Tunnel information. */\n@@ -11003,6 +11027,47 @@ mlx5_flow_async_action_list_handle_destroy\n \t\t\t\t\t\t      error);\n }\n \n+static int\n+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\t  const\n+\t\t\t\t\t  struct rte_flow_action_list_handle *handle,\n+\t\t\t\t\t  const void **update, void **query,\n+\t\t\t\t\t  enum rte_flow_query_update_mode mode,\n+\t\t\t\t\t  struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tMLX5_DRV_FOPS_OR_ERR(dev, fops,\n+\t\t\t     action_list_handle_query_update, ENOTSUP);\n+\treturn fops->action_list_handle_query_update(dev, handle, update, query,\n+\t\t\t\t\t\t     mode, error);\n+}\n+\n+static int\n+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\t\tuint32_t queue_id,\n+\t\t\t\t\t\tconst\n+\t\t\t\t\t\tstruct rte_flow_op_attr *op_attr,\n+\t\t\t\t\t\tconst struct\n+\t\t\t\t\t\trte_flow_action_list_handle *handle,\n+\t\t\t\t\t\tconst void **update,\n+\t\t\t\t\t\tvoid **query,\n+\t\t\t\t\t\tenum\n+\t\t\t\t\t\trte_flow_query_update_mode mode,\n+\t\t\t\t\t\tvoid *user_data,\n+\t\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tMLX5_DRV_FOPS_OR_ERR(dev, fops,\n+\t\t\t     async_action_list_handle_query_update, ENOTSUP);\n+\treturn fops->async_action_list_handle_query_update(dev, queue_id, op_attr,\n+\t\t\t\t\t\t\t   handle, update,\n+\t\t\t\t\t\t\t   query, mode,\n+\t\t\t\t\t\t\t   user_data, error);\n+}\n+\n+\n /**\n  * Destroy all indirect actions (shared RSS).\n  *\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 19b26ad333..58e345057f 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ 
b/drivers/net/mlx5/mlx5_flow.h\n@@ -98,25 +98,40 @@ enum mlx5_indirect_type{\n #define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX\n \n enum mlx5_indirect_list_type {\n-\tMLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,\n+\tMLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,\n+\tMLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,\n+\tMLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,\n };\n \n-/*\n+/**\n  * Base type for indirect list type.\n- * Actual indirect list type MUST override that type and put type spec data\n- * after the `chain`.\n  */\n struct mlx5_indirect_list {\n-\t/* type field MUST be the first */\n+\t/* Indirect list type. */\n \tenum mlx5_indirect_list_type type;\n+\t/* Optional storage list entry */\n \tLIST_ENTRY(mlx5_indirect_list) entry;\n-\t/* put type specific data after chain */\n };\n \n+static __rte_always_inline void\n+mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)\n+{\n+\tLIST_HEAD(, mlx5_indirect_list) *h = head;\n+\n+\tLIST_INSERT_HEAD(h, elem, entry);\n+}\n+\n+static __rte_always_inline void\n+mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)\n+{\n+\tif (elem->entry.le_prev)\n+\t\tLIST_REMOVE(elem, entry);\n+}\n+\n static __rte_always_inline enum mlx5_indirect_list_type\n-mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)\n+mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)\n {\n-\treturn obj->type;\n+\treturn ((const struct mlx5_indirect_list *)obj)->type;\n }\n \n /* Matches on selected register. 
*/\n@@ -1240,9 +1255,12 @@ struct rte_flow_hw {\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\n \n-struct mlx5dr_action;\n-typedef struct mlx5dr_action *\n-(*indirect_list_callback_t)(const struct rte_flow_action *);\n+struct mlx5_action_construct_data;\n+typedef int\n+(*indirect_list_callback_t)(struct rte_eth_dev *,\n+\t\t\t    const struct mlx5_action_construct_data *,\n+\t\t\t    const struct rte_flow_action *,\n+\t\t\t    struct mlx5dr_rule_action *);\n \n /* rte flow action translate to DR action struct. */\n struct mlx5_action_construct_data {\n@@ -1252,6 +1270,7 @@ struct mlx5_action_construct_data {\n \tuint32_t idx;  /* Data index. */\n \tuint16_t action_src; /* rte_flow_action src offset. */\n \tuint16_t action_dst; /* mlx5dr_rule_action dst offset. */\n+\tindirect_list_callback_t indirect_list_cb;\n \tunion {\n \t\tstruct {\n \t\t\t/* encap data len. */\n@@ -1291,10 +1310,8 @@ struct mlx5_action_construct_data {\n \t\t} shared_counter;\n \t\tstruct {\n \t\t\tuint32_t id;\n+\t\t\tuint32_t conf_masked:1;\n \t\t} shared_meter;\n-\t\tstruct {\n-\t\t\tindirect_list_callback_t cb;\n-\t\t} indirect_list;\n \t};\n };\n \n@@ -2017,7 +2034,21 @@ typedef int\n \t\t\t const struct rte_flow_op_attr *op_attr,\n \t\t\t struct rte_flow_action_list_handle *action_handle,\n \t\t\t void *user_data, struct rte_flow_error *error);\n-\n+typedef int\n+(*mlx5_flow_action_list_handle_query_update_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_action_list_handle *handle,\n+\t\t\tconst void **update, void **query,\n+\t\t\tenum rte_flow_query_update_mode mode,\n+\t\t\tstruct rte_flow_error *error);\n+typedef int\n+(*mlx5_flow_async_action_list_handle_query_update_t)\n+\t\t\t(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\tconst struct rte_flow_op_attr *attr,\n+\t\t\tconst struct rte_flow_action_list_handle *handle,\n+\t\t\tconst void **update, void **query,\n+\t\t\tenum rte_flow_query_update_mode mode,\n+\t\t\tvoid *user_data, struct rte_flow_error 
*error);\n \n struct mlx5_flow_driver_ops {\n \tmlx5_flow_validate_t validate;\n@@ -2085,6 +2116,10 @@ struct mlx5_flow_driver_ops {\n \t\tasync_action_list_handle_create;\n \tmlx5_flow_async_action_list_handle_destroy_t\n \t\tasync_action_list_handle_destroy;\n+\tmlx5_flow_action_list_handle_query_update_t\n+\t\taction_list_handle_query_update;\n+\tmlx5_flow_async_action_list_handle_query_update_t\n+\t\tasync_action_list_handle_query_update;\n };\n \n /* mlx5_flow.c */\n@@ -2820,6 +2855,9 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);\n #ifdef HAVE_MLX5_HWS_SUPPORT\n struct mlx5_mirror;\n void\n-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);\n+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);\n+void\n+mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_indirect_list *ptr);\n #endif\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex e8544a4f2b..5daec3524d 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -61,16 +61,23 @@\n #define MLX5_MIRROR_MAX_CLONES_NUM 3\n #define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4\n \n+#define MLX5_HW_PORT_IS_PROXY(priv) \\\n+\t(!!((priv)->sh->esw_mode && (priv)->master))\n+\n+\n+struct mlx5_indlst_legacy {\n+\tstruct mlx5_indirect_list indirect;\n+\tstruct rte_flow_action_handle *handle;\n+\tenum rte_flow_action_type legacy_type;\n+};\n+\n struct mlx5_mirror_clone {\n \tenum rte_flow_action_type type;\n \tvoid *action_ctx;\n };\n \n struct mlx5_mirror {\n-\t/* type field MUST be the first */\n-\tenum mlx5_indirect_list_type type;\n-\tLIST_ENTRY(mlx5_indirect_list) entry;\n-\n+\tstruct mlx5_indirect_list indirect;\n \tuint32_t clones_num;\n \tstruct mlx5dr_action *mirror_action;\n \tstruct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];\n@@ -598,7 +605,7 @@ flow_hw_act_data_indirect_list_append(struct mlx5_priv 
*priv,\n \tact_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);\n \tif (!act_data)\n \t\treturn -1;\n-\tact_data->indirect_list.cb = cb;\n+\tact_data->indirect_list_cb = cb;\n \tLIST_INSERT_HEAD(&acts->act_list, act_data, next);\n \treturn 0;\n }\n@@ -1416,46 +1423,211 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-static struct mlx5dr_action *\n-flow_hw_mirror_action(const struct rte_flow_action *action)\n+static int\n+flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t\t  __rte_unused const struct mlx5_action_construct_data *act_data,\n+\t\t\t\t  const struct rte_flow_action *action,\n+\t\t\t\t  struct mlx5dr_rule_action *dr_rule)\n+{\n+\tconst struct rte_flow_action_indirect_list *list_conf = action->conf;\n+\tconst struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;\n+\n+\tdr_rule->action = mirror->mirror_action;\n+\treturn 0;\n+}\n+\n+/**\n+ * HWS mirror implemented as FW island.\n+ * The action does not support indirect list flow configuration.\n+ * If template handle was masked, use handle mirror action in flow rules.\n+ * Otherwise let flow rule specify mirror handle.\n+ */\n+static int\n+hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,\n+\t\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t\t const struct rte_flow_action *mask,\n+\t\t\t\t\t struct mlx5_hw_actions *acts,\n+\t\t\t\t\t uint16_t action_src, uint16_t action_dst)\n+{\n+\tint ret = 0;\n+\tconst struct rte_flow_action_indirect_list *mask_conf = mask->conf;\n+\n+\tif (mask_conf && mask_conf->handle) {\n+\t\t/**\n+\t\t * If mirror handle was masked, assign fixed DR5 mirror action.\n+\t\t */\n+\t\tflow_hw_translate_indirect_mirror(dev, NULL, action,\n+\t\t\t\t\t\t  &acts->rule_acts[action_dst]);\n+\t} else {\n+\t\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\t\tret = flow_hw_act_data_indirect_list_append\n+\t\t\t(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,\n+\t\t\t action_src, 
action_dst,\n+\t\t\t flow_hw_translate_indirect_mirror);\n+\t}\n+\treturn ret;\n+}\n+\n+static int\n+flow_dr_set_meter(struct mlx5_priv *priv,\n+\t\t  struct mlx5dr_rule_action *dr_rule,\n+\t\t  const struct rte_flow_action_indirect_list *action_conf)\n+{\n+\tconst struct mlx5_indlst_legacy *legacy_obj =\n+\t\t(typeof(legacy_obj))action_conf->handle;\n+\tstruct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;\n+\tuint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;\n+\tuint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n+\tstruct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);\n+\n+\tif (!aso_mtr)\n+\t\treturn -EINVAL;\n+\tdr_rule->action = mtr_pool->action;\n+\tdr_rule->aso_meter.offset = aso_mtr->offset;\n+\treturn 0;\n+}\n+\n+__rte_always_inline static void\n+flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)\n+{\n+\tdr_rule->aso_meter.init_color =\n+\t\t(enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);\n+}\n+\n+static int\n+flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,\n+\t\t\t\t const struct mlx5_action_construct_data *act_data,\n+\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t struct mlx5dr_rule_action *dr_rule)\n+{\n+\tint ret;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_indirect_list *action_conf = action->conf;\n+\tconst struct rte_flow_indirect_update_flow_meter_mark **flow_conf =\n+\t\t(typeof(flow_conf))action_conf->conf;\n+\n+\t/*\n+\t * Masked indirect handle set dr5 action during template table\n+\t * translation.\n+\t */\n+\tif (!dr_rule->action) {\n+\t\tret = flow_dr_set_meter(priv, dr_rule, action_conf);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\tif (!act_data->shared_meter.conf_masked) {\n+\t\tif (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)\n+\t\t\tflow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);\n+\t}\n+\treturn 0;\n+}\n+\n+static 
int\n+hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,\n+\t\t\t\t\tconst struct rte_flow_action *action,\n+\t\t\t\t\tconst struct rte_flow_action *mask,\n+\t\t\t\t\tstruct mlx5_hw_actions *acts,\n+\t\t\t\t\tuint16_t action_src, uint16_t action_dst)\n+{\n+\tint ret;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_indirect_list *action_conf = action->conf;\n+\tconst struct rte_flow_action_indirect_list *mask_conf = mask->conf;\n+\tbool is_handle_masked = mask_conf && mask_conf->handle;\n+\tbool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];\n+\tstruct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];\n+\n+\tif (is_handle_masked) {\n+\t\tret = flow_dr_set_meter(priv, dr_rule, action->conf);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\tif (is_conf_masked) {\n+\t\tconst struct\n+\t\t\trte_flow_indirect_update_flow_meter_mark **flow_conf =\n+\t\t\t(typeof(flow_conf))action_conf->conf;\n+\t\tflow_dr_mtr_flow_color(dr_rule,\n+\t\t\t\t       flow_conf[0]->init_color);\n+\t}\n+\tif (!is_handle_masked || !is_conf_masked) {\n+\t\tstruct mlx5_action_construct_data *act_data;\n+\n+\t\tret = flow_hw_act_data_indirect_list_append\n+\t\t\t(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,\n+\t\t\t action_src, action_dst, flow_hw_translate_indirect_meter);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tact_data = LIST_FIRST(&acts->act_list);\n+\t\tact_data->shared_meter.conf_masked = is_conf_masked;\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,\n+\t\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t\t const struct rte_flow_action *mask,\n+\t\t\t\t\t struct mlx5_hw_actions *acts,\n+\t\t\t\t\t uint16_t action_src, uint16_t action_dst)\n {\n-\tstruct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;\n+\tint ret;\n+\tconst struct rte_flow_action_indirect_list *indlst_conf = action->conf;\n+\tstruct mlx5_indlst_legacy *indlst_obj = 
(typeof(indlst_obj))indlst_conf->handle;\n+\tuint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;\n+\tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \n-\treturn mirror->mirror_action;\n+\tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_METER_MARK:\n+\t\tret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,\n+\t\t\t\t\t\t\t      acts, action_src,\n+\t\t\t\t\t\t\t      action_dst);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -EINVAL;\n+\t\tbreak;\n+\t}\n+\treturn ret;\n }\n \n+/*\n+ * template .. indirect_list handle Ht conf Ct ..\n+ * mask     .. indirect_list handle Hm conf Cm ..\n+ *\n+ * PMD requires Ht != 0 to resolve handle type.\n+ * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will\n+ * not change. Otherwise, DR5 action will be resolved during flow rule build.\n+ * If Ct was masked (Cm != 0), table template processing updates base\n+ * indirect action configuration with Ct parameters.\n+ */\n static int\n table_template_translate_indirect_list(struct rte_eth_dev *dev,\n \t\t\t\t       const struct rte_flow_action *action,\n \t\t\t\t       const struct rte_flow_action *mask,\n \t\t\t\t       struct mlx5_hw_actions *acts,\n-\t\t\t\t       uint16_t action_src,\n-\t\t\t\t       uint16_t action_dst)\n+\t\t\t\t       uint16_t action_src, uint16_t action_dst)\n {\n-\tint ret;\n-\tbool is_masked = action->conf && mask->conf;\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tint ret = 0;\n \tenum mlx5_indirect_list_type type;\n+\tconst struct rte_flow_action_indirect_list *list_conf = action->conf;\n \n-\tif (!action->conf)\n+\tif (!list_conf || !list_conf->handle)\n \t\treturn -EINVAL;\n-\ttype = mlx5_get_indirect_list_type(action->conf);\n+\ttype = mlx5_get_indirect_list_type(list_conf->handle);\n \tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:\n+\t\tret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,\n+\t\t\t\t\t\t\t       acts, action_src,\n+\t\t\t\t\t\t\t       
action_dst);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n-\t\tif (is_masked) {\n-\t\t\tacts->rule_acts[action_dst].action = flow_hw_mirror_action(action);\n-\t\t} else {\n-\t\t\tret = flow_hw_act_data_indirect_list_append\n-\t\t\t\t(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,\n-\t\t\t\t action_src, action_dst, flow_hw_mirror_action);\n-\t\t\tif (ret)\n-\t\t\t\treturn ret;\n-\t\t}\n+\t\tret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,\n+\t\t\t\t\t\t\t       acts, action_src,\n+\t\t\t\t\t\t\t       action_dst);\n \t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n \t}\n-\treturn 0;\n+\treturn ret;\n }\n \n /**\n@@ -2366,8 +2538,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t\t    (int)action->type == act_data->type);\n \t\tswitch ((int)act_data->type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n-\t\t\trule_acts[act_data->action_dst].action =\n-\t\t\t\tact_data->indirect_list.cb(action);\n+\t\t\tact_data->indirect_list_cb(dev, act_data, actions,\n+\t\t\t\t\t\t   &rule_acts[act_data->action_dst]);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tif (flow_hw_shared_action_construct\n@@ -4664,20 +4836,11 @@ action_template_set_type(struct rte_flow_actions_template *at,\n }\n \n static int\n-flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,\n-\t\t\t\t\t  unsigned int action_src,\n+flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,\n \t\t\t\t\t  enum mlx5dr_action_type *action_types,\n \t\t\t\t\t  uint16_t *curr_off, uint16_t *cnt_off,\n \t\t\t\t\t  struct rte_flow_actions_template *at)\n {\n-\tuint32_t type;\n-\n-\tif (!mask) {\n-\t\tDRV_LOG(WARNING, \"Unable to determine indirect action type \"\n-\t\t\t\"without a mask specified\");\n-\t\treturn -EINVAL;\n-\t}\n-\ttype = mask->type;\n \tswitch (type) {\n \tcase RTE_FLOW_ACTION_TYPE_RSS:\n \t\taction_template_set_type(at, action_types, action_src, curr_off,\n@@ -4718,12 +4881,24 @@ static int\n 
flow_hw_template_actions_list(struct rte_flow_actions_template *at,\n \t\t\t      unsigned int action_src,\n \t\t\t      enum mlx5dr_action_type *action_types,\n-\t\t\t      uint16_t *curr_off)\n+\t\t\t      uint16_t *curr_off, uint16_t *cnt_off)\n {\n-\tenum mlx5_indirect_list_type list_type;\n+\tint ret;\n+\tconst struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;\n+\tenum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);\n+\tconst union {\n+\t\tstruct mlx5_indlst_legacy *legacy;\n+\t\tstruct rte_flow_action_list_handle *handle;\n+\t} indlst_obj = { .handle = indlst_conf->handle };\n \n-\tlist_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);\n \tswitch (list_type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:\n+\t\tret = flow_hw_dr_actions_template_handle_shared\n+\t\t\t(indlst_obj.legacy->legacy_type, action_src,\n+\t\t\t action_types, curr_off, cnt_off, at);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n \t\taction_template_set_type(at, action_types, action_src, curr_off,\n \t\t\t\t\t MLX5DR_ACTION_TYP_DEST_ARRAY);\n@@ -4769,17 +4944,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n \t\t\tret = flow_hw_template_actions_list(at, i, action_types,\n-\t\t\t\t\t\t\t    &curr_off);\n+\t\t\t\t\t\t\t    &curr_off, &cnt_off);\n \t\t\tif (ret)\n \t\t\t\treturn NULL;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tret = flow_hw_dr_actions_template_handle_shared\n-\t\t\t\t\t\t\t\t (&at->masks[i],\n-\t\t\t\t\t\t\t\t  i,\n-\t\t\t\t\t\t\t\t  action_types,\n-\t\t\t\t\t\t\t\t  &curr_off,\n-\t\t\t\t\t\t\t\t  &cnt_off, at);\n+\t\t\t\t(at->masks[i].type, i, action_types,\n+\t\t\t\t &curr_off, &cnt_off, at);\n \t\t\tif (ret)\n \t\t\t\treturn NULL;\n \t\t\tbreak;\n@@ -5259,9 +5431,8 @@ flow_hw_actions_template_create(struct rte_eth_dev 
*dev,\n \t\t * Need to restore the indirect action index from action conf here.\n \t\t */\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n-\t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n-\t\t\tat->actions[i].conf = actions->conf;\n-\t\t\tat->masks[i].conf = masks->conf;\n+\t\t\tat->actions[i].conf = ra[i].conf;\n+\t\t\tat->masks[i].conf = rm[i].conf;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:\n \t\t\tinfo = actions->conf;\n@@ -9519,18 +9690,16 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,\n }\n \n void\n-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)\n+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)\n {\n \tuint32_t i;\n \n-\tif (mirror->entry.le_prev)\n-\t\tLIST_REMOVE(mirror, entry);\n+\tmlx5_indirect_list_remove_entry(&mirror->indirect);\n \tfor(i = 0; i < mirror->clones_num; i++)\n \t\tmlx5_mirror_destroy_clone(dev, &mirror->clone[i]);\n \tif (mirror->mirror_action)\n \t\tmlx5dr_action_destroy(mirror->mirror_action);\n-    if (release)\n-\t    mlx5_free(mirror);\n+\tmlx5_free(mirror);\n }\n \n static inline enum mlx5dr_table_type\n@@ -9825,7 +9994,8 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,\n \t\t\t\t   actions, \"Failed to allocate mirror context\");\n \t\treturn NULL;\n \t}\n-\tmirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;\n+\n+\tmirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;\n \tmirror->clones_num = clones_num;\n \tfor (i = 0; i < clones_num; i++) {\n \t\tconst struct rte_flow_action *clone_actions;\n@@ -9857,15 +10027,72 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,\n \t\tgoto error;\n \t}\n \n-\tLIST_INSERT_HEAD(&priv->indirect_list_head,\n-\t\t\t (struct mlx5_indirect_list *)mirror, entry);\n+\tmlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);\n \treturn (struct rte_flow_action_list_handle *)mirror;\n \n error:\n-\tmlx5_hw_mirror_destroy(dev, mirror, true);\n+\tmlx5_hw_mirror_destroy(dev, 
mirror);\n \treturn NULL;\n }\n \n+void\n+mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_indirect_list *ptr)\n+{\n+\tstruct mlx5_indlst_legacy *obj = (typeof(obj))ptr;\n+\n+\tswitch (obj->legacy_type) {\n+\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n+\t\tbreak; /* ASO meters were released in mlx5_flow_meter_flush() */\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\tmlx5_free(obj);\n+}\n+\n+static struct rte_flow_action_list_handle *\n+mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t  const struct rte_flow_op_attr *attr,\n+\t\t\t  const struct rte_flow_indir_action_conf *conf,\n+\t\t\t  const struct rte_flow_action *actions,\n+\t\t\t  void *user_data, struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,\n+\t\t\t\t\t\t\t    sizeof(*indlst_obj),\n+\t\t\t\t\t\t\t    0, SOCKET_ID_ANY);\n+\n+\tif (!indlst_obj)\n+\t\treturn NULL;\n+\tindlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,\n+\t\t\t\t\t\t\t  actions, user_data,\n+\t\t\t\t\t\t\t  error);\n+\tif (!indlst_obj->handle) {\n+\t\tmlx5_free(indlst_obj);\n+\t\treturn NULL;\n+\t}\n+\tindlst_obj->legacy_type = actions[0].type;\n+\tindlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;\n+\tmlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);\n+\treturn (struct rte_flow_action_list_handle *)indlst_obj;\n+}\n+\n+static __rte_always_inline enum mlx5_indirect_list_type\n+flow_hw_inlist_type_get(const struct rte_flow_action *actions)\n+{\n+\tswitch (actions[0].type) {\n+\tcase RTE_FLOW_ACTION_TYPE_SAMPLE:\n+\t\treturn MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;\n+\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n+\t\treturn actions[1].type == RTE_FLOW_ACTION_TYPE_END ?\n+\t\t       MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :\n+\t\t       MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\treturn 
MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;\n+}\n+\n static struct rte_flow_action_list_handle *\n flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t\tconst struct rte_flow_op_attr *attr,\n@@ -9876,6 +10103,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n {\n \tstruct mlx5_hw_q_job *job = NULL;\n \tbool push = flow_hw_action_push(attr);\n+\tenum mlx5_indirect_list_type list_type;\n \tstruct rte_flow_action_list_handle *handle;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct mlx5_flow_template_table_cfg table_cfg = {\n@@ -9894,6 +10122,16 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t   NULL, \"No action list\");\n \t\treturn NULL;\n \t}\n+\tlist_type = flow_hw_inlist_type_get(actions);\n+\tif (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {\n+\t\t/*\n+\t\t * Legacy indirect actions already have\n+\t\t * async resources management. No need to do it twice.\n+\t\t */\n+\t\thandle = mlx5_create_legacy_indlst(dev, queue, attr, conf,\n+\t\t\t\t\t\t   actions, user_data, error);\n+\t\tgoto end;\n+\t}\n \tif (attr) {\n \t\tjob = flow_hw_action_job_init(priv, queue, NULL, user_data,\n \t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,\n@@ -9901,8 +10139,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\tif (!job)\n \t\t\treturn NULL;\n \t}\n-\tswitch (actions[0].type) {\n-\tcase RTE_FLOW_ACTION_TYPE_SAMPLE:\n+\tswitch (list_type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n \t\thandle = mlx5_hw_mirror_handle_create(dev, &table_cfg,\n \t\t\t\t\t\t      actions, error);\n \t\tbreak;\n@@ -9916,6 +10154,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\tflow_hw_action_finalize(dev, queue, job, push, false,\n \t\t\t\t\thandle != NULL);\n \t}\n+end:\n \treturn handle;\n }\n \n@@ -9944,6 +10183,15 @@ flow_hw_async_action_list_handle_destroy\n \tenum 
mlx5_indirect_list_type type =\n \t\tmlx5_get_indirect_list_type((void *)handle);\n \n+\tif (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {\n+\t\tstruct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;\n+\n+\t\tret = flow_hw_action_handle_destroy(dev, queue, attr,\n+\t\t\t\t\t\t    legacy->handle,\n+\t\t\t\t\t\t    user_data, error);\n+\t\tmlx5_indirect_list_remove_entry(&legacy->indirect);\n+\t\tgoto end;\n+\t}\n \tif (attr) {\n \t\tjob = flow_hw_action_job_init(priv, queue, NULL, user_data,\n \t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,\n@@ -9953,20 +10201,17 @@ flow_hw_async_action_list_handle_destroy\n \t}\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n-\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);\n+\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);\n \t\tbreak;\n \tdefault:\n-\t\thandle = NULL;\n \t\tret = rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t  NULL, \"Invalid indirect list handle\");\n \t}\n \tif (job) {\n-\t\tjob->action = handle;\n-\t\tflow_hw_action_finalize(dev, queue, job, push, false,\n-\t\t\t\t       handle != NULL);\n+\t\tflow_hw_action_finalize(dev, queue, job, push, false, true);\n \t}\n-\tmlx5_free(handle);\n+end:\n \treturn ret;\n }\n \n@@ -9980,6 +10225,53 @@ flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\terror);\n }\n \n+static int\n+flow_hw_async_action_list_handle_query_update\n+\t\t(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t const struct rte_flow_op_attr *attr,\n+\t\t const struct rte_flow_action_list_handle *handle,\n+\t\t const void **update, void **query,\n+\t\t enum rte_flow_query_update_mode mode,\n+\t\t void *user_data, struct rte_flow_error *error)\n+{\n+\tenum mlx5_indirect_list_type type =\n+\t\tmlx5_get_indirect_list_type((const void *)handle);\n+\n+\tif (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {\n+\t\tstruct mlx5_indlst_legacy *legacy = (void 
*)(uintptr_t)handle;\n+\n+\t\tif (update && query)\n+\t\t\treturn flow_hw_async_action_handle_query_update\n+\t\t\t\t(dev, queue_id, attr, legacy->handle,\n+\t\t\t\t update, query, mode, user_data, error);\n+\t\telse if (update && update[0])\n+\t\t\treturn flow_hw_action_handle_update(dev, queue_id, attr,\n+\t\t\t\t\t\t\t    legacy->handle, update[0],\n+\t\t\t\t\t\t\t    user_data, error);\n+\t\telse if (query && query[0])\n+\t\t\treturn flow_hw_action_handle_query(dev, queue_id, attr,\n+\t\t\t\t\t\t\t   legacy->handle, query[0],\n+\t\t\t\t\t\t\t   user_data, error);\n+\t\telse\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t\t  NULL, \"invalid legacy handle query_update parameters\");\n+\t}\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\tconst struct rte_flow_action_list_handle *handle,\n+\t\t\t\t\tconst void **update, void **query,\n+\t\t\t\t\tenum rte_flow_query_update_mode mode,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\treturn flow_hw_async_action_list_handle_query_update\n+\t\t\t\t\t(dev, MLX5_HW_INV_QUEUE, NULL, handle,\n+\t\t\t\t\t update, query, mode, NULL, error);\n+}\n+\n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.info_get = flow_hw_info_get,\n \t.configure = flow_hw_configure,\n@@ -10010,10 +10302,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.action_query_update = flow_hw_action_query_update,\n \t.action_list_handle_create = flow_hw_action_list_handle_create,\n \t.action_list_handle_destroy = flow_hw_action_list_handle_destroy,\n+\t.action_list_handle_query_update =\n+\t\tflow_hw_action_list_handle_query_update,\n \t.async_action_list_handle_create =\n \t\tflow_hw_async_action_list_handle_create,\n \t.async_action_list_handle_destroy =\n \t\tflow_hw_async_action_list_handle_destroy,\n+\t.async_action_list_handle_query_update 
=\n+\t\tflow_hw_async_action_list_handle_query_update,\n \t.query = flow_hw_query,\n \t.get_aged_flows = flow_hw_get_aged_flows,\n \t.get_q_aged_flows = flow_hw_get_q_aged_flows,\n",
    "prefixes": [
        "v4",
        "10/10"
    ]
}