get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/133302/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 133302,
    "url": "http://patches.dpdk.org/api/patches/133302/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20231025112232.201606-11-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231025112232.201606-11-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231025112232.201606-11-getelson@nvidia.com",
    "date": "2023-10-25T11:22:31",
    "name": "[v6,10/10] net/mlx5: support indirect list METER_MARK action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "228b6459d05bbf589c48fc04f1488f9fb2415358",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20231025112232.201606-11-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 29979,
            "url": "http://patches.dpdk.org/api/series/29979/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29979",
            "date": "2023-10-25T11:22:21",
            "name": "net/mlx5: support indirect actions list",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/29979/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/133302/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/133302/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 0CDE4431FB;\n\tWed, 25 Oct 2023 13:24:28 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id DE400427E0;\n\tWed, 25 Oct 2023 13:23:38 +0200 (CEST)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2048.outbound.protection.outlook.com [40.107.93.48])\n by mails.dpdk.org (Postfix) with ESMTP id 9A9C942DCB\n for <dev@dpdk.org>; Wed, 25 Oct 2023 13:23:37 +0200 (CEST)",
            "from MW3PR05CA0027.namprd05.prod.outlook.com (2603:10b6:303:2b::32)\n by MN6PR12MB8490.namprd12.prod.outlook.com (2603:10b6:208:470::20)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6907.26; Wed, 25 Oct\n 2023 11:23:35 +0000",
            "from CO1PEPF000044FA.namprd21.prod.outlook.com\n (2603:10b6:303:2b:cafe::cf) by MW3PR05CA0027.outlook.office365.com\n (2603:10b6:303:2b::32) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6933.15 via Frontend\n Transport; Wed, 25 Oct 2023 11:23:35 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n CO1PEPF000044FA.mail.protection.outlook.com (10.167.241.200) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.6954.0 via Frontend Transport; Wed, 25 Oct 2023 11:23:34 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 25 Oct\n 2023 04:23:19 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 25 Oct\n 2023 04:23:17 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=IYFHUJ7TjZ7hujgy2xOdWbN942PZ+gTOhuioX4XBn8GCRPalc2QGMPuIulO5yrrMtJKdLSg1XjOqMuDfUKdTqkeb8oAkaosgdEd8feCmSZqcrJG/+Z9JtzdKjt9C7Giy6a4S1UfP7L5KNc1HKhY70+iVWMLeiGi+DQW9rldomvYM4j/r0yxwJhnI9Z4W9kCJr2gd3Cz4IAwjcFkAfrrp0oSc9OYUgmfAw+6a9N8aiTnoR+6kmwYg3R7agQK2IAYUyjDm1hpMmVVDO8CyRAbp2RTMkcCDcFNhng+E+BdWu8U7Mz6PhzottgMqtsF+1pQXDfaT1oJbP9G2oPhCtVN7Sw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=nzD8JdhWoQER2RwdyZN0LFuXM1jLW6camGxx8t4LmgQ=;\n b=SuRKJKDged2y5JG7lNJBzP8k77jZH532eAtbPTgHjSXUhPCy/hQm6uhRI5QWp5X5abtU8wn07TYlNoOs4/TOwlzq6dHR+PnJXOA4oCFwlNIlVecsz5BDVGL3Se99g2hm/U9L+lSgwCnQ4px9mSPq2ojRZyg83o7qEGySAG+p5wCEKwFhRJ4xdHGLzc+m8obQgHvCAKyXWnWxm/Jxz4aunLw84G3JU6F4hlLpoykhgHoW7j+Erc6dkSqW6pUVzx9M5pglp05mDBncGztW8Dyt+q0EB7gwwIhqy/CmMrFqvT0Z4QYz6Sljk1XEoYgaEW3uQGBk0EiU+00nv1PPObwPVw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=nzD8JdhWoQER2RwdyZN0LFuXM1jLW6camGxx8t4LmgQ=;\n b=m0hmi00hnKq8p9XL318q5J7PEjg+3Rd8Sev4Ie9kxMpGIVExcMISOR5CzyKrNSK6yVGHDvCWbR3cWyA0GZ2RAjE1Iq2IYfr6M0+orLi7Kj1CfWnF1fpoFWNdZ/tfpAxV8QGilufAWLVOQU3D6NH/aKZcCuj/3vZt5QwekDZt2XFNrwj8AP/CKkmSz5iKtWbYZDYNusT/kvOKjDmHxG98QqwQoXG18b1LGBIVEROHZrhrCPVFzgQdU8YEtTz9bXMgTKmWcm8whQyE3IZOUghfG4+M3qTdjhjSyfcciQ2d7bJQEVT9He+DHkrmZKRyhvg+dKmComtbfq7m7nfSXmv28w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, =?utf-8?b?wqA=?= <mkashani@nvidia.com>,\n <rasland@nvidia.com>, Suanming Mou <suanmingm@nvidia.com>,\n Matan Azrad <matan@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>",
        "Subject": "[PATCH v6 10/10] net/mlx5: support indirect list METER_MARK action",
        "Date": "Wed, 25 Oct 2023 14:22:31 +0300",
        "Message-ID": "<20231025112232.201606-11-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20231025112232.201606-1-getelson@nvidia.com>",
        "References": "<20231017080928.30454-1-getelson@nvidia.com>\n <20231025112232.201606-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "CO1PEPF000044FA:EE_|MN6PR12MB8490:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "068ee9c1-31b4-4664-231c-08dbd54cd3c7",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n PAZbTUfBAIibOtKGRIp9358XGP+aO4RISxYbU26XRv2YWA2m9u3yTzS9DsVgljJUK9gOQS6mi/2TsXWD4+zy3qpZP5SBvmfqujzLEviT+GgN0uACI9ZfNLpQnXuGypeoga7/+e28cH9ZhrX+sEWbqgF3NveioZb41vSyrf7P/DHd7nJcltGGCGF3qH7uEm0KaQdE9HWAVyppPLAv62egOyXE2i+J+Mn257Ti0+KJLjK1d+W3ZDBbxyJ+TxKe0OIrrkUpl/pDWYXa8VC6SLA5YV88V4xuNbh0YoUeTPjEZ19ebjE/wX0pPFWmL+IHQpG7UlOpA/7OHJfyZsg7N6lWMkSEW0SOOMboVlBBvSbOG2G4IGGNfKyXw0DPr018BO/scIgbmszqF4QrjynsMcopeBBz5DlHgUe0BTZijhfIE63sqOklIUOeRAJ1c6dhwHQX+rGrpAQ7eaSfUEIxaj5sSUWK+AVcwwPX8/bzSfh7JtW8x5z+fpmqx9HmYAm2hKLAGFgMfE3emdk4Occ7wZF/EZB1Gwcc1+7H7Sb1fpjAB6NnRPRqdbq51OhpATMjCgE4RKBixp8VTOIIPTh0ri75sVbT3KEMqNcMWzwzbYfnNH0tANu7vTU1r1KNO870OFS24KuUXFzErbKg3AL8bAAFUyP1eoD/yjyYhyeoTYwxtL4un2P5ieK8wl02OXg1HorC4BhdpTNulfEuQXF+1IywOVnlZyZUhaO5ymZugmsogcE3z5cxFyz+6fhl3HBkrgBC",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230031)(4636009)(396003)(39860400002)(376002)(346002)(136003)(230922051799003)(82310400011)(186009)(1800799009)(64100799003)(451199024)(46966006)(36840700001)(40470700004)(70586007)(83380400001)(26005)(6286002)(2616005)(336012)(107886003)(426003)(6666004)(16526019)(7696005)(86362001)(36756003)(82740400003)(356005)(7636003)(36860700001)(47076005)(1076003)(40460700003)(4326008)(41300700001)(8936002)(8676002)(478600001)(6916009)(5660300002)(40480700001)(2906002)(30864003)(70206006)(316002)(54906003)(55016003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "25 Oct 2023 11:23:34.9549 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 068ee9c1-31b4-4664-231c-08dbd54cd3c7",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1PEPF000044FA.namprd21.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MN6PR12MB8490",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Signed-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c    |  69 ++++-\n drivers/net/mlx5/mlx5_flow.h    |  70 ++++--\n drivers/net/mlx5/mlx5_flow_hw.c | 430 +++++++++++++++++++++++++++-----\n 3 files changed, 484 insertions(+), 85 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 99b814d815..34252d66c0 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -75,8 +75,11 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)\n \t\tswitch (e->type) {\n #ifdef HAVE_MLX5_HWS_SUPPORT\n \t\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n-\t\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);\n+\t\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);\n \t\tbreak;\n+\t\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:\n+\t\t\tmlx5_destroy_legacy_indirect(dev, e);\n+\t\t\tbreak;\n #endif\n \t\tdefault:\n \t\t\tDRV_LOG(ERR, \"invalid indirect list type\");\n@@ -1169,7 +1172,24 @@ mlx5_flow_async_action_list_handle_destroy\n \t\t\t const struct rte_flow_op_attr *op_attr,\n \t\t\t struct rte_flow_action_list_handle *action_handle,\n \t\t\t void *user_data, struct rte_flow_error *error);\n-\n+static int\n+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\t  const\n+\t\t\t\t\t  struct rte_flow_action_list_handle *handle,\n+\t\t\t\t\t  const void **update, void **query,\n+\t\t\t\t\t  enum rte_flow_query_update_mode mode,\n+\t\t\t\t\t  struct rte_flow_error *error);\n+static int\n+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\t\tuint32_t queue_id,\n+\t\t\t\t\t\tconst struct rte_flow_op_attr *attr,\n+\t\t\t\t\t\tconst struct\n+\t\t\t\t\t\trte_flow_action_list_handle *handle,\n+\t\t\t\t\t\tconst void **update,\n+\t\t\t\t\t\tvoid **query,\n+\t\t\t\t\t\tenum rte_flow_query_update_mode mode,\n+\t\t\t\t\t\tvoid *user_data,\n+\t\t\t\t\t\tstruct rte_flow_error *error);\n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n \t.create = mlx5_flow_create,\n@@ -1219,6 +1239,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t\tmlx5_flow_async_action_list_handle_create,\n \t.async_action_list_handle_destroy =\n 
\t\tmlx5_flow_async_action_list_handle_destroy,\n+\t.action_list_handle_query_update =\n+\t\tmlx5_flow_action_list_handle_query_update,\n+\t.async_action_list_handle_query_update =\n+\t\tmlx5_flow_async_action_list_handle_query_update,\n };\n \n /* Tunnel information. */\n@@ -11003,6 +11027,47 @@ mlx5_flow_async_action_list_handle_destroy\n \t\t\t\t\t\t      error);\n }\n \n+static int\n+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\t  const\n+\t\t\t\t\t  struct rte_flow_action_list_handle *handle,\n+\t\t\t\t\t  const void **update, void **query,\n+\t\t\t\t\t  enum rte_flow_query_update_mode mode,\n+\t\t\t\t\t  struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tMLX5_DRV_FOPS_OR_ERR(dev, fops,\n+\t\t\t     action_list_handle_query_update, ENOTSUP);\n+\treturn fops->action_list_handle_query_update(dev, handle, update, query,\n+\t\t\t\t\t\t     mode, error);\n+}\n+\n+static int\n+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\t\tuint32_t queue_id,\n+\t\t\t\t\t\tconst\n+\t\t\t\t\t\tstruct rte_flow_op_attr *op_attr,\n+\t\t\t\t\t\tconst struct\n+\t\t\t\t\t\trte_flow_action_list_handle *handle,\n+\t\t\t\t\t\tconst void **update,\n+\t\t\t\t\t\tvoid **query,\n+\t\t\t\t\t\tenum\n+\t\t\t\t\t\trte_flow_query_update_mode mode,\n+\t\t\t\t\t\tvoid *user_data,\n+\t\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tMLX5_DRV_FOPS_OR_ERR(dev, fops,\n+\t\t\t     async_action_list_handle_query_update, ENOTSUP);\n+\treturn fops->async_action_list_handle_query_update(dev, queue_id, op_attr,\n+\t\t\t\t\t\t\t   handle, update,\n+\t\t\t\t\t\t\t   query, mode,\n+\t\t\t\t\t\t\t   user_data, error);\n+}\n+\n+\n /**\n  * Destroy all indirect actions (shared RSS).\n  *\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 653f83cf55..3ea2548d2b 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ 
b/drivers/net/mlx5/mlx5_flow.h\n@@ -98,25 +98,40 @@ enum mlx5_indirect_type {\n #define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX\n \n enum mlx5_indirect_list_type {\n-\tMLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,\n+\tMLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,\n+\tMLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,\n+\tMLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,\n };\n \n-/*\n+/**\n  * Base type for indirect list type.\n- * Actual indirect list type MUST override that type and put type spec data\n- * after the `chain`.\n  */\n struct mlx5_indirect_list {\n-\t/* type field MUST be the first */\n+\t/* Indirect list type. */\n \tenum mlx5_indirect_list_type type;\n+\t/* Optional storage list entry */\n \tLIST_ENTRY(mlx5_indirect_list) entry;\n-\t/* put type specific data after chain */\n };\n \n+static __rte_always_inline void\n+mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)\n+{\n+\tLIST_HEAD(, mlx5_indirect_list) *h = head;\n+\n+\tLIST_INSERT_HEAD(h, elem, entry);\n+}\n+\n+static __rte_always_inline void\n+mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)\n+{\n+\tif (elem->entry.le_prev)\n+\t\tLIST_REMOVE(elem, entry);\n+}\n+\n static __rte_always_inline enum mlx5_indirect_list_type\n-mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)\n+mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)\n {\n-\treturn obj->type;\n+\treturn ((const struct mlx5_indirect_list *)obj)->type;\n }\n \n /* Matches on selected register. 
*/\n@@ -1240,9 +1255,12 @@ struct rte_flow_hw {\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\n \n-struct mlx5dr_action;\n-typedef struct mlx5dr_action *\n-(*indirect_list_callback_t)(const struct rte_flow_action *);\n+struct mlx5_action_construct_data;\n+typedef int\n+(*indirect_list_callback_t)(struct rte_eth_dev *,\n+\t\t\t    const struct mlx5_action_construct_data *,\n+\t\t\t    const struct rte_flow_action *,\n+\t\t\t    struct mlx5dr_rule_action *);\n \n /* rte flow action translate to DR action struct. */\n struct mlx5_action_construct_data {\n@@ -1252,6 +1270,7 @@ struct mlx5_action_construct_data {\n \tuint32_t idx;  /* Data index. */\n \tuint16_t action_src; /* rte_flow_action src offset. */\n \tuint16_t action_dst; /* mlx5dr_rule_action dst offset. */\n+\tindirect_list_callback_t indirect_list_cb;\n \tunion {\n \t\tstruct {\n \t\t\t/* encap data len. */\n@@ -1291,10 +1310,8 @@ struct mlx5_action_construct_data {\n \t\t} shared_counter;\n \t\tstruct {\n \t\t\tuint32_t id;\n+\t\t\tuint32_t conf_masked:1;\n \t\t} shared_meter;\n-\t\tstruct {\n-\t\t\tindirect_list_callback_t cb;\n-\t\t} indirect_list;\n \t};\n };\n \n@@ -2017,7 +2034,21 @@ typedef int\n \t\t\t const struct rte_flow_op_attr *op_attr,\n \t\t\t struct rte_flow_action_list_handle *action_handle,\n \t\t\t void *user_data, struct rte_flow_error *error);\n-\n+typedef int\n+(*mlx5_flow_action_list_handle_query_update_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_action_list_handle *handle,\n+\t\t\tconst void **update, void **query,\n+\t\t\tenum rte_flow_query_update_mode mode,\n+\t\t\tstruct rte_flow_error *error);\n+typedef int\n+(*mlx5_flow_async_action_list_handle_query_update_t)\n+\t\t\t(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\tconst struct rte_flow_op_attr *attr,\n+\t\t\tconst struct rte_flow_action_list_handle *handle,\n+\t\t\tconst void **update, void **query,\n+\t\t\tenum rte_flow_query_update_mode mode,\n+\t\t\tvoid *user_data, struct rte_flow_error 
*error);\n \n struct mlx5_flow_driver_ops {\n \tmlx5_flow_validate_t validate;\n@@ -2085,6 +2116,10 @@ struct mlx5_flow_driver_ops {\n \t\tasync_action_list_handle_create;\n \tmlx5_flow_async_action_list_handle_destroy_t\n \t\tasync_action_list_handle_destroy;\n+\tmlx5_flow_action_list_handle_query_update_t\n+\t\taction_list_handle_query_update;\n+\tmlx5_flow_async_action_list_handle_query_update_t\n+\t\tasync_action_list_handle_query_update;\n };\n \n /* mlx5_flow.c */\n@@ -2820,6 +2855,9 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);\n #ifdef HAVE_MLX5_HWS_SUPPORT\n struct mlx5_mirror;\n void\n-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);\n+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);\n+void\n+mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_indirect_list *ptr);\n #endif\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex f9f735ba75..b6a474021a 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -61,16 +61,23 @@\n #define MLX5_MIRROR_MAX_CLONES_NUM 3\n #define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4\n \n+#define MLX5_HW_PORT_IS_PROXY(priv) \\\n+\t(!!((priv)->sh->esw_mode && (priv)->master))\n+\n+\n+struct mlx5_indlst_legacy {\n+\tstruct mlx5_indirect_list indirect;\n+\tstruct rte_flow_action_handle *handle;\n+\tenum rte_flow_action_type legacy_type;\n+};\n+\n struct mlx5_mirror_clone {\n \tenum rte_flow_action_type type;\n \tvoid *action_ctx;\n };\n \n struct mlx5_mirror {\n-\t/* type field MUST be the first */\n-\tenum mlx5_indirect_list_type type;\n-\tLIST_ENTRY(mlx5_indirect_list) entry;\n-\n+\tstruct mlx5_indirect_list indirect;\n \tuint32_t clones_num;\n \tstruct mlx5dr_action *mirror_action;\n \tstruct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];\n@@ -598,7 +605,7 @@ flow_hw_act_data_indirect_list_append(struct mlx5_priv 
*priv,\n \tact_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);\n \tif (!act_data)\n \t\treturn -1;\n-\tact_data->indirect_list.cb = cb;\n+\tact_data->indirect_list_cb = cb;\n \tLIST_INSERT_HEAD(&acts->act_list, act_data, next);\n \treturn 0;\n }\n@@ -1416,46 +1423,211 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-static struct mlx5dr_action *\n-flow_hw_mirror_action(const struct rte_flow_action *action)\n+static int\n+flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t\t  __rte_unused const struct mlx5_action_construct_data *act_data,\n+\t\t\t\t  const struct rte_flow_action *action,\n+\t\t\t\t  struct mlx5dr_rule_action *dr_rule)\n+{\n+\tconst struct rte_flow_action_indirect_list *list_conf = action->conf;\n+\tconst struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;\n+\n+\tdr_rule->action = mirror->mirror_action;\n+\treturn 0;\n+}\n+\n+/**\n+ * HWS mirror implemented as FW island.\n+ * The action does not support indirect list flow configuration.\n+ * If template handle was masked, use handle mirror action in flow rules.\n+ * Otherwise let flow rule specify mirror handle.\n+ */\n+static int\n+hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,\n+\t\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t\t const struct rte_flow_action *mask,\n+\t\t\t\t\t struct mlx5_hw_actions *acts,\n+\t\t\t\t\t uint16_t action_src, uint16_t action_dst)\n+{\n+\tint ret = 0;\n+\tconst struct rte_flow_action_indirect_list *mask_conf = mask->conf;\n+\n+\tif (mask_conf && mask_conf->handle) {\n+\t\t/**\n+\t\t * If mirror handle was masked, assign fixed DR5 mirror action.\n+\t\t */\n+\t\tflow_hw_translate_indirect_mirror(dev, NULL, action,\n+\t\t\t\t\t\t  &acts->rule_acts[action_dst]);\n+\t} else {\n+\t\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\t\tret = flow_hw_act_data_indirect_list_append\n+\t\t\t(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,\n+\t\t\t action_src, 
action_dst,\n+\t\t\t flow_hw_translate_indirect_mirror);\n+\t}\n+\treturn ret;\n+}\n+\n+static int\n+flow_dr_set_meter(struct mlx5_priv *priv,\n+\t\t  struct mlx5dr_rule_action *dr_rule,\n+\t\t  const struct rte_flow_action_indirect_list *action_conf)\n+{\n+\tconst struct mlx5_indlst_legacy *legacy_obj =\n+\t\t(typeof(legacy_obj))action_conf->handle;\n+\tstruct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;\n+\tuint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;\n+\tuint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n+\tstruct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);\n+\n+\tif (!aso_mtr)\n+\t\treturn -EINVAL;\n+\tdr_rule->action = mtr_pool->action;\n+\tdr_rule->aso_meter.offset = aso_mtr->offset;\n+\treturn 0;\n+}\n+\n+__rte_always_inline static void\n+flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)\n+{\n+\tdr_rule->aso_meter.init_color =\n+\t\t(enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);\n+}\n+\n+static int\n+flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,\n+\t\t\t\t const struct mlx5_action_construct_data *act_data,\n+\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t struct mlx5dr_rule_action *dr_rule)\n+{\n+\tint ret;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_indirect_list *action_conf = action->conf;\n+\tconst struct rte_flow_indirect_update_flow_meter_mark **flow_conf =\n+\t\t(typeof(flow_conf))action_conf->conf;\n+\n+\t/*\n+\t * Masked indirect handle set dr5 action during template table\n+\t * translation.\n+\t */\n+\tif (!dr_rule->action) {\n+\t\tret = flow_dr_set_meter(priv, dr_rule, action_conf);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\tif (!act_data->shared_meter.conf_masked) {\n+\t\tif (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)\n+\t\t\tflow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);\n+\t}\n+\treturn 0;\n+}\n+\n+static 
int\n+hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,\n+\t\t\t\t\tconst struct rte_flow_action *action,\n+\t\t\t\t\tconst struct rte_flow_action *mask,\n+\t\t\t\t\tstruct mlx5_hw_actions *acts,\n+\t\t\t\t\tuint16_t action_src, uint16_t action_dst)\n+{\n+\tint ret;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_indirect_list *action_conf = action->conf;\n+\tconst struct rte_flow_action_indirect_list *mask_conf = mask->conf;\n+\tbool is_handle_masked = mask_conf && mask_conf->handle;\n+\tbool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];\n+\tstruct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];\n+\n+\tif (is_handle_masked) {\n+\t\tret = flow_dr_set_meter(priv, dr_rule, action->conf);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\tif (is_conf_masked) {\n+\t\tconst struct\n+\t\t\trte_flow_indirect_update_flow_meter_mark **flow_conf =\n+\t\t\t(typeof(flow_conf))action_conf->conf;\n+\t\tflow_dr_mtr_flow_color(dr_rule,\n+\t\t\t\t       flow_conf[0]->init_color);\n+\t}\n+\tif (!is_handle_masked || !is_conf_masked) {\n+\t\tstruct mlx5_action_construct_data *act_data;\n+\n+\t\tret = flow_hw_act_data_indirect_list_append\n+\t\t\t(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,\n+\t\t\t action_src, action_dst, flow_hw_translate_indirect_meter);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tact_data = LIST_FIRST(&acts->act_list);\n+\t\tact_data->shared_meter.conf_masked = is_conf_masked;\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,\n+\t\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t\t const struct rte_flow_action *mask,\n+\t\t\t\t\t struct mlx5_hw_actions *acts,\n+\t\t\t\t\t uint16_t action_src, uint16_t action_dst)\n {\n-\tstruct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;\n+\tint ret;\n+\tconst struct rte_flow_action_indirect_list *indlst_conf = action->conf;\n+\tstruct mlx5_indlst_legacy *indlst_obj = 
(typeof(indlst_obj))indlst_conf->handle;\n+\tuint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;\n+\tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \n-\treturn mirror->mirror_action;\n+\tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_METER_MARK:\n+\t\tret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,\n+\t\t\t\t\t\t\t      acts, action_src,\n+\t\t\t\t\t\t\t      action_dst);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -EINVAL;\n+\t\tbreak;\n+\t}\n+\treturn ret;\n }\n \n+/*\n+ * template .. indirect_list handle Ht conf Ct ..\n+ * mask     .. indirect_list handle Hm conf Cm ..\n+ *\n+ * PMD requires Ht != 0 to resolve handle type.\n+ * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will\n+ * not change. Otherwise, DR5 action will be resolved during flow rule build.\n+ * If Ct was masked (Cm != 0), table template processing updates base\n+ * indirect action configuration with Ct parameters.\n+ */\n static int\n table_template_translate_indirect_list(struct rte_eth_dev *dev,\n \t\t\t\t       const struct rte_flow_action *action,\n \t\t\t\t       const struct rte_flow_action *mask,\n \t\t\t\t       struct mlx5_hw_actions *acts,\n-\t\t\t\t       uint16_t action_src,\n-\t\t\t\t       uint16_t action_dst)\n+\t\t\t\t       uint16_t action_src, uint16_t action_dst)\n {\n-\tint ret;\n-\tbool is_masked = action->conf && mask->conf;\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tint ret = 0;\n \tenum mlx5_indirect_list_type type;\n+\tconst struct rte_flow_action_indirect_list *list_conf = action->conf;\n \n-\tif (!action->conf)\n+\tif (!list_conf || !list_conf->handle)\n \t\treturn -EINVAL;\n-\ttype = mlx5_get_indirect_list_type(action->conf);\n+\ttype = mlx5_get_indirect_list_type(list_conf->handle);\n \tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:\n+\t\tret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,\n+\t\t\t\t\t\t\t       acts, action_src,\n+\t\t\t\t\t\t\t       
action_dst);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n-\t\tif (is_masked) {\n-\t\t\tacts->rule_acts[action_dst].action = flow_hw_mirror_action(action);\n-\t\t} else {\n-\t\t\tret = flow_hw_act_data_indirect_list_append\n-\t\t\t\t(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,\n-\t\t\t\t action_src, action_dst, flow_hw_mirror_action);\n-\t\t\tif (ret)\n-\t\t\t\treturn ret;\n-\t\t}\n+\t\tret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,\n+\t\t\t\t\t\t\t       acts, action_src,\n+\t\t\t\t\t\t\t       action_dst);\n \t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n \t}\n-\treturn 0;\n+\treturn ret;\n }\n \n /**\n@@ -2366,8 +2538,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t\t    (int)action->type == act_data->type);\n \t\tswitch ((int)act_data->type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n-\t\t\trule_acts[act_data->action_dst].action =\n-\t\t\t\tact_data->indirect_list.cb(action);\n+\t\t\tact_data->indirect_list_cb(dev, act_data, actions,\n+\t\t\t\t\t\t   &rule_acts[act_data->action_dst]);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tif (flow_hw_shared_action_construct\n@@ -4664,20 +4836,11 @@ action_template_set_type(struct rte_flow_actions_template *at,\n }\n \n static int\n-flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,\n-\t\t\t\t\t  unsigned int action_src,\n+flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,\n \t\t\t\t\t  enum mlx5dr_action_type *action_types,\n \t\t\t\t\t  uint16_t *curr_off, uint16_t *cnt_off,\n \t\t\t\t\t  struct rte_flow_actions_template *at)\n {\n-\tuint32_t type;\n-\n-\tif (!mask) {\n-\t\tDRV_LOG(WARNING, \"Unable to determine indirect action type \"\n-\t\t\t\"without a mask specified\");\n-\t\treturn -EINVAL;\n-\t}\n-\ttype = mask->type;\n \tswitch (type) {\n \tcase RTE_FLOW_ACTION_TYPE_RSS:\n \t\taction_template_set_type(at, action_types, action_src, curr_off,\n@@ -4718,12 +4881,24 @@ static int\n 
flow_hw_template_actions_list(struct rte_flow_actions_template *at,\n \t\t\t      unsigned int action_src,\n \t\t\t      enum mlx5dr_action_type *action_types,\n-\t\t\t      uint16_t *curr_off)\n+\t\t\t      uint16_t *curr_off, uint16_t *cnt_off)\n {\n-\tenum mlx5_indirect_list_type list_type;\n+\tint ret;\n+\tconst struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;\n+\tenum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);\n+\tconst union {\n+\t\tstruct mlx5_indlst_legacy *legacy;\n+\t\tstruct rte_flow_action_list_handle *handle;\n+\t} indlst_obj = { .handle = indlst_conf->handle };\n \n-\tlist_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);\n \tswitch (list_type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:\n+\t\tret = flow_hw_dr_actions_template_handle_shared\n+\t\t\t(indlst_obj.legacy->legacy_type, action_src,\n+\t\t\t action_types, curr_off, cnt_off, at);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n \t\taction_template_set_type(at, action_types, action_src, curr_off,\n \t\t\t\t\t MLX5DR_ACTION_TYP_DEST_ARRAY);\n@@ -4769,17 +4944,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n \t\t\tret = flow_hw_template_actions_list(at, i, action_types,\n-\t\t\t\t\t\t\t    &curr_off);\n+\t\t\t\t\t\t\t    &curr_off, &cnt_off);\n \t\t\tif (ret)\n \t\t\t\treturn NULL;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tret = flow_hw_dr_actions_template_handle_shared\n-\t\t\t\t\t\t\t\t (&at->masks[i],\n-\t\t\t\t\t\t\t\t  i,\n-\t\t\t\t\t\t\t\t  action_types,\n-\t\t\t\t\t\t\t\t  &curr_off,\n-\t\t\t\t\t\t\t\t  &cnt_off, at);\n+\t\t\t\t(at->masks[i].type, i, action_types,\n+\t\t\t\t &curr_off, &cnt_off, at);\n \t\t\tif (ret)\n \t\t\t\treturn NULL;\n \t\t\tbreak;\n@@ -5259,9 +5431,8 @@ flow_hw_actions_template_create(struct rte_eth_dev 
*dev,\n \t\t * Need to restore the indirect action index from action conf here.\n \t\t */\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n-\t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n-\t\t\tat->actions[i].conf = actions->conf;\n-\t\t\tat->masks[i].conf = masks->conf;\n+\t\t\tat->actions[i].conf = ra[i].conf;\n+\t\t\tat->masks[i].conf = rm[i].conf;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:\n \t\t\tinfo = actions->conf;\n@@ -9519,18 +9690,16 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,\n }\n \n void\n-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)\n+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)\n {\n \tuint32_t i;\n \n-\tif (mirror->entry.le_prev)\n-\t\tLIST_REMOVE(mirror, entry);\n+\tmlx5_indirect_list_remove_entry(&mirror->indirect);\n \tfor (i = 0; i < mirror->clones_num; i++)\n \t\tmlx5_mirror_destroy_clone(dev, &mirror->clone[i]);\n \tif (mirror->mirror_action)\n \t\tmlx5dr_action_destroy(mirror->mirror_action);\n-\tif (release)\n-\t\tmlx5_free(mirror);\n+\tmlx5_free(mirror);\n }\n \n static inline enum mlx5dr_table_type\n@@ -9825,7 +9994,8 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,\n \t\t\t\t   actions, \"Failed to allocate mirror context\");\n \t\treturn NULL;\n \t}\n-\tmirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;\n+\n+\tmirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;\n \tmirror->clones_num = clones_num;\n \tfor (i = 0; i < clones_num; i++) {\n \t\tconst struct rte_flow_action *clone_actions;\n@@ -9856,15 +10026,72 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,\n \t\tgoto error;\n \t}\n \n-\tLIST_INSERT_HEAD(&priv->indirect_list_head,\n-\t\t\t (struct mlx5_indirect_list *)mirror, entry);\n+\tmlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);\n \treturn (struct rte_flow_action_list_handle *)mirror;\n \n error:\n-\tmlx5_hw_mirror_destroy(dev, mirror, true);\n+\tmlx5_hw_mirror_destroy(dev, 
mirror);\n \treturn NULL;\n }\n \n+void\n+mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_indirect_list *ptr)\n+{\n+\tstruct mlx5_indlst_legacy *obj = (typeof(obj))ptr;\n+\n+\tswitch (obj->legacy_type) {\n+\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n+\t\tbreak; /* ASO meters were released in mlx5_flow_meter_flush() */\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\tmlx5_free(obj);\n+}\n+\n+static struct rte_flow_action_list_handle *\n+mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t  const struct rte_flow_op_attr *attr,\n+\t\t\t  const struct rte_flow_indir_action_conf *conf,\n+\t\t\t  const struct rte_flow_action *actions,\n+\t\t\t  void *user_data, struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,\n+\t\t\t\t\t\t\t    sizeof(*indlst_obj),\n+\t\t\t\t\t\t\t    0, SOCKET_ID_ANY);\n+\n+\tif (!indlst_obj)\n+\t\treturn NULL;\n+\tindlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,\n+\t\t\t\t\t\t\t  actions, user_data,\n+\t\t\t\t\t\t\t  error);\n+\tif (!indlst_obj->handle) {\n+\t\tmlx5_free(indlst_obj);\n+\t\treturn NULL;\n+\t}\n+\tindlst_obj->legacy_type = actions[0].type;\n+\tindlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;\n+\tmlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);\n+\treturn (struct rte_flow_action_list_handle *)indlst_obj;\n+}\n+\n+static __rte_always_inline enum mlx5_indirect_list_type\n+flow_hw_inlist_type_get(const struct rte_flow_action *actions)\n+{\n+\tswitch (actions[0].type) {\n+\tcase RTE_FLOW_ACTION_TYPE_SAMPLE:\n+\t\treturn MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;\n+\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n+\t\treturn actions[1].type == RTE_FLOW_ACTION_TYPE_END ?\n+\t\t       MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :\n+\t\t       MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\treturn 
MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;\n+}\n+\n static struct rte_flow_action_list_handle *\n flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t\tconst struct rte_flow_op_attr *attr,\n@@ -9875,6 +10102,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n {\n \tstruct mlx5_hw_q_job *job = NULL;\n \tbool push = flow_hw_action_push(attr);\n+\tenum mlx5_indirect_list_type list_type;\n \tstruct rte_flow_action_list_handle *handle;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct mlx5_flow_template_table_cfg table_cfg = {\n@@ -9893,6 +10121,16 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t   NULL, \"No action list\");\n \t\treturn NULL;\n \t}\n+\tlist_type = flow_hw_inlist_type_get(actions);\n+\tif (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {\n+\t\t/*\n+\t\t * Legacy indirect actions already have\n+\t\t * async resources management. No need to do it twice.\n+\t\t */\n+\t\thandle = mlx5_create_legacy_indlst(dev, queue, attr, conf,\n+\t\t\t\t\t\t   actions, user_data, error);\n+\t\tgoto end;\n+\t}\n \tif (attr) {\n \t\tjob = flow_hw_action_job_init(priv, queue, NULL, user_data,\n \t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,\n@@ -9900,8 +10138,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\tif (!job)\n \t\t\treturn NULL;\n \t}\n-\tswitch (actions[0].type) {\n-\tcase RTE_FLOW_ACTION_TYPE_SAMPLE:\n+\tswitch (list_type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n \t\thandle = mlx5_hw_mirror_handle_create(dev, &table_cfg,\n \t\t\t\t\t\t      actions, error);\n \t\tbreak;\n@@ -9915,6 +10153,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\tflow_hw_action_finalize(dev, queue, job, push, false,\n \t\t\t\t\thandle != NULL);\n \t}\n+end:\n \treturn handle;\n }\n \n@@ -9943,6 +10182,15 @@ flow_hw_async_action_list_handle_destroy\n \tenum 
mlx5_indirect_list_type type =\n \t\tmlx5_get_indirect_list_type((void *)handle);\n \n+\tif (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {\n+\t\tstruct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;\n+\n+\t\tret = flow_hw_action_handle_destroy(dev, queue, attr,\n+\t\t\t\t\t\t    legacy->handle,\n+\t\t\t\t\t\t    user_data, error);\n+\t\tmlx5_indirect_list_remove_entry(&legacy->indirect);\n+\t\tgoto end;\n+\t}\n \tif (attr) {\n \t\tjob = flow_hw_action_job_init(priv, queue, NULL, user_data,\n \t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,\n@@ -9952,20 +10200,17 @@ flow_hw_async_action_list_handle_destroy\n \t}\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n-\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);\n+\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);\n \t\tbreak;\n \tdefault:\n-\t\thandle = NULL;\n \t\tret = rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t  NULL, \"Invalid indirect list handle\");\n \t}\n \tif (job) {\n-\t\tjob->action = handle;\n-\t\tflow_hw_action_finalize(dev, queue, job, push, false,\n-\t\t\t\t       handle != NULL);\n+\t\tflow_hw_action_finalize(dev, queue, job, push, false, true);\n \t}\n-\tmlx5_free(handle);\n+end:\n \treturn ret;\n }\n \n@@ -9979,6 +10224,53 @@ flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\terror);\n }\n \n+static int\n+flow_hw_async_action_list_handle_query_update\n+\t\t(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t const struct rte_flow_op_attr *attr,\n+\t\t const struct rte_flow_action_list_handle *handle,\n+\t\t const void **update, void **query,\n+\t\t enum rte_flow_query_update_mode mode,\n+\t\t void *user_data, struct rte_flow_error *error)\n+{\n+\tenum mlx5_indirect_list_type type =\n+\t\tmlx5_get_indirect_list_type((const void *)handle);\n+\n+\tif (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {\n+\t\tstruct mlx5_indlst_legacy *legacy = (void 
*)(uintptr_t)handle;\n+\n+\t\tif (update && query)\n+\t\t\treturn flow_hw_async_action_handle_query_update\n+\t\t\t\t(dev, queue_id, attr, legacy->handle,\n+\t\t\t\t update, query, mode, user_data, error);\n+\t\telse if (update && update[0])\n+\t\t\treturn flow_hw_action_handle_update(dev, queue_id, attr,\n+\t\t\t\t\t\t\t    legacy->handle, update[0],\n+\t\t\t\t\t\t\t    user_data, error);\n+\t\telse if (query && query[0])\n+\t\t\treturn flow_hw_action_handle_query(dev, queue_id, attr,\n+\t\t\t\t\t\t\t   legacy->handle, query[0],\n+\t\t\t\t\t\t\t   user_data, error);\n+\t\telse\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t\t  NULL, \"invalid legacy handle query_update parameters\");\n+\t}\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,\n+\t\t\t\t\tconst struct rte_flow_action_list_handle *handle,\n+\t\t\t\t\tconst void **update, void **query,\n+\t\t\t\t\tenum rte_flow_query_update_mode mode,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\treturn flow_hw_async_action_list_handle_query_update\n+\t\t\t\t\t(dev, MLX5_HW_INV_QUEUE, NULL, handle,\n+\t\t\t\t\t update, query, mode, NULL, error);\n+}\n+\n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.info_get = flow_hw_info_get,\n \t.configure = flow_hw_configure,\n@@ -10009,10 +10301,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.action_query_update = flow_hw_action_query_update,\n \t.action_list_handle_create = flow_hw_action_list_handle_create,\n \t.action_list_handle_destroy = flow_hw_action_list_handle_destroy,\n+\t.action_list_handle_query_update =\n+\t\tflow_hw_action_list_handle_query_update,\n \t.async_action_list_handle_create =\n \t\tflow_hw_async_action_list_handle_create,\n \t.async_action_list_handle_destroy =\n \t\tflow_hw_async_action_list_handle_destroy,\n+\t.async_action_list_handle_query_update 
=\n+\t\tflow_hw_async_action_list_handle_query_update,\n \t.query = flow_hw_query,\n \t.get_aged_flows = flow_hw_get_aged_flows,\n \t.get_q_aged_flows = flow_hw_get_q_aged_flows,\n",
    "prefixes": [
        "v6",
        "10/10"
    ]
}