get:
Show a patch.

patch:
Update a patch (partial update: only the supplied fields are changed).

put:
Update a patch (full update).
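
For comparison with the browsable output below, here is a minimal sketch of fetching this resource programmatically with Python's requests library (assuming anonymous read access for GET, as the public instance allows; the printed fields are just examples):

    import requests

    # Fetch the patch shown below as JSON; no authentication is needed for reads.
    resp = requests.get("http://patches.dpdk.org/api/patches/138095/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])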

GET /api/patches/138095/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 138095,
    "url": "http://patches.dpdk.org/api/patches/138095/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20240307101910.1135720-3-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240307101910.1135720-3-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240307101910.1135720-3-getelson@nvidia.com",
    "date": "2024-03-07T10:19:09",
    "name": "[v2,2/3] net/mlx5: fix sync meter processing in HWS setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "70fc689ff5ac12fcd380fb5377d42f93106fcd3c",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20240307101910.1135720-3-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 31419,
            "url": "http://patches.dpdk.org/api/series/31419/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31419",
            "date": "2024-03-07T10:19:08",
            "name": "net/mlx5: fix sync meter processing in HWS setup",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/31419/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/138095/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/138095/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9AC4143BA7;\n\tThu,  7 Mar 2024 11:20:02 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0E27E42EB5;\n\tThu,  7 Mar 2024 11:19:55 +0100 (CET)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2041.outbound.protection.outlook.com [40.107.243.41])\n by mails.dpdk.org (Postfix) with ESMTP id CD3EA42E97;\n Thu,  7 Mar 2024 11:19:53 +0100 (CET)",
            "from CH0PR08CA0009.namprd08.prod.outlook.com (2603:10b6:610:33::14)\n by SA1PR12MB7247.namprd12.prod.outlook.com (2603:10b6:806:2bb::6)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7362.24; Thu, 7 Mar\n 2024 10:19:49 +0000",
            "from CH2PEPF0000009A.namprd02.prod.outlook.com\n (2603:10b6:610:33:cafe::52) by CH0PR08CA0009.outlook.office365.com\n (2603:10b6:610:33::14) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7362.26 via Frontend\n Transport; Thu, 7 Mar 2024 10:19:49 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n CH2PEPF0000009A.mail.protection.outlook.com (10.167.244.22) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7362.11 via Frontend Transport; Thu, 7 Mar 2024 10:19:48 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Thu, 7 Mar 2024\n 02:19:34 -0800",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Thu, 7 Mar\n 2024 02:19:30 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=WuNz1YOO0BXWMMDJt8g4vUo36DWeTZ2CkoRB6pTRJxYsqoosslGKq2KlkrXmISAfyqg0oPpNTymSvpdhd9nGry6QbmFyRZ6VfmETOzyOZMF/Ii/kP0tr1ss28O9GFUxdNUKzw7g+d0RtQKjSmDHmpmRHzH3gL+mkg4g7gC2wiWyrw8AOO8x7p6FBWg4XSuhJrsNO0PL8geAiRpNVxWnhmFCZdGT+KWvHxHBzv1Cwb7Pa4iq1Um2MMS+n5Ji3LehxUWR3+lqaB9DTCcn2Dd/srmrUIPNsOAoakhwYVhwe6G1NxOeTCI7hhL/fVqrw580ORhW73bTbMYG9tncxipmwlQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=BY96S1X7Np4e3CpN8LWzqOR7ESgR17U59xuo3sVYRQg=;\n b=I4YR+VLpYnQBUaU4z8Ad2PBud5CYeoPCNCGc1AXa9yvlYpOhToic+3sujUtqwkgH729KMMrV2lNzwglbj0cxP1qPG7YwKFm1nP+0m1B1KGJ2W4KBtFWd5Jd0hSTWOWKHXHgf8E7gyakpJRm1OdzEdRZELnSLG86yKnSoWAbTZZPOuqqlTweSlosqHngoKwW8/h3fWa+DevFMYwRRDNG2RuT8xPJ12dOnxh6XoBS+o+w4bMiBiZ7LYm6+gfYq66odUDIbMi1u8zJE4jSghVctp6valQ69twtW71NRDdD4zyxFCO25ER/m/gBYDFQ5tOJmPDK6GZ479oA21Lu0Rtsjqg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=BY96S1X7Np4e3CpN8LWzqOR7ESgR17U59xuo3sVYRQg=;\n b=tqc6JD7/aR5FJBxKerLcKiWfDdiVyjUHQ9I9knQKDiBMqeOQK5NJMKsT23qaM5wvsznR0qIP0QQDTwcWh8qwZ7ksTqEPgoQmtYE762RGSFTNC5Hhf/J9M07RUUaSrliRwNgKauEJXniqKWbC7zVjmSUe/0gne3fMqgz/mIp8R+sc7/9BfhurlizHxoS2qQ9DWCCwWeOs+5Bt2l4QQl6jC0IzrMGxS71OwsTLE1dojkJe5Fi2ObnIQ2XJSKW7Rc3VhoFKEqEj20XCUPiW9YWa8lrW7HzYjMm50qWNuqJXn061jSTNjdBKRh0s2mWpUuEDgBfpwpkve/Dt0es4pG5bxA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <mkashani@nvidia.com>, <rasland@nvidia.com>,\n <stable@dpdk.org>, Dariusz Sosnowski <dsosnowski@nvidia.com>, \"Viacheslav\n Ovsiienko\" <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>, Suanming Mou\n <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>, Alexander Kozyrev\n <akozyrev@nvidia.com>",
        "Subject": "[PATCH v2 2/3] net/mlx5: fix sync meter processing in HWS setup",
        "Date": "Thu, 7 Mar 2024 12:19:09 +0200",
        "Message-ID": "<20240307101910.1135720-3-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20240307101910.1135720-1-getelson@nvidia.com>",
        "References": "<20240229105614.593391-1-getelson@nvidia.com>\n <20240307101910.1135720-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "CH2PEPF0000009A:EE_|SA1PR12MB7247:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "5502c6b4-c69b-4dd8-855f-08dc3e901ea3",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n i13HGNOt7aT7gC2PYqBbzJTLUrqwmB1TsctnbRGVgho62cuod3hwFmrX6u5oyomdtRgPbLlQRQYW/d+4GCp0KnZBJ5OdrF1DHYOLfWw/6a53R2+1ZpgJsnW3KQes0mU/he028yFSvxXxiUW1CG4uaqljmyDB9+rjjtlWUfM/qgO6rNfZ0w0ipM3Po45Dh3B9BXxnujZKH/XsasBC/ap9bwlHgH4Ja7qSdk6Z/i8kkeUdxUDOA/0fmOw5y+J5AmTx6GUCOVSmX/48+ZcxaOnrwKKUBLK9Xjl8wWwP04C2LqMvZ3e50qOg8v7m5KhABT3wjnbpvQxHotTUeNqh+XvDhfn+t8aEK/xF+vFNJc7IrqmOODTI5Z+218Kh764Ysxi6s1tEgiFkb543AJQA7OdKYSqWoVtkk+59aJv6neWNpU+bmEZntwh3JLDa9PczpT+TLZtP24/Cu9ZBWVr7QVGYd34aEw4xhxhrqGGRcdvmAjK+mmsxJzTU+oe1siMxvV27vxNFF/s35tOtUQMJnArR0WshCQIIcK3cKGdyQw+f0MEkNKL8tQbI0aT6BCg6o2B6ALfepAmMKw3GG+c3T4AR5eDJYb5u+kuC+hQZaFiMeBzknH2yxSQBJo2l6FcxYz48zPjiFfYLYQPKVFGxAk7k9HXp/pRgFr/jYad41ChYZ5l/tyFhcVpagTfiXqfvbkLmA5vpj6UopINs5Jl16Kh7SfXLIL02N2R5cac4Q4xjanrP4Gqoc42Zk89crj5jiZLW",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230031)(36860700004)(82310400014)(376005); DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "07 Mar 2024 10:19:48.8450 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 5502c6b4-c69b-4dd8-855f-08dc3e901ea3",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CH2PEPF0000009A.namprd02.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SA1PR12MB7247",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Synchronous calls for meter ASO try to pull pending completions\nfrom CQ, submit WR and return to caller. That avoids delays between\nWR post and  HW response.\nIf the template API was activated, PMD will use control queue for\nsync operations.\n\nPMD has different formats for the `user_data` context in sync and\nasync meter ASO calls.\nPMD port destruction procedure submits async operations to the port\ncontrol queue and polls the queue CQs to clean HW responses.\n\nPort destruction can pull a meter ASO completion from control CQ.\nSuch completion has sync format, but was processed by async handler.\n\nThe patch implements sync meter ASO interface with async calls\nin the template API environment.\n\nFixes: 48fbb0e93d06 (\"net/mlx5: support flow meter mark indirect action with HWS\")\n\nCc: stable@dpdk.org\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h            |  35 +++++-\n drivers/net/mlx5/mlx5_flow_aso.c   | 178 ++++++++++++++++++-----------\n drivers/net/mlx5/mlx5_flow_hw.c    |  99 ++++++++--------\n drivers/net/mlx5/mlx5_flow_meter.c |  27 +++--\n 4 files changed, 216 insertions(+), 123 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 2fb3bb65cc..6ff8f322e0 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -2033,6 +2033,30 @@ enum dr_dump_rec_type {\n \tDR_DUMP_REC_TYPE_PMD_COUNTER = 4430,\n };\n \n+#if defined(HAVE_MLX5_HWS_SUPPORT)\n+static __rte_always_inline struct mlx5_hw_q_job *\n+flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)\n+{\n+\tMLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size);\n+\treturn priv->hw_q[queue].job_idx ?\n+\t       priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL;\n+}\n+\n+static __rte_always_inline void\n+flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue)\n+{\n+\tMLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size);\n+\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n+}\n+\n+struct mlx5_hw_q_job *\n+mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,\n+\t\t\t  const struct rte_flow_action_handle *handle,\n+\t\t\t  void *user_data, void *query_data,\n+\t\t\t  enum mlx5_hw_job_type type,\n+\t\t\t  struct rte_flow_error *error);\n+#endif\n+\n /**\n  * Indicates whether HW objects operations can be created by DevX.\n  *\n@@ -2443,11 +2467,12 @@ int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh);\n int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);\n void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,\n \t\t\t   enum mlx5_access_aso_opc_mod aso_opc_mod);\n-int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n-\t\tstruct mlx5_aso_mtr *mtr, struct mlx5_mtr_bulk *bulk,\n-\t\tvoid *user_data, bool push);\n-int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n-\t\tstruct mlx5_aso_mtr *mtr);\n+int mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue,\n+\t\t\t\t struct mlx5_aso_mtr *mtr,\n+\t\t\t\t struct mlx5_mtr_bulk *bulk,\n+\t\t\t\t struct mlx5_hw_q_job *job, bool push);\n+int mlx5_aso_mtr_wait(struct mlx5_priv *priv,\n+\t\t      struct mlx5_aso_mtr *mtr, bool is_tmpl_api);\n int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t      struct mlx5_aso_ct_action *ct,\n \t\t\t      const struct rte_flow_action_conntrack *profile,\ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex f311443472..ab9eb21e01 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -792,7 +792,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t\t\t       struct mlx5_aso_mtr *aso_mtr,\n \t\t\t       struct mlx5_mtr_bulk *bulk,\n \t\t\t       bool need_lock,\n-\t\t\t       void *user_data,\n+\t\t\t       struct mlx5_hw_q_job *job,\n \t\t\t       bool push)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n@@ -819,7 +819,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \trte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);\n \t/* Fill next WQE. */\n \tfm = &aso_mtr->fm;\n-\tsq->elts[sq->head & mask].mtr = user_data ? user_data : aso_mtr;\n+\tsq->elts[sq->head & mask].user_data = job ? 
job : (void *)aso_mtr;\n \tif (aso_mtr->type == ASO_METER_INDIRECT) {\n \t\tif (likely(sh->config.dv_flow_en == 2))\n \t\t\tpool = aso_mtr->pool;\n@@ -897,24 +897,6 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \treturn 1;\n }\n \n-static void\n-mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums)\n-{\n-\tuint16_t size = 1 << sq->log_desc_n;\n-\tuint16_t mask = size - 1;\n-\tuint16_t i;\n-\tstruct mlx5_aso_mtr *aso_mtr = NULL;\n-\tuint8_t exp_state = ASO_METER_WAIT;\n-\n-\tfor (i = 0; i < aso_mtrs_nums; ++i) {\n-\t\taso_mtr = sq->elts[(sq->tail + i) & mask].mtr;\n-\t\tMLX5_ASSERT(aso_mtr);\n-\t\t(void)__atomic_compare_exchange_n(&aso_mtr->state,\n-\t\t\t\t&exp_state, ASO_METER_READY,\n-\t\t\t\tfalse, __ATOMIC_RELAXED, __ATOMIC_RELAXED);\n-\t}\n-}\n-\n static void\n mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)\n {\n@@ -925,7 +907,7 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)\n \tuint32_t idx;\n \tuint32_t next_idx = cq->cq_ci & mask;\n \tuint16_t max;\n-\tuint16_t n = 0;\n+\tuint16_t i, n = 0;\n \tint ret;\n \n \tif (need_lock)\n@@ -957,7 +939,19 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)\n \t\tcq->cq_ci++;\n \t} while (1);\n \tif (likely(n)) {\n-\t\tmlx5_aso_mtrs_status_update(sq, n);\n+\t\tuint8_t exp_state = ASO_METER_WAIT;\n+\t\tstruct mlx5_aso_mtr *aso_mtr;\n+\t\t__rte_unused bool verdict;\n+\n+\t\tfor (i = 0; i < n; ++i) {\n+\t\t\taso_mtr = sq->elts[(sq->tail + i) & mask].mtr;\n+\t\t\tMLX5_ASSERT(aso_mtr);\n+\t\t\tverdict = __atomic_compare_exchange_n(&aso_mtr->state,\n+\t\t\t\t\t\t    &exp_state, ASO_METER_READY,\n+\t\t\t\t\t\t    false, __ATOMIC_RELAXED,\n+\t\t\t\t\t\t    __ATOMIC_RELAXED);\n+\t\t\tMLX5_ASSERT(verdict);\n+\t\t}\n \t\tsq->tail += n;\n \t\trte_io_wmb();\n \t\tcq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);\n@@ -966,6 +960,82 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)\n \t\trte_spinlock_unlock(&sq->sqsl);\n }\n \n+static __rte_always_inline struct mlx5_aso_sq *\n+mlx5_aso_mtr_select_sq(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n+\t\t       struct mlx5_aso_mtr *mtr, bool *need_lock)\n+{\n+\tstruct mlx5_aso_sq *sq;\n+\n+\tif (likely(sh->config.dv_flow_en == 2) &&\n+\t    mtr->type == ASO_METER_INDIRECT) {\n+\t\tif (queue == MLX5_HW_INV_QUEUE) {\n+\t\t\tsq = &mtr->pool->sq[mtr->pool->nb_sq - 1];\n+\t\t\t*need_lock = true;\n+\t\t} else {\n+\t\t\tsq = &mtr->pool->sq[queue];\n+\t\t\t*need_lock = false;\n+\t\t}\n+\t} else {\n+\t\tsq = &sh->mtrmng->pools_mng.sq;\n+\t\t*need_lock = true;\n+\t}\n+\treturn sq;\n+}\n+\n+#if defined(HAVE_MLX5_HWS_SUPPORT)\n+static void\n+mlx5_aso_poll_cq_mtr_hws(struct mlx5_priv *priv, struct mlx5_aso_sq *sq)\n+{\n+#define MLX5_HWS_MTR_CMPL_NUM 4\n+\n+\tint i, ret;\n+\tstruct mlx5_aso_mtr *mtr;\n+\tuint8_t exp_state = ASO_METER_WAIT;\n+\tstruct rte_flow_op_result res[MLX5_HWS_MTR_CMPL_NUM];\n+\t__rte_unused bool verdict;\n+\n+\trte_spinlock_lock(&sq->sqsl);\n+repeat:\n+\tret = mlx5_aso_pull_completion(sq, res, MLX5_HWS_MTR_CMPL_NUM);\n+\tif (ret) {\n+\t\tfor (i = 0; i < ret; i++) {\n+\t\t\tstruct mlx5_hw_q_job *job = res[i].user_data;\n+\n+\t\t\tMLX5_ASSERT(job);\n+\t\t\tmtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,\n+\t\t\t\t\t     MLX5_INDIRECT_ACTION_IDX_GET(job->action));\n+\t\t\tMLX5_ASSERT(mtr);\n+\t\t\tverdict = __atomic_compare_exchange_n(&mtr->state,\n+\t\t\t\t\t\t    &exp_state, ASO_METER_READY,\n+\t\t\t\t\t\t    false, __ATOMIC_RELAXED,\n+\t\t\t\t\t\t    
__ATOMIC_RELAXED);\n+\t\t\tMLX5_ASSERT(verdict);\n+\t\t\tflow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));\n+\t\t}\n+\t\tif (ret == MLX5_HWS_MTR_CMPL_NUM)\n+\t\t\tgoto repeat;\n+\t}\n+\trte_spinlock_unlock(&sq->sqsl);\n+\n+#undef MLX5_HWS_MTR_CMPL_NUM\n+}\n+#else\n+static void\n+mlx5_aso_poll_cq_mtr_hws(__rte_unused struct mlx5_priv *priv, __rte_unused struct mlx5_aso_sq *sq)\n+{\n+\tMLX5_ASSERT(false);\n+}\n+#endif\n+\n+static void\n+mlx5_aso_poll_cq_mtr_sws(__rte_unused struct mlx5_priv *priv,\n+\t\t\t struct mlx5_aso_sq *sq)\n+{\n+\tmlx5_aso_mtr_completion_handle(sq, true);\n+}\n+\n+typedef void (*poll_cq_t)(struct mlx5_priv *, struct mlx5_aso_sq *);\n+\n /**\n  * Update meter parameter by send WQE.\n  *\n@@ -980,39 +1050,29 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n int\n-mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n-\t\t\tstruct mlx5_aso_mtr *mtr,\n-\t\t\tstruct mlx5_mtr_bulk *bulk,\n-\t\t\tvoid *user_data,\n-\t\t\tbool push)\n+mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue,\n+\t\t\t     struct mlx5_aso_mtr *mtr,\n+\t\t\t     struct mlx5_mtr_bulk *bulk,\n+\t\t\t     struct mlx5_hw_q_job *job, bool push)\n {\n-\tstruct mlx5_aso_sq *sq;\n-\tuint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;\n \tbool need_lock;\n+\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n+\tstruct mlx5_aso_sq *sq =\n+\t\tmlx5_aso_mtr_select_sq(sh, queue, mtr, &need_lock);\n+\tuint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;\n+\tpoll_cq_t poll_mtr_cq =\n+\t\tjob ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;\n \tint ret;\n \n-\tif (likely(sh->config.dv_flow_en == 2) &&\n-\t    mtr->type == ASO_METER_INDIRECT) {\n-\t\tif (queue == MLX5_HW_INV_QUEUE) {\n-\t\t\tsq = &mtr->pool->sq[mtr->pool->nb_sq - 1];\n-\t\t\tneed_lock = true;\n-\t\t} else {\n-\t\t\tsq = &mtr->pool->sq[queue];\n-\t\t\tneed_lock = false;\n-\t\t}\n-\t} else {\n-\t\tsq = &sh->mtrmng->pools_mng.sq;\n-\t\tneed_lock = true;\n-\t}\n \tif (queue != MLX5_HW_INV_QUEUE) {\n \t\tret = mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk,\n-\t\t\t\t\t\t     need_lock, user_data, push);\n+\t\t\t\t\t\t     need_lock, job, push);\n \t\treturn ret > 0 ? 0 : -1;\n \t}\n \tdo {\n-\t\tmlx5_aso_mtr_completion_handle(sq, need_lock);\n+\t\tpoll_mtr_cq(priv, sq);\n \t\tif (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk,\n-\t\t\t\t\t\t   need_lock, NULL, true))\n+\t\t\t\t\t\t   need_lock, job, true))\n \t\t\treturn 0;\n \t\t/* Waiting for wqe resource. */\n \t\trte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n@@ -1036,32 +1096,22 @@ mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n int\n-mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n-\t\t\tstruct mlx5_aso_mtr *mtr)\n+mlx5_aso_mtr_wait(struct mlx5_priv *priv,\n+\t\t  struct mlx5_aso_mtr *mtr, bool is_tmpl_api)\n {\n+\tbool need_lock;\n \tstruct mlx5_aso_sq *sq;\n+\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n \tuint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;\n-\tuint8_t state;\n-\tbool need_lock;\n+\tuint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);\n+\tpoll_cq_t poll_mtr_cq =\n+\t\tis_tmpl_api ? 
mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;\n \n-\tif (likely(sh->config.dv_flow_en == 2) &&\n-\t    mtr->type == ASO_METER_INDIRECT) {\n-\t\tif (queue == MLX5_HW_INV_QUEUE) {\n-\t\t\tsq = &mtr->pool->sq[mtr->pool->nb_sq - 1];\n-\t\t\tneed_lock = true;\n-\t\t} else {\n-\t\t\tsq = &mtr->pool->sq[queue];\n-\t\t\tneed_lock = false;\n-\t\t}\n-\t} else {\n-\t\tsq = &sh->mtrmng->pools_mng.sq;\n-\t\tneed_lock = true;\n-\t}\n-\tstate = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);\n \tif (state == ASO_METER_READY || state == ASO_METER_WAIT_ASYNC)\n \t\treturn 0;\n+\tsq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);\n \tdo {\n-\t\tmlx5_aso_mtr_completion_handle(sq, need_lock);\n+\t\tpoll_mtr_cq(priv, sq);\n \t\tif (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==\n \t\t\t\t\t    ASO_METER_READY)\n \t\t\treturn 0;\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex c1b09c9c03..8f004b5435 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -183,6 +183,12 @@ mlx5_flow_hw_aux_get_mtr_id(struct rte_flow_hw *flow, struct rte_flow_hw_aux *au\n \t\treturn aux->orig.mtr_id;\n }\n \n+static __rte_always_inline struct mlx5_hw_q_job *\n+flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,\n+\t\t\tconst struct rte_flow_action_handle *handle,\n+\t\t\tvoid *user_data, void *query_data,\n+\t\t\tenum mlx5_hw_job_type type,\n+\t\t\tstruct rte_flow_error *error);\n static int\n mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,\n \t\t\t       struct rte_flow_template_table *tbl,\n@@ -384,21 +390,6 @@ flow_hw_q_dec_flow_ops(struct mlx5_priv *priv, uint32_t queue)\n \tq->ongoing_flow_ops--;\n }\n \n-static __rte_always_inline struct mlx5_hw_q_job *\n-flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)\n-{\n-\tMLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size);\n-\treturn priv->hw_q[queue].job_idx ?\n-\t       priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL;\n-}\n-\n-static __rte_always_inline void\n-flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue)\n-{\n-\tMLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size);\n-\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n-}\n-\n static inline enum mlx5dr_matcher_insert_mode\n flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)\n {\n@@ -1560,7 +1551,7 @@ flow_hw_meter_compile(struct rte_eth_dev *dev,\n \tacts->rule_acts[jump_pos].action = (!!group) ?\n \t\t\t\t    acts->jump->hws_action :\n \t\t\t\t    acts->jump->root_action;\n-\tif (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))\n+\tif (mlx5_aso_mtr_wait(priv, aso_mtr, true))\n \t\treturn -ENOMEM;\n \treturn 0;\n }\n@@ -1637,7 +1628,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n static __rte_always_inline struct mlx5_aso_mtr *\n flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t const struct rte_flow_action *action,\n-\t\t\t void *user_data, bool push,\n+\t\t\t struct mlx5_hw_q_job *job, bool push,\n \t\t\t struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -1646,6 +1637,8 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tstruct mlx5_flow_meter_info *fm;\n \tuint32_t mtr_id;\n+\tuintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<\n+\t\t\t\t\tMLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \n \tif (priv->shared_host) {\n 
\t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n@@ -1669,15 +1662,16 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;\n \taso_mtr->offset = mtr_id - 1;\n \taso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;\n+\tjob->action = (void *)(handle | mtr_id);\n \t/* Update ASO flow meter by wqe. */\n-\tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,\n-\t\t\t\t\t &priv->mtr_bulk, user_data, push)) {\n+\tif (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,\n+\t\t\t\t\t &priv->mtr_bulk, job, push)) {\n \t\tmlx5_ipool_free(pool->idx_pool, mtr_id);\n \t\treturn NULL;\n \t}\n \t/* Wait for ASO object completion. */\n \tif (queue == MLX5_HW_INV_QUEUE &&\n-\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {\n+\t    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {\n \t\tmlx5_ipool_free(pool->idx_pool, mtr_id);\n \t\treturn NULL;\n \t}\n@@ -1696,10 +1690,18 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n \tstruct mlx5_aso_mtr *aso_mtr;\n+\tstruct mlx5_hw_q_job *job =\n+\t\tflow_hw_action_job_init(priv, queue, NULL, NULL, NULL,\n+\t\t\t\t\tMLX5_HW_Q_JOB_TYPE_CREATE, NULL);\n \n-\taso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true, error);\n-\tif (!aso_mtr)\n+\tif (!job)\n+\t\treturn -1;\n+\taso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job,\n+\t\t\t\t\t   true, error);\n+\tif (!aso_mtr) {\n+\t\tflow_hw_job_put(priv, job, queue);\n \t\treturn -1;\n+\t}\n \n \t/* Compile METER_MARK action */\n \tacts[aso_mtr_pos].action = pool->action;\n@@ -3275,7 +3277,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\t jump->root_action;\n \t\t\tflow->jump = jump;\n \t\t\tflow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;\n-\t\t\tif (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))\n+\t\t\tif (mlx5_aso_mtr_wait(priv, aso_mtr, true))\n \t\t\t\treturn -1;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_AGE:\n@@ -4009,13 +4011,6 @@ flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job\n \t\t\t\t\t\tjob->query.hw);\n \t\t\taso_ct->state = ASO_CONNTRACK_READY;\n \t\t}\n-\t} else {\n-\t\t/*\n-\t\t * rte_flow_op_result::user data can point to\n-\t\t * struct mlx5_aso_mtr object as well\n-\t\t */\n-\t\tif (queue != CTRL_QUEUE_ID(priv))\n-\t\t\tMLX5_ASSERT(false);\n \t}\n }\n \n@@ -11007,7 +11002,8 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,\n {\n \tstruct mlx5_hw_q_job *job;\n \n-\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\tif (queue == MLX5_HW_INV_QUEUE)\n+\t\tqueue = CTRL_QUEUE_ID(priv);\n \tjob = flow_hw_job_get(priv, queue);\n \tif (!job) {\n \t\trte_flow_error_set(error, ENOMEM,\n@@ -11022,6 +11018,17 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,\n \treturn job;\n }\n \n+struct mlx5_hw_q_job *\n+mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,\n+\t\t\t  const struct rte_flow_action_handle *handle,\n+\t\t\t  void *user_data, void *query_data,\n+\t\t\t  enum mlx5_hw_job_type type,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\treturn flow_hw_action_job_init(priv, queue, handle, user_data, query_data,\n+\t\t\t\t       type, error);\n+}\n+\n static __rte_always_inline void\n flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\tstruct mlx5_hw_q_job *job,\n@@ -11081,12 +11088,12 @@ flow_hw_action_handle_create(struct 
rte_eth_dev *dev, uint32_t queue,\n \tconst struct rte_flow_action_age *age;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tcnt_id_t cnt_id;\n-\tuint32_t mtr_id;\n \tuint32_t age_idx;\n \tbool push = flow_hw_action_push(attr);\n \tbool aso = false;\n+\tbool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK;\n \n-\tif (attr) {\n+\tif (attr || force_job) {\n \t\tjob = flow_hw_action_job_init(priv, queue, NULL, user_data,\n \t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,\n \t\t\t\t\t      error);\n@@ -11141,9 +11148,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\taso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push, error);\n \t\tif (!aso_mtr)\n \t\t\tbreak;\n-\t\tmtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<\n-\t\t\tMLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id);\n-\t\thandle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id;\n+\t\thandle = (void *)(uintptr_t)job->action;\n \t\tbreak;\n \tcase RTE_FLOW_ACTION_TYPE_RSS:\n \t\thandle = flow_dv_action_create(dev, conf, action, error);\n@@ -11158,7 +11163,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t   NULL, \"action type not supported\");\n \t\tbreak;\n \t}\n-\tif (job) {\n+\tif (job && !force_job) {\n \t\tjob->action = handle;\n \t\tjob->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY;\n \t\tflow_hw_action_finalize(dev, queue, job, push, aso,\n@@ -11191,15 +11196,17 @@ mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,\n \t\tfm->color_aware = meter_mark->color_mode;\n \tif (upd_meter_mark->state_valid)\n \t\tfm->is_enable = meter_mark->state;\n+\taso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?\n+\t\t\t ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;\n \t/* Update ASO flow meter by wqe. */\n-\tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue,\n+\tif (mlx5_aso_meter_update_by_wqe(priv, queue,\n \t\t\t\t\t aso_mtr, &priv->mtr_bulk, job, push))\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t  NULL, \"Unable to update ASO meter WQE\");\n \t/* Wait for ASO object completion. 
*/\n \tif (queue == MLX5_HW_INV_QUEUE &&\n-\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))\n+\t    mlx5_aso_mtr_wait(priv, aso_mtr, true))\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t  NULL, \"Unable to wait for ASO meter CQE\");\n@@ -11245,8 +11252,9 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \tint ret = 0;\n \tbool push = flow_hw_action_push(attr);\n \tbool aso = false;\n+\tbool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;\n \n-\tif (attr) {\n+\tif (attr || force_job) {\n \t\tjob = flow_hw_action_job_init(priv, queue, handle, user_data,\n \t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,\n \t\t\t\t\t      error);\n@@ -11283,7 +11291,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t\t  \"action type not supported\");\n \t\tbreak;\n \t}\n-\tif (job)\n+\tif (job && !force_job)\n \t\tflow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);\n \treturn ret;\n }\n@@ -11326,8 +11334,9 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \tbool push = flow_hw_action_push(attr);\n \tbool aso = false;\n \tint ret = 0;\n+\tbool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;\n \n-\tif (attr) {\n+\tif (attr || force_job) {\n \t\tjob = flow_hw_action_job_init(priv, queue, handle, user_data,\n \t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,\n \t\t\t\t\t      error);\n@@ -11363,7 +11372,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \t\tfm = &aso_mtr->fm;\n \t\tfm->is_enable = 0;\n \t\t/* Update ASO flow meter by wqe. */\n-\t\tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,\n+\t\tif (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,\n \t\t\t\t\t\t &priv->mtr_bulk, job, push)) {\n \t\t\tret = -EINVAL;\n \t\t\trte_flow_error_set(error, EINVAL,\n@@ -11373,7 +11382,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \t\t}\n \t\t/* Wait for ASO object completion. 
*/\n \t\tif (queue == MLX5_HW_INV_QUEUE &&\n-\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {\n+\t\t    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {\n \t\t\tret = -EINVAL;\n \t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -11397,7 +11406,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t\t  \"action type not supported\");\n \t\tbreak;\n \t}\n-\tif (job)\n+\tif (job && !force_job)\n \t\tflow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);\n \treturn ret;\n }\ndiff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c\nindex 57de95b4b9..4045c4c249 100644\n--- a/drivers/net/mlx5/mlx5_flow_meter.c\n+++ b/drivers/net/mlx5/mlx5_flow_meter.c\n@@ -1897,12 +1897,12 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv,\n \tif (sh->meter_aso_en) {\n \t\tfm->is_enable = !!is_enable;\n \t\taso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);\n-\t\tret = mlx5_aso_meter_update_by_wqe(sh, MLX5_HW_INV_QUEUE,\n+\t\tret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE,\n \t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk,\n \t\t\t\t\t\t   NULL, true);\n \t\tif (ret)\n \t\t\treturn ret;\n-\t\tret = mlx5_aso_mtr_wait(sh, MLX5_HW_INV_QUEUE, aso_mtr);\n+\t\tret = mlx5_aso_mtr_wait(priv, aso_mtr, false);\n \t\tif (ret)\n \t\t\treturn ret;\n \t} else {\n@@ -2148,7 +2148,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,\n \t/* If ASO meter supported, update ASO flow meter by wqe. */\n \tif (priv->sh->meter_aso_en) {\n \t\taso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);\n-\t\tret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,\n+\t\tret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE,\n \t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk, NULL, true);\n \t\tif (ret)\n \t\t\tgoto error;\n@@ -2210,6 +2210,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id,\n \tstruct mlx5_flow_meter_info *fm;\n \tstruct mlx5_flow_meter_policy *policy = NULL;\n \tstruct mlx5_aso_mtr *aso_mtr;\n+\tstruct mlx5_hw_q_job *job;\n \tint ret;\n \n \tif (!priv->mtr_profile_arr ||\n@@ -2255,12 +2256,20 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id,\n \tfm->shared = !!shared;\n \tfm->initialized = 1;\n \t/* Update ASO flow meter by wqe. 
*/\n-\tret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr,\n-\t\t\t\t\t   &priv->mtr_bulk, NULL, true);\n-\tif (ret)\n+\tjob = mlx5_flow_action_job_init(priv, MLX5_HW_INV_QUEUE, NULL, NULL,\n+\t\t\t\t\tNULL, MLX5_HW_Q_JOB_TYPE_CREATE, NULL);\n+\tif (!job)\n+\t\treturn -rte_mtr_error_set(error, ENOMEM,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_ID,\n+\t\t\t\t\t  NULL, \"No job context.\");\n+\tret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, aso_mtr,\n+\t\t\t\t\t   &priv->mtr_bulk, job, true);\n+\tif (ret) {\n+\t\tflow_hw_job_put(priv, job, MLX5_HW_INV_QUEUE);\n \t\treturn -rte_mtr_error_set(error, ENOTSUP,\n-\t\t\tRTE_MTR_ERROR_TYPE_UNSPECIFIED,\n-\t\t\tNULL, \"Failed to create devx meter.\");\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL, \"Failed to create devx meter.\");\n+\t}\n \tfm->active_state = params->meter_enable;\n \t__atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);\n \t__atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);\n@@ -2911,7 +2920,7 @@ mlx5_flow_meter_attach(struct mlx5_priv *priv,\n \t\tstruct mlx5_aso_mtr *aso_mtr;\n \n \t\taso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);\n-\t\tif (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {\n+\t\tif (mlx5_aso_mtr_wait(priv, aso_mtr, false)) {\n \t\t\treturn rte_flow_error_set(error, ENOENT,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\tNULL,\n",
    "prefixes": [
        "v2",
        "2/3"
    ]
}
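
Updating a patch goes through the same endpoint with PUT or PATCH. A hedged sketch, assuming you hold a Patchwork API token with maintainer rights on the project; the token string is a placeholder and the chosen fields are illustrative (writable fields are limited to what the server permits for your account):

    import requests

    # Partially update the patch with a token-authenticated PATCH request.
    # "Token <your-api-token>" is a placeholder, not a real credential.
    headers = {"Authorization": "Token <your-api-token>"}
    payload = {"state": "accepted", "archived": True}

    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/138095/",
        json=payload,
        headers=headers,
    )
    resp.raise_for_status()
    print(resp.json()["state"], resp.json()["archived"])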