get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
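
Only a maintainer-writable subset of the fields shown below (typically state, delegate, and archived) can be changed through put or patch, and requests must be authenticated. A hypothetical sketch of such an update, with a placeholder token and placeholder values:

PATCH /api/patches/117013/ HTTP/1.1
Host: patches.dpdk.org
Authorization: Token <your-api-token>
Content-Type: application/json

{"state": "accepted", "archived": false}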

GET /api/patches/117013/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117013,
    "url": "http://patches.dpdk.org/api/patches/117013/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220928033130.9106-15-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220928033130.9106-15-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220928033130.9106-15-suanmingm@nvidia.com",
    "date": "2022-09-28T03:31:27",
    "name": "[v2,14/17] net/mlx5: add async action push and pull support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "83d40ed3f8ca822c05a543fd42211c73cfd642b3",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220928033130.9106-15-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 24870,
            "url": "http://patches.dpdk.org/api/series/24870/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24870",
            "date": "2022-09-28T03:31:15",
            "name": "net/mlx5: HW steering PMD update",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/24870/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/117013/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/117013/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 7B657A00C2;\n\tWed, 28 Sep 2022 05:34:20 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 55FE142BC8;\n\tWed, 28 Sep 2022 05:32:41 +0200 (CEST)",
            "from NAM11-CO1-obe.outbound.protection.outlook.com\n (mail-co1nam11on2074.outbound.protection.outlook.com [40.107.220.74])\n by mails.dpdk.org (Postfix) with ESMTP id E485142BAE\n for <dev@dpdk.org>; Wed, 28 Sep 2022 05:32:37 +0200 (CEST)",
            "from DM6PR05CA0047.namprd05.prod.outlook.com (2603:10b6:5:335::16)\n by PH0PR12MB7079.namprd12.prod.outlook.com (2603:10b6:510:21d::8) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5654.26; Wed, 28 Sep\n 2022 03:32:33 +0000",
            "from DM6NAM11FT049.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:335:cafe::36) by DM6PR05CA0047.outlook.office365.com\n (2603:10b6:5:335::16) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5676.9 via Frontend\n Transport; Wed, 28 Sep 2022 03:32:33 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n DM6NAM11FT049.mail.protection.outlook.com (10.13.172.188) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5676.17 via Frontend Transport; Wed, 28 Sep 2022 03:32:32 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Tue, 27 Sep\n 2022 20:32:19 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Tue, 27 Sep\n 2022 20:32:17 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=DRfr34nEmIvC5aPCECRfegZZYd7dvYVJp2d59cSZDcZgQaVIWTh915ISZozI8l+BX1S5XQcSlNimr2rBiuixYzt4xQDAoJrtIkAgUELAE3bwF+280WH8WisgOUjKwHfbuMWbKj/UtNo75KW3lbZM2aRgBeJJfjXkFm1PupS8BHNZVWwyInt0jDgKq1/EUbn+VGeQa4pGwvv7SLxp9Rkv5hwEys9oazLf3CPb3ohkSOcQBaqpa5FfHlTiUavq/OSjo9S1V9RnpPhWszPlNdRhoDI4zzdf3ojJ4viR0NqhjGKrILety9EwGeKqCo8TZp8HNhD9/2s0V6JJpp/Eias4Tg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=1IkUwRH4Fa48xyzBsHO/8OH6UoEIVrWzWAlvoZrznSw=;\n b=HSB7RZzkhW0e4yEnmSfgdIj7EWV54ghCxFMzOgVw0+aSfbIaWYuloLyLJn923dd7vz26Wj1lkPepZgTAXX4/h8W+3ZB4Zqf8mRApsydnQ54/VP8Tr2NsPUI/YqycDMH/CRAmvyj/ira94KqztX+8K5IIqsOI+IjfEFS3U0Ni03e3wLusXkFXVW052hE7lRWmNOmlYK5Zrtp6Ehl5DGMO77p4dmV7r4lN3irVe8LBvgy4rMoP1it/7j/gmcd8XKKVxx4Q8MDCyxiB6EWGmJym33lJSiBOZ9yBixN75lPldsYuMIc7gM6knWX4Um6gce2jrcbWBBKbVy4T2tu7RmcehQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=1IkUwRH4Fa48xyzBsHO/8OH6UoEIVrWzWAlvoZrznSw=;\n b=qGm2w9TVlJ6wJlcF+2dTdpQv2QGNSQn3hf7TzBEmE5kPX11OgpU5XXLxnq2J8tBC6HeCAF8FpyPNlhv/zbS/gYGap0Jrf6CFGUv7IDlj3ZUL+2KOtoBd8WWbYW2A5jdi81+DaFwRheyQ3kN+8BTxnTYhNps9VVn4U/6DH3tXhA16mo+Q6vpXkPOvZMSnTUU+YdKe9KcrErg9LsM63I/E9fR/pUAoCWCaoTSXwMhsxL8ZwDCQWVpbvVS+nljhc0K7PZJGVVIbkINfYFiJLqJD4i7BdjvMybjeG7s3TVI/3LP8/k/D2GRPYmdoRN7GGGZtB9kb0USzETTrpXdcTk9aAQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>",
        "CC": "<dev@dpdk.org>, <rasland@nvidia.com>, <orika@nvidia.com>",
        "Subject": "[PATCH v2 14/17] net/mlx5: add async action push and pull support",
        "Date": "Wed, 28 Sep 2022 06:31:27 +0300",
        "Message-ID": "<20220928033130.9106-15-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220928033130.9106-1-suanmingm@nvidia.com>",
        "References": "<20220923144334.27736-1-suanmingm@nvidia.com>\n <20220928033130.9106-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DM6NAM11FT049:EE_|PH0PR12MB7079:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "b31f5b46-d8b5-48d8-8433-08daa102146b",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 2RGwCcKDRFYflOkjVvp+CZCD/ev0I/vMRyTv36unIp072r81b6Cj5RdLL6wDoW9SQUQ2gOB8rCVTw38phnbKXnRH7qk7Ba/5RcPRNiJ8zAolz8CnDMqDv2TZO2oLxHOkD/C7lMVJqxFs4lxqzxOAoyFQeI7Pl5tl3UkWPELp96Ld7zZUjGXs7VGiMOZjGKwdqO5F3/ScQ+RZ1B7suF5lEW9wU9ovvALPN/jmjtjBFAdLyRnQ97XpM+tBQ6i9wj0t91HEo9Vx0tm02eFdYIjAaJI/4U08PFIAnwJqjiaAwOp80AnTiIX8kvzP2PWxjPP44wuC+sfS0Uc5POwVqb25DqFcxB3t5TlwDmK5olL17v5OM8NgPDFe7o1cs8BGZADppbKzvntQS93iMdHX38hnp/JNRxhhC/WroPYPTkJ6Wkm9KxGlN5rV5pp4WnprLgtUm6LRNyzf+s6yLZSXq4hOyXb0Qf03XBW6FwycNP5S2uirLNZyqxrVi9J5cppIDv5R4q9qQnUcc1Rx/+QA/OYOdyfeXszPQKsBFb58iElxphkOgqXWU0UVscTcvb6yq1/qdEtgZcE4DpxQ+/9gAenoCIJYO8N1FAI/vkMxJeaZXs0NmYvKqtClFar9QX3LgnyBf746eadAVnDW1OQXJl3LpFqU7gKVvSj3cesSpuokIs6IBhKlXniWScrpW8f1XkDcQ/gkSrhlizY2brxbDIKrVBijMeq23cOPY2siN/ChKo0el0+hoZ9m97M0VC691E7R5lYDHCtmUq4+/nOqsV3yeMpIdY+adb+KfOzTBveUC0E=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(39860400002)(136003)(376002)(346002)(396003)(451199015)(40470700004)(36840700001)(46966006)(6636002)(36756003)(82740400003)(186003)(1076003)(83380400001)(2616005)(336012)(16526019)(426003)(86362001)(7636003)(47076005)(356005)(36860700001)(6286002)(55016003)(40460700003)(478600001)(6666004)(107886003)(7696005)(8676002)(40480700001)(110136005)(26005)(70206006)(54906003)(316002)(70586007)(8936002)(5660300002)(30864003)(2906002)(41300700001)(4326008)(82310400005)(559001)(579004)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Sep 2022 03:32:32.9381 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n b31f5b46-d8b5-48d8-8433-08daa102146b",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT049.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH0PR12MB7079",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The queue based rte_flow_async_action_* functions work same as\nqueue based async flow functions. The operations can be pushed\nasynchronously, so is the pull.\n\nThis commit adds the async action missing push and pull support.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h            |  62 ++++-\n drivers/net/mlx5/mlx5_flow.c       |  45 ++++\n drivers/net/mlx5/mlx5_flow.h       |  17 ++\n drivers/net/mlx5/mlx5_flow_aso.c   | 181 +++++++++++--\n drivers/net/mlx5/mlx5_flow_dv.c    |   7 +-\n drivers/net/mlx5/mlx5_flow_hw.c    | 412 +++++++++++++++++++++++++----\n drivers/net/mlx5/mlx5_flow_meter.c |   6 +-\n 7 files changed, 626 insertions(+), 104 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex eca719f269..5d92df8965 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -341,6 +341,8 @@ struct mlx5_lb_ctx {\n enum {\n \tMLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */\n \tMLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */\n+\tMLX5_HW_Q_JOB_TYPE_UPDATE,\n+\tMLX5_HW_Q_JOB_TYPE_QUERY,\n };\n \n #define MLX5_HW_MAX_ITEMS (16)\n@@ -348,12 +350,23 @@ enum {\n /* HW steering flow management job descriptor. */\n struct mlx5_hw_q_job {\n \tuint32_t type; /* Job type. */\n-\tstruct rte_flow_hw *flow; /* Flow attached to the job. */\n+\tunion {\n+\t\tstruct rte_flow_hw *flow; /* Flow attached to the job. */\n+\t\tconst void *action; /* Indirect action attached to the job. */\n+\t};\n \tvoid *user_data; /* Job user data. */\n \tuint8_t *encap_data; /* Encap data. */\n \tstruct mlx5_modification_cmd *mhdr_cmd;\n \tstruct rte_flow_item *items;\n-\tstruct rte_flow_item_ethdev port_spec;\n+\tunion {\n+\t\tstruct {\n+\t\t\t/* Pointer to ct query user memory. */\n+\t\t\tstruct rte_flow_action_conntrack *profile;\n+\t\t\t/* Pointer to ct ASO query out memory. */\n+\t\t\tvoid *out_data;\n+\t\t} __rte_packed;\n+\t\tstruct rte_flow_item_ethdev port_spec;\n+\t} __rte_packed;\n };\n \n /* HW steering job descriptor LIFO pool. */\n@@ -361,6 +374,8 @@ struct mlx5_hw_q {\n \tuint32_t job_idx; /* Free job index. */\n \tuint32_t size; /* LIFO size. */\n \tstruct mlx5_hw_q_job **job; /* LIFO header. */\n+\tstruct rte_ring *indir_cq; /* Indirect action SW completion queue. */\n+\tstruct rte_ring *indir_iq; /* Indirect action SW in progress queue. */\n } __rte_cache_aligned;\n \n \n@@ -569,6 +584,7 @@ struct mlx5_aso_sq_elem {\n \t\t\tstruct mlx5_aso_ct_action *ct;\n \t\t\tchar *query_data;\n \t\t};\n+\t\tvoid *user_data;\n \t};\n };\n \n@@ -578,7 +594,9 @@ struct mlx5_aso_sq {\n \tstruct mlx5_aso_cq cq;\n \tstruct mlx5_devx_sq sq_obj;\n \tstruct mlx5_pmd_mr mr;\n+\tvolatile struct mlx5_aso_wqe *db;\n \tuint16_t pi;\n+\tuint16_t db_pi;\n \tuint32_t head;\n \tuint32_t tail;\n \tuint32_t sqn;\n@@ -993,6 +1011,7 @@ struct mlx5_flow_meter_profile {\n enum mlx5_aso_mtr_state {\n \tASO_METER_FREE, /* In free list. */\n \tASO_METER_WAIT, /* ACCESS_ASO WQE in progress. */\n+\tASO_METER_WAIT_ASYNC, /* CQE will be handled by async pull. */\n \tASO_METER_READY, /* CQE received. */\n };\n \n@@ -1195,6 +1214,7 @@ struct mlx5_bond_info {\n enum mlx5_aso_ct_state {\n \tASO_CONNTRACK_FREE, /* Inactive, in the free list. */\n \tASO_CONNTRACK_WAIT, /* WQE sent in the SQ. */\n+\tASO_CONNTRACK_WAIT_ASYNC, /* CQE will be handled by async pull. */\n \tASO_CONNTRACK_READY, /* CQE received w/o error. */\n \tASO_CONNTRACK_QUERY, /* WQE for query sent. */\n \tASO_CONNTRACK_MAX, /* Guard. */\n@@ -1203,13 +1223,21 @@ enum mlx5_aso_ct_state {\n /* Generic ASO connection tracking structure. */\n struct mlx5_aso_ct_action {\n \tunion {\n-\t\tLIST_ENTRY(mlx5_aso_ct_action) next;\n-\t\t/* Pointer to the next ASO CT. Used only in SWS. */\n-\t\tstruct mlx5_aso_ct_pool *pool;\n-\t\t/* Pointer to action pool. Used only in HWS. */\n+\t\t/* SWS mode struct. */\n+\t\tstruct {\n+\t\t\t/* Pointer to the next ASO CT. Used only in SWS. */\n+\t\t\tLIST_ENTRY(mlx5_aso_ct_action) next;\n+\t\t};\n+\t\t/* HWS mode struct. */\n+\t\tstruct {\n+\t\t\t/* Pointer to action pool. Used only in HWS. */\n+\t\t\tstruct mlx5_aso_ct_pool *pool;\n+\t\t};\n \t};\n-\tvoid *dr_action_orig; /* General action object for original dir. 
*/\n-\tvoid *dr_action_rply; /* General action object for reply dir. */\n+\t/* General action object for original dir. */\n+\tvoid *dr_action_orig;\n+\t/* General action object for reply dir. */\n+\tvoid *dr_action_rply;\n \tuint32_t refcnt; /* Action used count in device flows. */\n \tuint16_t offset; /* Offset of ASO CT in DevX objects bulk. */\n \tuint16_t peer; /* The only peer port index could also use this CT. */\n@@ -2135,18 +2163,21 @@ int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);\n void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,\n \t\t\t   enum mlx5_access_aso_opc_mod aso_opc_mod);\n int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n-\t\t\t\t struct mlx5_aso_mtr *mtr,\n-\t\t\t\t struct mlx5_mtr_bulk *bulk);\n+\t\tstruct mlx5_aso_mtr *mtr, struct mlx5_mtr_bulk *bulk,\n+\t\tvoid *user_data, bool push);\n int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\tstruct mlx5_aso_mtr *mtr);\n int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t      struct mlx5_aso_ct_action *ct,\n-\t\t\t      const struct rte_flow_action_conntrack *profile);\n+\t\t\t      const struct rte_flow_action_conntrack *profile,\n+\t\t\t      void *user_data,\n+\t\t\t      bool push);\n int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t   struct mlx5_aso_ct_action *ct);\n int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t     struct mlx5_aso_ct_action *ct,\n-\t\t\t     struct rte_flow_action_conntrack *profile);\n+\t\t\t     struct rte_flow_action_conntrack *profile,\n+\t\t\t     void *user_data, bool push);\n int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t  struct mlx5_aso_ct_action *ct);\n uint32_t\n@@ -2154,6 +2185,13 @@ mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);\n uint32_t\n mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);\n \n+void mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,\n+\t\t\t     char *wdata);\n+void mlx5_aso_push_wqe(struct mlx5_dev_ctx_shared *sh,\n+\t\t       struct mlx5_aso_sq *sq);\n+int mlx5_aso_pull_completion(struct mlx5_aso_sq *sq,\n+\t\t\t     struct rte_flow_op_result res[],\n+\t\t\t     uint16_t n_res);\n int mlx5_aso_cnt_queue_init(struct mlx5_dev_ctx_shared *sh);\n void mlx5_aso_cnt_queue_uninit(struct mlx5_dev_ctx_shared *sh);\n int mlx5_aso_cnt_query(struct mlx5_dev_ctx_shared *sh,\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 4bfa604578..bc2ccb4d3c 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -979,6 +979,14 @@ mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t  void *user_data,\n \t\t\t\t  struct rte_flow_error *error);\n \n+static int\n+mlx5_flow_async_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t\t const struct rte_flow_action_handle *handle,\n+\t\t\t\t void *data,\n+\t\t\t\t void *user_data,\n+\t\t\t\t struct rte_flow_error *error);\n+\n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n \t.create = mlx5_flow_create,\n@@ -1015,6 +1023,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.push = mlx5_flow_push,\n \t.async_action_handle_create = mlx5_flow_async_action_handle_create,\n \t.async_action_handle_update = 
mlx5_flow_async_action_handle_update,\n+\t.async_action_handle_query = mlx5_flow_async_action_handle_query,\n \t.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,\n };\n \n@@ -8858,6 +8867,42 @@ mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t\t update, user_data, error);\n }\n \n+/**\n+ * Query shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used..\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] handle\n+ *   Action handle to be updated.\n+ * @param[in] data\n+ *   Pointer query result data.\n+ * @param[in] user_data\n+ *   Pointer to the user_data.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_async_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t    const struct rte_flow_op_attr *attr,\n+\t\t\t\t    const struct rte_flow_action_handle *handle,\n+\t\t\t\t    void *data,\n+\t\t\t\t    void *user_data,\n+\t\t\t\t    struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops =\n+\t\t\tflow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\n+\treturn fops->async_action_query(dev, queue, attr, handle,\n+\t\t\t\t\tdata, user_data, error);\n+}\n+\n /**\n  * Destroy shared action.\n  *\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 30a18ea35e..e45869a890 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -57,6 +57,13 @@ enum mlx5_rte_flow_field_id {\n \n #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29\n \n+#define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \\\n+\t(((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET)\n+\n+#define MLX5_INDIRECT_ACTION_IDX_GET(handle) \\\n+\t(((uint32_t)(uintptr_t)(handle)) & \\\n+\t ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))\n+\n enum {\n \tMLX5_INDIRECT_ACTION_TYPE_RSS,\n \tMLX5_INDIRECT_ACTION_TYPE_AGE,\n@@ -1816,6 +1823,15 @@ typedef int (*mlx5_flow_async_action_handle_update_t)\n \t\t\t void *user_data,\n \t\t\t struct rte_flow_error *error);\n \n+typedef int (*mlx5_flow_async_action_handle_query_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t uint32_t queue,\n+\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t const struct rte_flow_action_handle *handle,\n+\t\t\t void *data,\n+\t\t\t void *user_data,\n+\t\t\t struct rte_flow_error *error);\n+\n typedef int (*mlx5_flow_async_action_handle_destroy_t)\n \t\t\t(struct rte_eth_dev *dev,\n \t\t\t uint32_t queue,\n@@ -1878,6 +1894,7 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_push_t push;\n \tmlx5_flow_async_action_handle_create_t async_action_create;\n \tmlx5_flow_async_action_handle_update_t async_action_update;\n+\tmlx5_flow_async_action_handle_query_t async_action_query;\n \tmlx5_flow_async_action_handle_destroy_t async_action_destroy;\n };\n \ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex f371fff2e2..43ef893e9d 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -519,6 +519,70 @@ mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)\n \t\t\t       (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);\n }\n \n+int\n+mlx5_aso_pull_completion(struct mlx5_aso_sq *sq,\n+\t\t\t struct rte_flow_op_result res[],\n+\t\t\t uint16_t n_res)\n+{\n+\tstruct mlx5_aso_cq *cq = &sq->cq;\n+\tvolatile struct mlx5_cqe *restrict cqe;\n+\tconst uint32_t cq_size = 1 << 
cq->log_desc_n;\n+\tconst uint32_t mask = cq_size - 1;\n+\tuint32_t idx;\n+\tuint32_t next_idx;\n+\tuint16_t max;\n+\tuint16_t n = 0;\n+\tint ret;\n+\n+\tmax = (uint16_t)(sq->head - sq->tail);\n+\tif (unlikely(!max || !n_res))\n+\t\treturn 0;\n+\tnext_idx = cq->cq_ci & mask;\n+\tdo {\n+\t\tidx = next_idx;\n+\t\tnext_idx = (cq->cq_ci + 1) & mask;\n+\t\t/* Need to confirm the position of the prefetch. */\n+\t\trte_prefetch0(&cq->cq_obj.cqes[next_idx]);\n+\t\tcqe = &cq->cq_obj.cqes[idx];\n+\t\tret = check_cqe(cqe, cq_size, cq->cq_ci);\n+\t\t/*\n+\t\t * Be sure owner read is done before any other cookie field or\n+\t\t * opaque field.\n+\t\t */\n+\t\trte_io_rmb();\n+\t\tif (ret == MLX5_CQE_STATUS_HW_OWN)\n+\t\t\tbreak;\n+\t\tres[n].user_data = sq->elts[(uint16_t)((sq->tail + n) & mask)].user_data;\n+\t\tif (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {\n+\t\t\tmlx5_aso_cqe_err_handle(sq);\n+\t\t\tres[n].status = RTE_FLOW_OP_ERROR;\n+\t\t} else {\n+\t\t\tres[n].status = RTE_FLOW_OP_SUCCESS;\n+\t\t}\n+\t\tcq->cq_ci++;\n+\t\tif (++n == n_res)\n+\t\t\tbreak;\n+\t} while (1);\n+\tif (likely(n)) {\n+\t\tsq->tail += n;\n+\t\trte_io_wmb();\n+\t\tcq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);\n+\t}\n+\treturn n;\n+}\n+\n+void\n+mlx5_aso_push_wqe(struct mlx5_dev_ctx_shared *sh,\n+\t\t  struct mlx5_aso_sq *sq)\n+{\n+\tif (sq->db_pi == sq->pi)\n+\t\treturn;\n+\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)sq->db,\n+\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t   !sh->tx_uar.dbnc);\n+\tsq->db_pi = sq->pi;\n+}\n+\n /**\n  * Update ASO objects upon completion.\n  *\n@@ -728,7 +792,9 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t\t\t       struct mlx5_aso_sq *sq,\n \t\t\t       struct mlx5_aso_mtr *aso_mtr,\n \t\t\t       struct mlx5_mtr_bulk *bulk,\n-\t\t\t\t   bool need_lock)\n+\t\t\t       bool need_lock,\n+\t\t\t       void *user_data,\n+\t\t\t       bool push)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n \tstruct mlx5_flow_meter_info *fm = NULL;\n@@ -754,7 +820,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \trte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);\n \t/* Fill next WQE. */\n \tfm = &aso_mtr->fm;\n-\tsq->elts[sq->head & mask].mtr = aso_mtr;\n+\tsq->elts[sq->head & mask].mtr = user_data ? user_data : aso_mtr;\n \tif (aso_mtr->type == ASO_METER_INDIRECT) {\n \t\tif (likely(sh->config.dv_flow_en == 2))\n \t\t\tpool = aso_mtr->pool;\n@@ -820,9 +886,13 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t */\n \tsq->head++;\n \tsq->pi += 2;/* Each WQE contains 2 WQEBB's. 
*/\n-\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n+\tif (push) {\n+\t\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n \t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n \t\t\t   !sh->tx_uar.dbnc);\n+\t\tsq->db_pi = sq->pi;\n+\t}\n+\tsq->db = wqe;\n \tif (need_lock)\n \t\trte_spinlock_unlock(&sq->sqsl);\n \treturn 1;\n@@ -912,11 +982,14 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)\n int\n mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\tstruct mlx5_aso_mtr *mtr,\n-\t\t\tstruct mlx5_mtr_bulk *bulk)\n+\t\t\tstruct mlx5_mtr_bulk *bulk,\n+\t\t\tvoid *user_data,\n+\t\t\tbool push)\n {\n \tstruct mlx5_aso_sq *sq;\n \tuint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;\n \tbool need_lock;\n+\tint ret;\n \n \tif (likely(sh->config.dv_flow_en == 2)) {\n \t\tif (queue == MLX5_HW_INV_QUEUE) {\n@@ -930,10 +1003,15 @@ mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\tsq = &sh->mtrmng->pools_mng.sq;\n \t\tneed_lock = true;\n \t}\n+\tif (queue != MLX5_HW_INV_QUEUE) {\n+\t\tret = mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk,\n+\t\t\t\t\t\t     need_lock, user_data, push);\n+\t\treturn ret > 0 ? 0 : -1;\n+\t}\n \tdo {\n \t\tmlx5_aso_mtr_completion_handle(sq, need_lock);\n-\t\tif (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr,\n-\t\t\t\t\t\t   bulk, need_lock))\n+\t\tif (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk,\n+\t\t\t\t\t\t   need_lock, NULL, true))\n \t\t\treturn 0;\n \t\t/* Waiting for wqe resource. */\n \t\trte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n@@ -962,6 +1040,7 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n {\n \tstruct mlx5_aso_sq *sq;\n \tuint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;\n+\tuint8_t state;\n \tbool need_lock;\n \n \tif (likely(sh->config.dv_flow_en == 2)) {\n@@ -976,8 +1055,8 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\tsq = &sh->mtrmng->pools_mng.sq;\n \t\tneed_lock = true;\n \t}\n-\tif (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==\n-\t\t\t\t\t    ASO_METER_READY)\n+\tstate = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);\n+\tif (state == ASO_METER_READY || state == ASO_METER_WAIT_ASYNC)\n \t\treturn 0;\n \tdo {\n \t\tmlx5_aso_mtr_completion_handle(sq, need_lock);\n@@ -1093,7 +1172,9 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t\t\t      struct mlx5_aso_sq *sq,\n \t\t\t      struct mlx5_aso_ct_action *ct,\n \t\t\t      const struct rte_flow_action_conntrack *profile,\n-\t\t\t      bool need_lock)\n+\t\t\t      bool need_lock,\n+\t\t\t      void *user_data,\n+\t\t\t      bool push)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n \tuint16_t size = 1 << sq->log_desc_n;\n@@ -1117,10 +1198,16 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \twqe = &sq->sq_obj.aso_wqes[sq->head & mask];\n \trte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);\n \t/* Fill next WQE. */\n-\tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);\n-\tsq->elts[sq->head & mask].ct = ct;\n-\tsq->elts[sq->head & mask].query_data = NULL;\n+\tMLX5_ASO_CT_UPDATE_STATE(ct,\n+\t\t\tuser_data ? ASO_CONNTRACK_WAIT_ASYNC : ASO_CONNTRACK_WAIT);\n+\tif (user_data) {\n+\t\tsq->elts[sq->head & mask].user_data = user_data;\n+\t} else {\n+\t\tsq->elts[sq->head & mask].ct = ct;\n+\t\tsq->elts[sq->head & mask].query_data = NULL;\n+\t}\n \tpool = __mlx5_aso_ct_get_pool(sh, ct);\n+\n \t/* Each WQE will have a single CT object. 
*/\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n \t\t\t\t\t\t  ct->offset);\n@@ -1200,9 +1287,13 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t\t profile->reply_dir.max_ack);\n \tsq->head++;\n \tsq->pi += 2; /* Each WQE contains 2 WQEBB's. */\n-\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n-\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n-\t\t\t   !sh->tx_uar.dbnc);\n+\tif (push) {\n+\t\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n+\t\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t\t   !sh->tx_uar.dbnc);\n+\t\tsq->db_pi = sq->pi;\n+\t}\n+\tsq->db = wqe;\n \tif (need_lock)\n \t\trte_spinlock_unlock(&sq->sqsl);\n \treturn 1;\n@@ -1258,7 +1349,9 @@ static int\n mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \t\t\t    struct mlx5_aso_sq *sq,\n \t\t\t    struct mlx5_aso_ct_action *ct, char *data,\n-\t\t\t    bool need_lock)\n+\t\t\t    bool need_lock,\n+\t\t\t    void *user_data,\n+\t\t\t    bool push)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n \tuint16_t size = 1 << sq->log_desc_n;\n@@ -1284,14 +1377,23 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \t\tDRV_LOG(ERR, \"Fail: SQ is full and no free WQE to send\");\n \t\treturn 0;\n \t}\n-\tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_QUERY);\n+\tMLX5_ASO_CT_UPDATE_STATE(ct,\n+\t\t\tuser_data ? ASO_CONNTRACK_WAIT_ASYNC : ASO_CONNTRACK_QUERY);\n \twqe = &sq->sq_obj.aso_wqes[sq->head & mask];\n \t/* Confirm the location and address of the prefetch instruction. */\n \trte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);\n \t/* Fill next WQE. */\n \twqe_idx = sq->head & mask;\n-\tsq->elts[wqe_idx].ct = ct;\n-\tsq->elts[wqe_idx].query_data = data;\n+\t/* Check if this is async mode. */\n+\tif (user_data) {\n+\t\tstruct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)user_data;\n+\n+\t\tsq->elts[wqe_idx].ct = user_data;\n+\t\tjob->out_data = (char *)((uintptr_t)sq->mr.addr + wqe_idx * 64);\n+\t} else {\n+\t\tsq->elts[wqe_idx].query_data = data;\n+\t\tsq->elts[wqe_idx].ct = ct;\n+\t}\n \tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \t/* Each WQE will have a single CT object. 
*/\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n@@ -1317,9 +1419,13 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \t * data segment is not used in this case.\n \t */\n \tsq->pi += 2;\n-\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n-\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n-\t\t\t   !sh->tx_uar.dbnc);\n+\tif (push) {\n+\t\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n+\t\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t\t   !sh->tx_uar.dbnc);\n+\t\tsq->db_pi = sq->pi;\n+\t}\n+\tsq->db = wqe;\n \tif (need_lock)\n \t\trte_spinlock_unlock(&sq->sqsl);\n \treturn 1;\n@@ -1405,20 +1511,29 @@ int\n mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\t\t  uint32_t queue,\n \t\t\t  struct mlx5_aso_ct_action *ct,\n-\t\t\t  const struct rte_flow_action_conntrack *profile)\n+\t\t\t  const struct rte_flow_action_conntrack *profile,\n+\t\t\t  void *user_data,\n+\t\t\t  bool push)\n {\n \tuint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n \tstruct mlx5_aso_ct_pool *pool = __mlx5_aso_ct_get_pool(sh, ct);\n \tstruct mlx5_aso_sq *sq;\n \tbool need_lock = !!(queue == MLX5_HW_INV_QUEUE);\n+\tint ret;\n \n \tif (sh->config.dv_flow_en == 2)\n \t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n \telse\n \t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n+\tif (queue != MLX5_HW_INV_QUEUE) {\n+\t\tret = mlx5_aso_ct_sq_enqueue_single(sh, sq, ct, profile,\n+\t\t\t\t\t\t    need_lock, user_data, push);\n+\t\treturn ret > 0 ? 0 : -1;\n+\t}\n \tdo {\n-\t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n-\t\tif (mlx5_aso_ct_sq_enqueue_single(sh, sq, ct, profile, need_lock))\n+\t\tmlx5_aso_ct_completion_handle(sh, sq,  need_lock);\n+\t\tif (mlx5_aso_ct_sq_enqueue_single(sh, sq, ct, profile,\n+\t\t\t\t\t\t  need_lock, NULL, true))\n \t\t\treturn 0;\n \t\t/* Waiting for wqe resource. */\n \t\trte_delay_us_sleep(10u);\n@@ -1478,7 +1593,7 @@ mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n  * @param[in] wdata\n  *   Pointer to data fetched from hardware.\n  */\n-static inline void\n+void\n mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,\n \t\t\tchar *wdata)\n {\n@@ -1562,7 +1677,8 @@ int\n mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\t\t uint32_t queue,\n \t\t\t struct mlx5_aso_ct_action *ct,\n-\t\t\t struct rte_flow_action_conntrack *profile)\n+\t\t\t struct rte_flow_action_conntrack *profile,\n+\t\t\t void *user_data, bool push)\n {\n \tuint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n \tstruct mlx5_aso_ct_pool *pool = __mlx5_aso_ct_get_pool(sh, ct);\n@@ -1575,9 +1691,15 @@ mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n \telse\n \t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n+\tif (queue != MLX5_HW_INV_QUEUE) {\n+\t\tret = mlx5_aso_ct_sq_query_single(sh, sq, ct, out_data,\n+\t\t\t\t\t\t  need_lock, user_data, push);\n+\t\treturn ret > 0 ? 
0 : -1;\n+\t}\n \tdo {\n \t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n-\t\tret = mlx5_aso_ct_sq_query_single(sh, sq, ct, out_data, need_lock);\n+\t\tret = mlx5_aso_ct_sq_query_single(sh, sq, ct, out_data,\n+\t\t\t\tneed_lock, NULL, true);\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t\telse if (ret > 0)\n@@ -1628,7 +1750,8 @@ mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,\n \t\trte_errno = ENXIO;\n \t\treturn -rte_errno;\n \t} else if (state == ASO_CONNTRACK_READY ||\n-\t\t   state == ASO_CONNTRACK_QUERY) {\n+\t\t   state == ASO_CONNTRACK_QUERY ||\n+\t\t   state == ASO_CONNTRACK_WAIT_ASYNC) {\n \t\treturn 0;\n \t}\n \tdo {\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 58a7e94ee0..085cb23c78 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -13091,7 +13091,7 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"Failed to allocate CT object\");\n \tct = flow_aso_ct_get_by_dev_idx(dev, idx);\n-\tif (mlx5_aso_ct_update_by_wqe(sh, MLX5_HW_INV_QUEUE, ct, pro)) {\n+\tif (mlx5_aso_ct_update_by_wqe(sh, MLX5_HW_INV_QUEUE, ct, pro, NULL, true)) {\n \t\tflow_dv_aso_ct_dev_release(dev, idx);\n \t\trte_flow_error_set(error, EBUSY,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n@@ -15904,7 +15904,7 @@ __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,\n \t\tif (ret)\n \t\t\treturn ret;\n \t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,\n-\t\t\t\t\t\tct, new_prf);\n+\t\t\t\t\t\tct, new_prf, NULL, true);\n \t\tif (ret)\n \t\t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -16740,7 +16740,8 @@ flow_dv_action_query(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\tct->peer;\n \t\t((struct rte_flow_action_conntrack *)data)->is_original_dir =\n \t\t\t\t\t\t\tct->is_original;\n-\t\tif (mlx5_aso_ct_query_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ct, data))\n+\t\tif (mlx5_aso_ct_query_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ct,\n+\t\t\t\t\tdata, NULL, true))\n \t\t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\tNULL,\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 5c0981d385..1879c8e9ca 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -1161,9 +1161,9 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n }\n \n static __rte_always_inline struct mlx5_aso_mtr *\n-flow_hw_meter_mark_alloc(struct rte_eth_dev *dev,\n-\t\t\t   const struct rte_flow_action *action,\n-\t\t\t   uint32_t queue)\n+flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t const struct rte_flow_action *action,\n+\t\t\t void *user_data, bool push)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n@@ -1183,13 +1183,14 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev,\n \tfm->is_enable = meter_mark->state;\n \tfm->color_aware = meter_mark->color_mode;\n \taso_mtr->pool = pool;\n-\taso_mtr->state = ASO_METER_WAIT;\n+\taso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?\n+\t\t\t  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;\n \taso_mtr->offset = mtr_id - 1;\n \taso_mtr->init_color = (meter_mark->color_mode) ?\n \t\tmeter_mark->init_color : RTE_COLOR_GREEN;\n \t/* Update ASO flow meter by wqe. 
*/\n \tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,\n-\t\t\t\t\t &priv->mtr_bulk)) {\n+\t\t\t\t\t &priv->mtr_bulk, user_data, push)) {\n \t\tmlx5_ipool_free(pool->idx_pool, mtr_id);\n \t\treturn NULL;\n \t}\n@@ -1214,7 +1215,7 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \n-\taso_mtr = flow_hw_meter_mark_alloc(dev, action, queue);\n+\taso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true);\n \tif (!aso_mtr)\n \t\treturn -1;\n \n@@ -2278,9 +2279,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t\trte_col_2_mlx5_col(aso_mtr->init_color);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n+\t\t\t/*\n+\t\t\t * Allocate meter directly will slow down flow\n+\t\t\t * insertion rate.\n+\t\t\t */\n \t\t\tret = flow_hw_meter_mark_compile(dev,\n \t\t\t\tact_data->action_dst, action,\n-\t\t\t\trule_acts, &job->flow->mtr_id, queue);\n+\t\t\t\trule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE);\n \t\t\tif (ret != 0)\n \t\t\t\treturn ret;\n \t\t\tbreak;\n@@ -2587,6 +2592,74 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,\n \t}\n }\n \n+static inline int\n+__flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,\n+\t\t\t\t uint32_t queue,\n+\t\t\t\t struct rte_flow_op_result res[],\n+\t\t\t\t uint16_t n_res)\n+\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_ring *r = priv->hw_q[queue].indir_cq;\n+\tstruct mlx5_hw_q_job *job;\n+\tvoid *user_data = NULL;\n+\tuint32_t type, idx;\n+\tstruct mlx5_aso_mtr *aso_mtr;\n+\tstruct mlx5_aso_ct_action *aso_ct;\n+\tint ret_comp, i;\n+\n+\tret_comp = (int)rte_ring_count(r);\n+\tif (ret_comp > n_res)\n+\t\tret_comp = n_res;\n+\tfor (i = 0; i < ret_comp; i++) {\n+\t\trte_ring_dequeue(r, &user_data);\n+\t\tres[i].user_data = user_data;\n+\t\tres[i].status = RTE_FLOW_OP_SUCCESS;\n+\t}\n+\tif (ret_comp < n_res && priv->hws_mpool)\n+\t\tret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],\n+\t\t\t\t&res[ret_comp], n_res - ret_comp);\n+\tif (ret_comp < n_res && priv->hws_ctpool)\n+\t\tret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],\n+\t\t\t\t&res[ret_comp], n_res - ret_comp);\n+\tfor (i = 0; i <  ret_comp; i++) {\n+\t\tjob = (struct mlx5_hw_q_job *)res[i].user_data;\n+\t\t/* Restore user data. 
*/\n+\t\tres[i].user_data = job->user_data;\n+\t\tif (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {\n+\t\t\ttype = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);\n+\t\t\tif (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {\n+\t\t\t\tidx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);\n+\t\t\t\tmlx5_ipool_free(priv->hws_mpool->idx_pool, idx);\n+\t\t\t}\n+\t\t} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {\n+\t\t\ttype = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);\n+\t\t\tif (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {\n+\t\t\t\tidx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);\n+\t\t\t\taso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);\n+\t\t\t\taso_mtr->state = ASO_METER_READY;\n+\t\t\t} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {\n+\t\t\t\tidx = MLX5_ACTION_CTX_CT_GET_IDX\n+\t\t\t\t\t((uint32_t)(uintptr_t)job->action);\n+\t\t\t\taso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);\n+\t\t\t\taso_ct->state = ASO_CONNTRACK_READY;\n+\t\t\t}\n+\t\t} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {\n+\t\t\ttype = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);\n+\t\t\tif (type == MLX5_INDIRECT_ACTION_TYPE_CT) {\n+\t\t\t\tidx = MLX5_ACTION_CTX_CT_GET_IDX\n+\t\t\t\t\t((uint32_t)(uintptr_t)job->action);\n+\t\t\t\taso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);\n+\t\t\t\tmlx5_aso_ct_obj_analyze(job->profile,\n+\t\t\t\t\t\t\tjob->out_data);\n+\t\t\t\taso_ct->state = ASO_CONNTRACK_READY;\n+\t\t\t}\n+\t\t}\n+\t\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n+\t}\n+\treturn ret_comp;\n+}\n+\n /**\n  * Pull the enqueued flows.\n  *\n@@ -2619,6 +2692,7 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \tstruct mlx5_hw_q_job *job;\n \tint ret, i;\n \n+\t/* 1. Pull the flow completion. */\n \tret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);\n \tif (ret < 0)\n \t\treturn rte_flow_error_set(error, rte_errno,\n@@ -2644,9 +2718,34 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \t\t}\n \t\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n \t}\n+\t/* 2. Pull indirect action comp. 
*/\n+\tif (ret < n_res)\n+\t\tret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],\n+\t\t\t\t\t\t\tn_res - ret);\n \treturn ret;\n }\n \n+static inline void\n+__flow_hw_push_action(struct rte_eth_dev *dev,\n+\t\t    uint32_t queue)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_ring *iq = priv->hw_q[queue].indir_iq;\n+\tstruct rte_ring *cq = priv->hw_q[queue].indir_cq;\n+\tvoid *job = NULL;\n+\tuint32_t ret, i;\n+\n+\tret = rte_ring_count(iq);\n+\tfor (i = 0; i < ret; i++) {\n+\t\trte_ring_dequeue(iq, &job);\n+\t\trte_ring_enqueue(cq, job);\n+\t}\n+\tif (priv->hws_ctpool)\n+\t\tmlx5_aso_push_wqe(priv->sh, &priv->ct_mng->aso_sqs[queue]);\n+\tif (priv->hws_mpool)\n+\t\tmlx5_aso_push_wqe(priv->sh, &priv->hws_mpool->sq[queue]);\n+}\n+\n /**\n  * Push the enqueued flows to HW.\n  *\n@@ -2670,6 +2769,7 @@ flow_hw_push(struct rte_eth_dev *dev,\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tint ret;\n \n+\t__flow_hw_push_action(dev, queue);\n \tret = mlx5dr_send_queue_action(priv->dr_ctx, queue,\n \t\t\t\t       MLX5DR_SEND_QUEUE_ACTION_DRAIN);\n \tif (ret) {\n@@ -5906,7 +6006,7 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t/* Adds one queue to be used by PMD.\n \t * The last queue will be used by the PMD.\n \t */\n-\tuint16_t nb_q_updated;\n+\tuint16_t nb_q_updated = 0;\n \tstruct rte_flow_queue_attr **_queue_attr = NULL;\n \tstruct rte_flow_queue_attr ctrl_queue_attr = {0};\n \tbool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);\n@@ -5973,6 +6073,7 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\tgoto err;\n \t}\n \tfor (i = 0; i < nb_q_updated; i++) {\n+\t\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n \t\tuint8_t *encap = NULL;\n \t\tstruct mlx5_modification_cmd *mhdr_cmd = NULL;\n \t\tstruct rte_flow_item *items = NULL;\n@@ -6000,6 +6101,22 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\t\tjob[j].items = &items[j * MLX5_HW_MAX_ITEMS];\n \t\t\tpriv->hw_q[i].job[j] = &job[j];\n \t\t}\n+\t\tsnprintf(mz_name, sizeof(mz_name), \"port_%u_indir_act_cq_%u\",\n+\t\t\t dev->data->port_id, i);\n+\t\tpriv->hw_q[i].indir_cq = rte_ring_create(mz_name,\n+\t\t\t\t_queue_attr[i]->size, SOCKET_ID_ANY,\n+\t\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ |\n+\t\t\t\tRING_F_EXACT_SZ);\n+\t\tif (!priv->hw_q[i].indir_cq)\n+\t\t\tgoto err;\n+\t\tsnprintf(mz_name, sizeof(mz_name), \"port_%u_indir_act_iq_%u\",\n+\t\t\t dev->data->port_id, i);\n+\t\tpriv->hw_q[i].indir_iq = rte_ring_create(mz_name,\n+\t\t\t\t_queue_attr[i]->size, SOCKET_ID_ANY,\n+\t\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ |\n+\t\t\t\tRING_F_EXACT_SZ);\n+\t\tif (!priv->hw_q[i].indir_iq)\n+\t\t\tgoto err;\n \t}\n \tdr_ctx_attr.pd = priv->sh->cdev->pd;\n \tdr_ctx_attr.queues = nb_q_updated;\n@@ -6117,6 +6234,12 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \tflow_hw_destroy_vlan(dev);\n \tif (dr_ctx)\n \t\tclaim_zero(mlx5dr_context_close(dr_ctx));\n+\tfor (i = 0; i < nb_q_updated; i++) {\n+\t\tif (priv->hw_q[i].indir_iq)\n+\t\t\trte_ring_free(priv->hw_q[i].indir_iq);\n+\t\tif (priv->hw_q[i].indir_cq)\n+\t\t\trte_ring_free(priv->hw_q[i].indir_cq);\n+\t}\n \tmlx5_free(priv->hw_q);\n \tpriv->hw_q = NULL;\n \tif (priv->acts_ipool) {\n@@ -6146,7 +6269,7 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \tstruct rte_flow_template_table *tbl;\n \tstruct rte_flow_pattern_template *it;\n \tstruct rte_flow_actions_template *at;\n-\tint i;\n+\tuint32_t i;\n \n \tif (!priv->dr_ctx)\n \t\treturn;\n@@ -6192,6 +6315,10 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \t\tflow_hw_ct_mng_destroy(dev, priv->ct_mng);\n 
\t\tpriv->ct_mng = NULL;\n \t}\n+\tfor (i = 0; i < priv->nb_queue; i++) {\n+\t\trte_ring_free(priv->hw_q[i].indir_iq);\n+\t\trte_ring_free(priv->hw_q[i].indir_cq);\n+\t}\n \tmlx5_free(priv->hw_q);\n \tpriv->hw_q = NULL;\n \tclaim_zero(mlx5dr_context_close(priv->dr_ctx));\n@@ -6380,8 +6507,9 @@ flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,\n }\n \n static int\n-flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t idx,\n+flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,\n \t\t\tstruct rte_flow_action_conntrack *profile,\n+\t\t\tvoid *user_data, bool push,\n \t\t\tstruct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -6405,7 +6533,7 @@ flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t idx,\n \t}\n \tprofile->peer_port = ct->peer;\n \tprofile->is_original_dir = ct->is_original;\n-\tif (mlx5_aso_ct_query_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ct, profile))\n+\tif (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))\n \t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL,\n@@ -6417,7 +6545,8 @@ flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t idx,\n static int\n flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t const struct rte_flow_modify_conntrack *action_conf,\n-\t\t\t uint32_t idx, struct rte_flow_error *error)\n+\t\t\t uint32_t idx, void *user_data, bool push,\n+\t\t\t struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n@@ -6448,7 +6577,8 @@ flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\tret = mlx5_validate_action_ct(dev, new_prf, error);\n \t\tif (ret)\n \t\t\treturn ret;\n-\t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf);\n+\t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,\n+\t\t\t\t\t\tuser_data, push);\n \t\tif (ret)\n \t\t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -6470,6 +6600,7 @@ flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n static struct rte_flow_action_handle *\n flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t const struct rte_flow_action_conntrack *pro,\n+\t\t\t void *user_data, bool push,\n \t\t\t struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -6496,7 +6627,7 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,\n \tct->is_original = !!pro->is_original_dir;\n \tct->peer = pro->peer_port;\n \tct->pool = pool;\n-\tif (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro)) {\n+\tif (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {\n \t\tmlx5_ipool_free(pool->cts, ct_idx);\n \t\trte_flow_error_set(error, EBUSY,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n@@ -6588,15 +6719,29 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t     struct rte_flow_error *error)\n {\n \tstruct rte_flow_action_handle *handle = NULL;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_action_age *age;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tcnt_id_t cnt_id;\n \tuint32_t mtr_id;\n \tuint32_t age_idx;\n+\tbool push = true;\n+\tbool aso = false;\n \n-\tRTE_SET_USED(attr);\n-\tRTE_SET_USED(user_data);\n+\tif (attr) {\n+\t\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\t\tif 
(unlikely(!priv->hw_q[queue].job_idx)) {\n+\t\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\"Flow queue full.\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\t\tjob->type = MLX5_HW_Q_JOB_TYPE_CREATE;\n+\t\tjob->user_data = user_data;\n+\t\tpush = !attr->postpone;\n+\t}\n \tswitch (action->type) {\n \tcase RTE_FLOW_ACTION_TYPE_AGE:\n \t\tage = action->conf;\n@@ -6624,10 +6769,13 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t (uintptr_t)cnt_id;\n \t\tbreak;\n \tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n-\t\thandle = flow_hw_conntrack_create(dev, queue, action->conf, error);\n+\t\taso = true;\n+\t\thandle = flow_hw_conntrack_create(dev, queue, action->conf, job,\n+\t\t\t\t\t\t  push, error);\n \t\tbreak;\n \tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n-\t\taso_mtr = flow_hw_meter_mark_alloc(dev, action, queue);\n+\t\taso = true;\n+\t\taso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push);\n \t\tif (!aso_mtr)\n \t\t\tbreak;\n \t\tmtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<\n@@ -6640,7 +6788,20 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \tdefault:\n \t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t   NULL, \"action type not supported\");\n-\t\treturn NULL;\n+\t\tbreak;\n+\t}\n+\tif (job) {\n+\t\tif (!handle) {\n+\t\t\tpriv->hw_q[queue].job_idx++;\n+\t\t\treturn NULL;\n+\t\t}\n+\t\tjob->action = handle;\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n+\t\tif (aso)\n+\t\t\treturn handle;\n+\t\trte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :\n+\t\t\t\t priv->hw_q[queue].indir_iq, job);\n \t}\n \treturn handle;\n }\n@@ -6674,32 +6835,56 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t     void *user_data,\n \t\t\t     struct rte_flow_error *error)\n {\n-\tRTE_SET_USED(attr);\n-\tRTE_SET_USED(user_data);\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n+\tconst struct rte_flow_modify_conntrack *ct_conf =\n+\t\t(const struct rte_flow_modify_conntrack *)update;\n \tconst struct rte_flow_update_meter_mark *upd_meter_mark =\n \t\t(const struct rte_flow_update_meter_mark *)update;\n \tconst struct rte_flow_action_meter_mark *meter_mark;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tstruct mlx5_flow_meter_info *fm;\n \tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n \tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \tuint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n+\tint ret = 0;\n+\tbool push = true;\n+\tbool aso = false;\n \n+\tif (attr) {\n+\t\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\t\tif (unlikely(!priv->hw_q[queue].job_idx))\n+\t\t\treturn rte_flow_error_set(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\"Action update failed due to queue full.\");\n+\t\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\t\tjob->type = MLX5_HW_Q_JOB_TYPE_UPDATE;\n+\t\tjob->user_data = user_data;\n+\t\tpush = !attr->postpone;\n+\t}\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n-\t\treturn mlx5_hws_age_action_update(priv, idx, update, error);\n+\t\tret = mlx5_hws_age_action_update(priv, idx, update, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n-\t\treturn flow_hw_conntrack_update(dev, queue, update, act_idx, error);\n+\t\tif (ct_conf->state)\n+\t\t\taso = 
true;\n+\t\tret = flow_hw_conntrack_update(dev, queue, update, act_idx,\n+\t\t\t\t\t       job, push, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_METER_MARK:\n+\t\taso = true;\n \t\tmeter_mark = &upd_meter_mark->meter_mark;\n \t\t/* Find ASO object. */\n \t\taso_mtr = mlx5_ipool_get(pool->idx_pool, idx);\n-\t\tif (!aso_mtr)\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\tif (!aso_mtr) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Invalid meter_mark update index\");\n+\t\t\tbreak;\n+\t\t}\n \t\tfm = &aso_mtr->fm;\n \t\tif (upd_meter_mark->profile_valid)\n \t\t\tfm->profile = (struct mlx5_flow_meter_profile *)\n@@ -6713,25 +6898,46 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\tfm->is_enable = meter_mark->state;\n \t\t/* Update ASO flow meter by wqe. */\n \t\tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue,\n-\t\t\t\t\t\t aso_mtr, &priv->mtr_bulk))\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t aso_mtr, &priv->mtr_bulk, job, push)) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to update ASO meter WQE\");\n+\t\t\tbreak;\n+\t\t}\n \t\t/* Wait for ASO object completion. */\n \t\tif (queue == MLX5_HW_INV_QUEUE &&\n-\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to wait for ASO meter CQE\");\n+\t\t}\n \t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n-\t\treturn flow_dv_action_update(dev, handle, update, error);\n+\t\tret = flow_dv_action_update(dev, handle, update, error);\n+\t\tbreak;\n \tdefault:\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\tret = -ENOTSUP;\n+\t\trte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"action type not supported\");\n+\t\tbreak;\n \t}\n-\treturn 0;\n+\tif (job) {\n+\t\tif (ret) {\n+\t\t\tpriv->hw_q[queue].job_idx++;\n+\t\t\treturn ret;\n+\t\t}\n+\t\tjob->action = handle;\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n+\t\tif (aso)\n+\t\t\treturn 0;\n+\t\trte_ring_enqueue(push ? 
priv->hw_q[queue].indir_cq :\n+\t\t\t\t priv->hw_q[queue].indir_iq, job);\n+\t}\n+\treturn ret;\n }\n \n /**\n@@ -6766,15 +6972,28 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \tuint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tstruct mlx5_flow_meter_info *fm;\n+\tbool push = true;\n+\tbool aso = false;\n+\tint ret = 0;\n \n-\tRTE_SET_USED(queue);\n-\tRTE_SET_USED(attr);\n-\tRTE_SET_USED(user_data);\n+\tif (attr) {\n+\t\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\t\tif (unlikely(!priv->hw_q[queue].job_idx))\n+\t\t\treturn rte_flow_error_set(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\"Action destroy failed due to queue full.\");\n+\t\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\t\tjob->type = MLX5_HW_Q_JOB_TYPE_DESTROY;\n+\t\tjob->user_data = user_data;\n+\t\tpush = !attr->postpone;\n+\t}\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n-\t\treturn mlx5_hws_age_action_destroy(priv, age_idx, error);\n+\t\tret = mlx5_hws_age_action_destroy(priv, age_idx, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n \t\tage_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);\n \t\tif (age_idx != 0)\n@@ -6783,39 +7002,69 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t * time to update the AGE.\n \t\t\t */\n \t\t\tmlx5_hws_age_nb_cnt_decrease(priv, age_idx);\n-\t\treturn mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);\n+\t\tret = mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n-\t\treturn flow_hw_conntrack_destroy(dev, act_idx, error);\n+\t\tret = flow_hw_conntrack_destroy(dev, act_idx, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_METER_MARK:\n \t\taso_mtr = mlx5_ipool_get(pool->idx_pool, idx);\n-\t\tif (!aso_mtr)\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\tif (!aso_mtr) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Invalid meter_mark destroy index\");\n+\t\t\tbreak;\n+\t\t}\n \t\tfm = &aso_mtr->fm;\n \t\tfm->is_enable = 0;\n \t\t/* Update ASO flow meter by wqe. */\n \t\tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,\n-\t\t\t\t\t\t &priv->mtr_bulk))\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t &priv->mtr_bulk, job, push)) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to update ASO meter WQE\");\n+\t\t\tbreak;\n+\t\t}\n \t\t/* Wait for ASO object completion. 
*/\n \t\tif (queue == MLX5_HW_INV_QUEUE &&\n-\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to wait for ASO meter CQE\");\n-\t\tmlx5_ipool_free(pool->idx_pool, idx);\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (!job)\n+\t\t\tmlx5_ipool_free(pool->idx_pool, idx);\n+\t\telse\n+\t\t\taso = true;\n \t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n-\t\treturn flow_dv_action_destroy(dev, handle, error);\n+\t\tret = flow_dv_action_destroy(dev, handle, error);\n+\t\tbreak;\n \tdefault:\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\tret = -ENOTSUP;\n+\t\trte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"action type not supported\");\n+\t\tbreak;\n \t}\n-\treturn 0;\n+\tif (job) {\n+\t\tif (ret) {\n+\t\t\tpriv->hw_q[queue].job_idx++;\n+\t\t\treturn ret;\n+\t\t}\n+\t\tjob->action = handle;\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n+\t\tif (aso)\n+\t\t\treturn ret;\n+\t\trte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :\n+\t\t\t\t priv->hw_q[queue].indir_iq, job);\n+\t}\n+\treturn ret;\n }\n \n static int\n@@ -7045,28 +7294,76 @@ flow_hw_action_update(struct rte_eth_dev *dev,\n }\n \n static int\n-flow_hw_action_query(struct rte_eth_dev *dev,\n-\t\t     const struct rte_flow_action_handle *handle, void *data,\n-\t\t     struct rte_flow_error *error)\n+flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t    const struct rte_flow_op_attr *attr,\n+\t\t\t    const struct rte_flow_action_handle *handle,\n+\t\t\t    void *data, void *user_data,\n+\t\t\t    struct rte_flow_error *error)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n \tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \tuint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;\n+\tint ret;\n+\tbool push = true;\n+\tbool aso = false;\n \n+\tif (attr) {\n+\t\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\t\tif (unlikely(!priv->hw_q[queue].job_idx))\n+\t\t\treturn rte_flow_error_set(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\"Action destroy failed due to queue full.\");\n+\t\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\t\tjob->type = MLX5_HW_Q_JOB_TYPE_QUERY;\n+\t\tjob->user_data = user_data;\n+\t\tpush = !attr->postpone;\n+\t}\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n-\t\treturn flow_hw_query_age(dev, age_idx, data, error);\n+\t\tret = flow_hw_query_age(dev, age_idx, data, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n-\t\treturn flow_hw_query_counter(dev, act_idx, data, error);\n+\t\tret = flow_hw_query_counter(dev, act_idx, data, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n-\t\treturn flow_hw_conntrack_query(dev, act_idx, data, error);\n-\tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n-\t\treturn flow_dv_action_query(dev, handle, data, error);\n+\t\taso = true;\n+\t\tif (job)\n+\t\t\tjob->profile = (struct rte_flow_action_conntrack *)data;\n+\t\tret = flow_hw_conntrack_query(dev, queue, act_idx, data,\n+\t\t\t\t\t      job, push, error);\n+\t\tbreak;\n \tdefault:\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\tret = -ENOTSUP;\n+\t\trte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  
RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"action type not supported\");\n+\t\tbreak;\n+\t}\n+\tif (job) {\n+\t\tif (ret) {\n+\t\t\tpriv->hw_q[queue].job_idx++;\n+\t\t\treturn ret;\n+\t\t}\n+\t\tjob->action = handle;\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n+\t\tif (aso)\n+\t\t\treturn ret;\n+\t\trte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :\n+\t\t\t\t priv->hw_q[queue].indir_iq, job);\n \t}\n+\treturn ret;\n+}\n+\n+static int\n+flow_hw_action_query(struct rte_eth_dev *dev,\n+\t\t     const struct rte_flow_action_handle *handle, void *data,\n+\t\t     struct rte_flow_error *error)\n+{\n+\treturn flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,\n+\t\t\thandle, data, NULL, error);\n }\n \n /**\n@@ -7181,6 +7478,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.async_action_create = flow_hw_action_handle_create,\n \t.async_action_destroy = flow_hw_action_handle_destroy,\n \t.async_action_update = flow_hw_action_handle_update,\n+\t.async_action_query = flow_hw_action_handle_query,\n \t.action_validate = flow_hw_action_validate,\n \t.action_create = flow_hw_action_create,\n \t.action_destroy = flow_hw_action_destroy,\ndiff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c\nindex fd1337ae73..480ac6c8ec 100644\n--- a/drivers/net/mlx5/mlx5_flow_meter.c\n+++ b/drivers/net/mlx5/mlx5_flow_meter.c\n@@ -1627,7 +1627,7 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv,\n \t\tfm->is_enable = !!is_enable;\n \t\taso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);\n \t\tret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,\n-\t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk);\n+\t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk, NULL, true);\n \t\tif (ret)\n \t\t\treturn ret;\n \t\tret = mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr);\n@@ -1877,7 +1877,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,\n \tif (priv->sh->meter_aso_en) {\n \t\taso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);\n \t\tret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,\n-\t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk);\n+\t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk, NULL, true);\n \t\tif (ret)\n \t\t\tgoto error;\n \t\tif (!priv->mtr_idx_tbl) {\n@@ -1983,7 +1983,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id,\n \tfm->initialized = 1;\n \t/* Update ASO flow meter by wqe. */\n \tret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr,\n-\t\t\t\t\t   &priv->mtr_bulk);\n+\t\t\t\t\t   &priv->mtr_bulk, NULL, true);\n \tif (ret)\n \t\treturn -rte_mtr_error_set(error, ENOTSUP,\n \t\t\tRTE_MTR_ERROR_TYPE_UNSPECIFIED,\n",
    "prefixes": [
        "v2",
        "14/17"
    ]
}
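
Editor's note: the hunks above route the PMD's async_action_* callbacks through per-queue job objects, ringing the doorbell immediately unless the operation is postponed and completing non-ASO jobs through the indir_cq/indir_iq rings. The sketch below shows the application-side counterpart of that flow, assuming the queue-based rte_flow API this series targets (rte_flow_op_attr.postpone, rte_flow_push(), rte_flow_pull()). It is an illustration only, not part of the patch: the function name update_meter_mark_async, the choice of flow queue 0, and the result-array size are arbitrary, and port_id, handle, and profile are assumed to come from earlier rte_flow_configure() and rte_flow_async_action_handle_create() setup.

#include <errno.h>
#include <rte_common.h>
#include <rte_flow.h>

static int
update_meter_mark_async(uint16_t port_id,
			struct rte_flow_action_handle *handle,
			struct rte_flow_meter_profile *profile)
{
	struct rte_flow_error error;
	struct rte_flow_op_result res[8];
	/* postpone = 1: the PMD queues the job (the !push path in the
	 * hunks above) instead of ringing the doorbell right away.
	 */
	const struct rte_flow_op_attr attr = { .postpone = 1 };
	struct rte_flow_update_meter_mark upd = {
		.meter_mark.profile = profile,	/* new profile to apply */
		.profile_valid = 1,
	};
	int n, ret;

	/* Enqueue the indirect meter-mark update on flow queue 0. */
	ret = rte_flow_async_action_handle_update(port_id, 0, &attr, handle,
						  &upd, NULL, &error);
	if (ret)
		return ret;
	/* Flush postponed work for queue 0 to the hardware. */
	ret = rte_flow_push(port_id, 0, &error);
	if (ret)
		return ret;
	/* Busy-poll until the completion of the update is pulled back. */
	do {
		n = rte_flow_pull(port_id, 0, res, RTE_DIM(res), &error);
	} while (n == 0);
	if (n < 0)
		return n;
	return res[0].status == RTE_FLOW_OP_SUCCESS ? 0 : -EIO;
}

The postpone/push split is the same trade-off the patch encodes in its push flag: batching several indirect-action operations behind one doorbell amortizes the PCIe write, at the cost of requiring an explicit rte_flow_push() before the device observes any of them.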