get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
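
For programmatic access, a minimal sketch (assumes libcurl, linked with
-lcurl; any HTTP client works) that fetches this patch object as JSON:

/*
 * Fetch one patch from the Patchwork REST API. libcurl's default write
 * callback prints the response body to stdout.
 */
#include <curl/curl.h>

int main(void)
{
	CURL *curl;
	CURLcode rc;

	if (curl_global_init(CURL_GLOBAL_DEFAULT) != 0)
		return 1;
	curl = curl_easy_init();
	if (curl == NULL) {
		curl_global_cleanup();
		return 1;
	}
	curl_easy_setopt(curl, CURLOPT_URL,
			 "http://patches.dpdk.org/api/patches/117224/");
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
	rc = curl_easy_perform(curl);
	curl_easy_cleanup(curl);
	curl_global_cleanup();
	return rc == CURLE_OK ? 0 : 1;
}

A sample request and response: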

GET /api/patches/117224/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117224,
    "url": "http://patches.dpdk.org/api/patches/117224/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220930125315.5079-15-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220930125315.5079-15-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220930125315.5079-15-suanmingm@nvidia.com",
    "date": "2022-09-30T12:53:12",
    "name": "[v3,14/17] net/mlx5: add async action push and pull support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "83d40ed3f8ca822c05a543fd42211c73cfd642b3",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220930125315.5079-15-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 24935,
            "url": "http://patches.dpdk.org/api/series/24935/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24935",
            "date": "2022-09-30T12:52:58",
            "name": "net/mlx5: HW steering PMD update",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/24935/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/117224/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/117224/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 89DC5A00C4;\n\tFri, 30 Sep 2022 14:55:47 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 18D2642BC1;\n\tFri, 30 Sep 2022 14:54:24 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2057.outbound.protection.outlook.com [40.107.243.57])\n by mails.dpdk.org (Postfix) with ESMTP id 7B35C40684\n for <dev@dpdk.org>; Fri, 30 Sep 2022 14:54:17 +0200 (CEST)",
            "from DM6PR08CA0047.namprd08.prod.outlook.com (2603:10b6:5:1e0::21)\n by MW4PR12MB7029.namprd12.prod.outlook.com (2603:10b6:303:1ef::13) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5676.19; Fri, 30 Sep\n 2022 12:54:15 +0000",
            "from DM6NAM11FT081.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:1e0:cafe::a1) by DM6PR08CA0047.outlook.office365.com\n (2603:10b6:5:1e0::21) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5676.20 via Frontend\n Transport; Fri, 30 Sep 2022 12:54:14 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n DM6NAM11FT081.mail.protection.outlook.com (10.13.172.136) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5676.17 via Frontend Transport; Fri, 30 Sep 2022 12:54:14 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Fri, 30 Sep\n 2022 05:54:06 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Fri, 30 Sep\n 2022 05:54:04 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=EymsDMKMrwsqQgFr6zVvpxE+iXFPcu/NzINPeav8R1QvMNtKEy/CkzuSVw9GfVSb7+7pQRbgkaXKo7UYpgHt7CAKAXq8Fmi3fqb55pq8JPzh4zev4Nq8LhcKWXaNXXPMbAV90OaWpeAi8rWBDElHGrJmwZWU0uGQJ1iJeGbbTGWt6j5uV22BkDlrx5aQRFDsfH6p3sOa4y/ABTqdf2opmHnIm6t5Ps2QKK8jx5gLAc6wZpoyyikr1/4HG2YNMEGy4dNUGhdBRI/hQVpaJ2xxsvqIY4JgxvyAJHI5vMRI9rNvpeOFWEg5tGt0mDy3s/9fEvSCCmg6o6FCmIXDl5r+4g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=ZJF/XfaNgHd6DR9bNgT6SgkHsqIsDtRdtOR38a7xEi0=;\n b=jvRCH08K9DQiJ0RwnpLl3r351TrkxGNrDN1c2KphWqqj3mVxNCc9MEVFTjA5v21QHi4ZupEqViH0apI9eQptst2GmgEbNTx0ZTfxX6p657UbPdDH/NcQUc7k/yOMNkV2UMeb4o0/XF02Jrx+8X/Zn6tm4sKxjD2Kot5omfOwGA7sCYioJKR0HNO73SSNOj2PrxvAt30VgbZ7LBnRgtgE/dGKZyM2FNjou8oH1d8tiY5ighw59AiD7mvydGye6kxAdVs/AXxTbyJJqM7ZL0kGM/iZDcgekjEhnINMUOIH6Sz1WpVbeSCaxRqjW441pwirF7zAbPwYovrtauO6jA+sNg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=ZJF/XfaNgHd6DR9bNgT6SgkHsqIsDtRdtOR38a7xEi0=;\n b=LREUMTdSfpHoNZ2DB/svAgdKzeT3cSsSvKelXfCUoQCwLruyZGwO/RWzQmE6dofpoGTlaTVp9VUAGXovoMMMp0OyVHpY55j17G0c60rbXP4+nAWVXUtsJTZfTVijghoVAc6INAMmKvB+mXqI1nbX3SwngWsrNZWzlnRD1kmmBi5ylE251JL+Ae4KCWrVRVOR+1Tg8pJSdNnOA4TmCXYR4z3zfrn3Bh3xvgzxdBKSahM5tE8+b91hP+sqaJaFUdNWA5IIpOa9aimTlicSqTRm6g4jiLvL4+BgsUaCa8k9jRzJ7RnkQNaXblmt/7petbk5f74Q6PIZF8xir3PkSXRRbw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>",
        "CC": "<dev@dpdk.org>, <rasland@nvidia.com>, <orika@nvidia.com>",
        "Subject": "[PATCH v3 14/17] net/mlx5: add async action push and pull support",
        "Date": "Fri, 30 Sep 2022 15:53:12 +0300",
        "Message-ID": "<20220930125315.5079-15-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220930125315.5079-1-suanmingm@nvidia.com>",
        "References": "<20220923144334.27736-1-suanmingm@nvidia.com>\n <20220930125315.5079-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DM6NAM11FT081:EE_|MW4PR12MB7029:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "e59340e2-f173-46dd-01f6-08daa2e2e111",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n M7Vfc1vnV+/i91HtwPD6MlASGVs7O9EoMT2WsyrDe/94ZwaOYR5mBZqEXp2otFeAkb3bBZvTZknn1nvnbnXtYaktjSf5UIxImlcpsZIVGMCKgZEH8KmnTbu1fERu0oTw7nm+hm5g8frvZMNSP31IAq1OYmVMdlsnBeEuc2IEzYI7XQ0WKvJEA0Dfe6e5r794SKTYyeurlSqCVzXoB98iPisYZ7mgGUDIxnNwuJ6Mc05WtF7BgfZrDh65SlA/CKZ0ErWh7xZCeveK33Qj+/QSMi5GHBpLS8GIQjrryFmWubCXDRGrZsvTL+TPp2ZerZs6dmgh8IbWVH9f3wDjhaqc2NQB0jJjQHGYUdAJcl3BK6yVKZtg/4Lvqs9lUCfcBlyJ7xTApmqjTOmj4TQd2jgqvoOy5cS24akZ+5tLYEju6HbJv/cwDoww1Y0yIYtjkjaATuiQOdCidoVdpWHp7bRl8oeUk4RcusjRCIXOLZbj41NMfIr9TwYLcr13QyqgQALJqXqShgYu5/cLvAnwOuWWs9LZ2+ImVr4S00M7cqjU6OejHwv+7KL1GOc3/tlc7lB3rEnDu+6nbYIDwGTte1ZE5Qr8yXTTQsySAchW328i2kuGjSFpWHxJa+FPOjCyBdk9qXgR+j2iNdB3eLMdREsSOiKxkQjt7zNOAY5pTeCXE3aQSFtLf8nnZexBWc/TKLSri5+7IY7Dl3CR4pYOtkIq3aCZGjAfBxcaZfXKgXdljuIasFtsYzTCxd2nALyZcLQpV1kJUVbWe5Li4pb2cU1RX2P9Cn21BtGeXQpvbtRqE+E=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(376002)(136003)(39860400002)(396003)(346002)(451199015)(36840700001)(40470700004)(46966006)(107886003)(8936002)(7696005)(6666004)(30864003)(82310400005)(6286002)(2906002)(7636003)(356005)(26005)(36756003)(41300700001)(47076005)(426003)(5660300002)(4326008)(70586007)(70206006)(82740400003)(336012)(8676002)(83380400001)(186003)(1076003)(16526019)(2616005)(40460700003)(55016003)(86362001)(40480700001)(6636002)(36860700001)(110136005)(54906003)(478600001)(316002)(559001)(579004)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Sep 2022 12:54:14.7237 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n e59340e2-f173-46dd-01f6-08daa2e2e111",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT081.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MW4PR12MB7029",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The queue based rte_flow_async_action_* functions work same as\nqueue based async flow functions. The operations can be pushed\nasynchronously, so is the pull.\n\nThis commit adds the async action missing push and pull support.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h            |  62 ++++-\n drivers/net/mlx5/mlx5_flow.c       |  45 ++++\n drivers/net/mlx5/mlx5_flow.h       |  17 ++\n drivers/net/mlx5/mlx5_flow_aso.c   | 181 +++++++++++--\n drivers/net/mlx5/mlx5_flow_dv.c    |   7 +-\n drivers/net/mlx5/mlx5_flow_hw.c    | 412 +++++++++++++++++++++++++----\n drivers/net/mlx5/mlx5_flow_meter.c |   6 +-\n 7 files changed, 626 insertions(+), 104 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex c83157d0da..f6033710aa 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -341,6 +341,8 @@ struct mlx5_lb_ctx {\n enum {\n \tMLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */\n \tMLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */\n+\tMLX5_HW_Q_JOB_TYPE_UPDATE,\n+\tMLX5_HW_Q_JOB_TYPE_QUERY,\n };\n \n #define MLX5_HW_MAX_ITEMS (16)\n@@ -348,12 +350,23 @@ enum {\n /* HW steering flow management job descriptor. */\n struct mlx5_hw_q_job {\n \tuint32_t type; /* Job type. */\n-\tstruct rte_flow_hw *flow; /* Flow attached to the job. */\n+\tunion {\n+\t\tstruct rte_flow_hw *flow; /* Flow attached to the job. */\n+\t\tconst void *action; /* Indirect action attached to the job. */\n+\t};\n \tvoid *user_data; /* Job user data. */\n \tuint8_t *encap_data; /* Encap data. */\n \tstruct mlx5_modification_cmd *mhdr_cmd;\n \tstruct rte_flow_item *items;\n-\tstruct rte_flow_item_ethdev port_spec;\n+\tunion {\n+\t\tstruct {\n+\t\t\t/* Pointer to ct query user memory. */\n+\t\t\tstruct rte_flow_action_conntrack *profile;\n+\t\t\t/* Pointer to ct ASO query out memory. */\n+\t\t\tvoid *out_data;\n+\t\t} __rte_packed;\n+\t\tstruct rte_flow_item_ethdev port_spec;\n+\t} __rte_packed;\n };\n \n /* HW steering job descriptor LIFO pool. */\n@@ -361,6 +374,8 @@ struct mlx5_hw_q {\n \tuint32_t job_idx; /* Free job index. */\n \tuint32_t size; /* LIFO size. */\n \tstruct mlx5_hw_q_job **job; /* LIFO header. */\n+\tstruct rte_ring *indir_cq; /* Indirect action SW completion queue. */\n+\tstruct rte_ring *indir_iq; /* Indirect action SW in progress queue. */\n } __rte_cache_aligned;\n \n \n@@ -569,6 +584,7 @@ struct mlx5_aso_sq_elem {\n \t\t\tstruct mlx5_aso_ct_action *ct;\n \t\t\tchar *query_data;\n \t\t};\n+\t\tvoid *user_data;\n \t};\n };\n \n@@ -578,7 +594,9 @@ struct mlx5_aso_sq {\n \tstruct mlx5_aso_cq cq;\n \tstruct mlx5_devx_sq sq_obj;\n \tstruct mlx5_pmd_mr mr;\n+\tvolatile struct mlx5_aso_wqe *db;\n \tuint16_t pi;\n+\tuint16_t db_pi;\n \tuint32_t head;\n \tuint32_t tail;\n \tuint32_t sqn;\n@@ -993,6 +1011,7 @@ struct mlx5_flow_meter_profile {\n enum mlx5_aso_mtr_state {\n \tASO_METER_FREE, /* In free list. */\n \tASO_METER_WAIT, /* ACCESS_ASO WQE in progress. */\n+\tASO_METER_WAIT_ASYNC, /* CQE will be handled by async pull. */\n \tASO_METER_READY, /* CQE received. */\n };\n \n@@ -1195,6 +1214,7 @@ struct mlx5_bond_info {\n enum mlx5_aso_ct_state {\n \tASO_CONNTRACK_FREE, /* Inactive, in the free list. */\n \tASO_CONNTRACK_WAIT, /* WQE sent in the SQ. */\n+\tASO_CONNTRACK_WAIT_ASYNC, /* CQE will be handled by async pull. */\n \tASO_CONNTRACK_READY, /* CQE received w/o error. */\n \tASO_CONNTRACK_QUERY, /* WQE for query sent. */\n \tASO_CONNTRACK_MAX, /* Guard. */\n@@ -1203,13 +1223,21 @@ enum mlx5_aso_ct_state {\n /* Generic ASO connection tracking structure. */\n struct mlx5_aso_ct_action {\n \tunion {\n-\t\tLIST_ENTRY(mlx5_aso_ct_action) next;\n-\t\t/* Pointer to the next ASO CT. Used only in SWS. */\n-\t\tstruct mlx5_aso_ct_pool *pool;\n-\t\t/* Pointer to action pool. Used only in HWS. */\n+\t\t/* SWS mode struct. */\n+\t\tstruct {\n+\t\t\t/* Pointer to the next ASO CT. Used only in SWS. */\n+\t\t\tLIST_ENTRY(mlx5_aso_ct_action) next;\n+\t\t};\n+\t\t/* HWS mode struct. */\n+\t\tstruct {\n+\t\t\t/* Pointer to action pool. Used only in HWS. */\n+\t\t\tstruct mlx5_aso_ct_pool *pool;\n+\t\t};\n \t};\n-\tvoid *dr_action_orig; /* General action object for original dir. 
*/\n-\tvoid *dr_action_rply; /* General action object for reply dir. */\n+\t/* General action object for original dir. */\n+\tvoid *dr_action_orig;\n+\t/* General action object for reply dir. */\n+\tvoid *dr_action_rply;\n \tuint32_t refcnt; /* Action used count in device flows. */\n \tuint16_t offset; /* Offset of ASO CT in DevX objects bulk. */\n \tuint16_t peer; /* The only peer port index could also use this CT. */\n@@ -2135,18 +2163,21 @@ int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);\n void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,\n \t\t\t   enum mlx5_access_aso_opc_mod aso_opc_mod);\n int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n-\t\t\t\t struct mlx5_aso_mtr *mtr,\n-\t\t\t\t struct mlx5_mtr_bulk *bulk);\n+\t\tstruct mlx5_aso_mtr *mtr, struct mlx5_mtr_bulk *bulk,\n+\t\tvoid *user_data, bool push);\n int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\tstruct mlx5_aso_mtr *mtr);\n int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t      struct mlx5_aso_ct_action *ct,\n-\t\t\t      const struct rte_flow_action_conntrack *profile);\n+\t\t\t      const struct rte_flow_action_conntrack *profile,\n+\t\t\t      void *user_data,\n+\t\t\t      bool push);\n int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t   struct mlx5_aso_ct_action *ct);\n int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t     struct mlx5_aso_ct_action *ct,\n-\t\t\t     struct rte_flow_action_conntrack *profile);\n+\t\t\t     struct rte_flow_action_conntrack *profile,\n+\t\t\t     void *user_data, bool push);\n int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t  struct mlx5_aso_ct_action *ct);\n uint32_t\n@@ -2154,6 +2185,13 @@ mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);\n uint32_t\n mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);\n \n+void mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,\n+\t\t\t     char *wdata);\n+void mlx5_aso_push_wqe(struct mlx5_dev_ctx_shared *sh,\n+\t\t       struct mlx5_aso_sq *sq);\n+int mlx5_aso_pull_completion(struct mlx5_aso_sq *sq,\n+\t\t\t     struct rte_flow_op_result res[],\n+\t\t\t     uint16_t n_res);\n int mlx5_aso_cnt_queue_init(struct mlx5_dev_ctx_shared *sh);\n void mlx5_aso_cnt_queue_uninit(struct mlx5_dev_ctx_shared *sh);\n int mlx5_aso_cnt_query(struct mlx5_dev_ctx_shared *sh,\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 4bfa604578..bc2ccb4d3c 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -979,6 +979,14 @@ mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t  void *user_data,\n \t\t\t\t  struct rte_flow_error *error);\n \n+static int\n+mlx5_flow_async_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t\t const struct rte_flow_action_handle *handle,\n+\t\t\t\t void *data,\n+\t\t\t\t void *user_data,\n+\t\t\t\t struct rte_flow_error *error);\n+\n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n \t.create = mlx5_flow_create,\n@@ -1015,6 +1023,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.push = mlx5_flow_push,\n \t.async_action_handle_create = mlx5_flow_async_action_handle_create,\n \t.async_action_handle_update = 
mlx5_flow_async_action_handle_update,\n+\t.async_action_handle_query = mlx5_flow_async_action_handle_query,\n \t.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,\n };\n \n@@ -8858,6 +8867,42 @@ mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t\t update, user_data, error);\n }\n \n+/**\n+ * Query shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used..\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] handle\n+ *   Action handle to be updated.\n+ * @param[in] data\n+ *   Pointer query result data.\n+ * @param[in] user_data\n+ *   Pointer to the user_data.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_async_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t    const struct rte_flow_op_attr *attr,\n+\t\t\t\t    const struct rte_flow_action_handle *handle,\n+\t\t\t\t    void *data,\n+\t\t\t\t    void *user_data,\n+\t\t\t\t    struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops =\n+\t\t\tflow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\n+\treturn fops->async_action_query(dev, queue, attr, handle,\n+\t\t\t\t\tdata, user_data, error);\n+}\n+\n /**\n  * Destroy shared action.\n  *\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 30a18ea35e..e45869a890 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -57,6 +57,13 @@ enum mlx5_rte_flow_field_id {\n \n #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29\n \n+#define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \\\n+\t(((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET)\n+\n+#define MLX5_INDIRECT_ACTION_IDX_GET(handle) \\\n+\t(((uint32_t)(uintptr_t)(handle)) & \\\n+\t ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))\n+\n enum {\n \tMLX5_INDIRECT_ACTION_TYPE_RSS,\n \tMLX5_INDIRECT_ACTION_TYPE_AGE,\n@@ -1816,6 +1823,15 @@ typedef int (*mlx5_flow_async_action_handle_update_t)\n \t\t\t void *user_data,\n \t\t\t struct rte_flow_error *error);\n \n+typedef int (*mlx5_flow_async_action_handle_query_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t uint32_t queue,\n+\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t const struct rte_flow_action_handle *handle,\n+\t\t\t void *data,\n+\t\t\t void *user_data,\n+\t\t\t struct rte_flow_error *error);\n+\n typedef int (*mlx5_flow_async_action_handle_destroy_t)\n \t\t\t(struct rte_eth_dev *dev,\n \t\t\t uint32_t queue,\n@@ -1878,6 +1894,7 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_push_t push;\n \tmlx5_flow_async_action_handle_create_t async_action_create;\n \tmlx5_flow_async_action_handle_update_t async_action_update;\n+\tmlx5_flow_async_action_handle_query_t async_action_query;\n \tmlx5_flow_async_action_handle_destroy_t async_action_destroy;\n };\n \ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex f371fff2e2..43ef893e9d 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -519,6 +519,70 @@ mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)\n \t\t\t       (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);\n }\n \n+int\n+mlx5_aso_pull_completion(struct mlx5_aso_sq *sq,\n+\t\t\t struct rte_flow_op_result res[],\n+\t\t\t uint16_t n_res)\n+{\n+\tstruct mlx5_aso_cq *cq = &sq->cq;\n+\tvolatile struct mlx5_cqe *restrict cqe;\n+\tconst uint32_t cq_size = 1 << 
cq->log_desc_n;\n+\tconst uint32_t mask = cq_size - 1;\n+\tuint32_t idx;\n+\tuint32_t next_idx;\n+\tuint16_t max;\n+\tuint16_t n = 0;\n+\tint ret;\n+\n+\tmax = (uint16_t)(sq->head - sq->tail);\n+\tif (unlikely(!max || !n_res))\n+\t\treturn 0;\n+\tnext_idx = cq->cq_ci & mask;\n+\tdo {\n+\t\tidx = next_idx;\n+\t\tnext_idx = (cq->cq_ci + 1) & mask;\n+\t\t/* Need to confirm the position of the prefetch. */\n+\t\trte_prefetch0(&cq->cq_obj.cqes[next_idx]);\n+\t\tcqe = &cq->cq_obj.cqes[idx];\n+\t\tret = check_cqe(cqe, cq_size, cq->cq_ci);\n+\t\t/*\n+\t\t * Be sure owner read is done before any other cookie field or\n+\t\t * opaque field.\n+\t\t */\n+\t\trte_io_rmb();\n+\t\tif (ret == MLX5_CQE_STATUS_HW_OWN)\n+\t\t\tbreak;\n+\t\tres[n].user_data = sq->elts[(uint16_t)((sq->tail + n) & mask)].user_data;\n+\t\tif (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {\n+\t\t\tmlx5_aso_cqe_err_handle(sq);\n+\t\t\tres[n].status = RTE_FLOW_OP_ERROR;\n+\t\t} else {\n+\t\t\tres[n].status = RTE_FLOW_OP_SUCCESS;\n+\t\t}\n+\t\tcq->cq_ci++;\n+\t\tif (++n == n_res)\n+\t\t\tbreak;\n+\t} while (1);\n+\tif (likely(n)) {\n+\t\tsq->tail += n;\n+\t\trte_io_wmb();\n+\t\tcq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);\n+\t}\n+\treturn n;\n+}\n+\n+void\n+mlx5_aso_push_wqe(struct mlx5_dev_ctx_shared *sh,\n+\t\t  struct mlx5_aso_sq *sq)\n+{\n+\tif (sq->db_pi == sq->pi)\n+\t\treturn;\n+\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)sq->db,\n+\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t   !sh->tx_uar.dbnc);\n+\tsq->db_pi = sq->pi;\n+}\n+\n /**\n  * Update ASO objects upon completion.\n  *\n@@ -728,7 +792,9 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t\t\t       struct mlx5_aso_sq *sq,\n \t\t\t       struct mlx5_aso_mtr *aso_mtr,\n \t\t\t       struct mlx5_mtr_bulk *bulk,\n-\t\t\t\t   bool need_lock)\n+\t\t\t       bool need_lock,\n+\t\t\t       void *user_data,\n+\t\t\t       bool push)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n \tstruct mlx5_flow_meter_info *fm = NULL;\n@@ -754,7 +820,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \trte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);\n \t/* Fill next WQE. */\n \tfm = &aso_mtr->fm;\n-\tsq->elts[sq->head & mask].mtr = aso_mtr;\n+\tsq->elts[sq->head & mask].mtr = user_data ? user_data : aso_mtr;\n \tif (aso_mtr->type == ASO_METER_INDIRECT) {\n \t\tif (likely(sh->config.dv_flow_en == 2))\n \t\t\tpool = aso_mtr->pool;\n@@ -820,9 +886,13 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t */\n \tsq->head++;\n \tsq->pi += 2;/* Each WQE contains 2 WQEBB's. 
*/\n-\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n+\tif (push) {\n+\t\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n \t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n \t\t\t   !sh->tx_uar.dbnc);\n+\t\tsq->db_pi = sq->pi;\n+\t}\n+\tsq->db = wqe;\n \tif (need_lock)\n \t\trte_spinlock_unlock(&sq->sqsl);\n \treturn 1;\n@@ -912,11 +982,14 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock)\n int\n mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\tstruct mlx5_aso_mtr *mtr,\n-\t\t\tstruct mlx5_mtr_bulk *bulk)\n+\t\t\tstruct mlx5_mtr_bulk *bulk,\n+\t\t\tvoid *user_data,\n+\t\t\tbool push)\n {\n \tstruct mlx5_aso_sq *sq;\n \tuint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;\n \tbool need_lock;\n+\tint ret;\n \n \tif (likely(sh->config.dv_flow_en == 2)) {\n \t\tif (queue == MLX5_HW_INV_QUEUE) {\n@@ -930,10 +1003,15 @@ mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\tsq = &sh->mtrmng->pools_mng.sq;\n \t\tneed_lock = true;\n \t}\n+\tif (queue != MLX5_HW_INV_QUEUE) {\n+\t\tret = mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk,\n+\t\t\t\t\t\t     need_lock, user_data, push);\n+\t\treturn ret > 0 ? 0 : -1;\n+\t}\n \tdo {\n \t\tmlx5_aso_mtr_completion_handle(sq, need_lock);\n-\t\tif (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr,\n-\t\t\t\t\t\t   bulk, need_lock))\n+\t\tif (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk,\n+\t\t\t\t\t\t   need_lock, NULL, true))\n \t\t\treturn 0;\n \t\t/* Waiting for wqe resource. */\n \t\trte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n@@ -962,6 +1040,7 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n {\n \tstruct mlx5_aso_sq *sq;\n \tuint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;\n+\tuint8_t state;\n \tbool need_lock;\n \n \tif (likely(sh->config.dv_flow_en == 2)) {\n@@ -976,8 +1055,8 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\tsq = &sh->mtrmng->pools_mng.sq;\n \t\tneed_lock = true;\n \t}\n-\tif (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==\n-\t\t\t\t\t    ASO_METER_READY)\n+\tstate = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);\n+\tif (state == ASO_METER_READY || state == ASO_METER_WAIT_ASYNC)\n \t\treturn 0;\n \tdo {\n \t\tmlx5_aso_mtr_completion_handle(sq, need_lock);\n@@ -1093,7 +1172,9 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t\t\t      struct mlx5_aso_sq *sq,\n \t\t\t      struct mlx5_aso_ct_action *ct,\n \t\t\t      const struct rte_flow_action_conntrack *profile,\n-\t\t\t      bool need_lock)\n+\t\t\t      bool need_lock,\n+\t\t\t      void *user_data,\n+\t\t\t      bool push)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n \tuint16_t size = 1 << sq->log_desc_n;\n@@ -1117,10 +1198,16 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \twqe = &sq->sq_obj.aso_wqes[sq->head & mask];\n \trte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);\n \t/* Fill next WQE. */\n-\tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);\n-\tsq->elts[sq->head & mask].ct = ct;\n-\tsq->elts[sq->head & mask].query_data = NULL;\n+\tMLX5_ASO_CT_UPDATE_STATE(ct,\n+\t\t\tuser_data ? ASO_CONNTRACK_WAIT_ASYNC : ASO_CONNTRACK_WAIT);\n+\tif (user_data) {\n+\t\tsq->elts[sq->head & mask].user_data = user_data;\n+\t} else {\n+\t\tsq->elts[sq->head & mask].ct = ct;\n+\t\tsq->elts[sq->head & mask].query_data = NULL;\n+\t}\n \tpool = __mlx5_aso_ct_get_pool(sh, ct);\n+\n \t/* Each WQE will have a single CT object. 
*/\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n \t\t\t\t\t\t  ct->offset);\n@@ -1200,9 +1287,13 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \t\t profile->reply_dir.max_ack);\n \tsq->head++;\n \tsq->pi += 2; /* Each WQE contains 2 WQEBB's. */\n-\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n-\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n-\t\t\t   !sh->tx_uar.dbnc);\n+\tif (push) {\n+\t\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n+\t\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t\t   !sh->tx_uar.dbnc);\n+\t\tsq->db_pi = sq->pi;\n+\t}\n+\tsq->db = wqe;\n \tif (need_lock)\n \t\trte_spinlock_unlock(&sq->sqsl);\n \treturn 1;\n@@ -1258,7 +1349,9 @@ static int\n mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \t\t\t    struct mlx5_aso_sq *sq,\n \t\t\t    struct mlx5_aso_ct_action *ct, char *data,\n-\t\t\t    bool need_lock)\n+\t\t\t    bool need_lock,\n+\t\t\t    void *user_data,\n+\t\t\t    bool push)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n \tuint16_t size = 1 << sq->log_desc_n;\n@@ -1284,14 +1377,23 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \t\tDRV_LOG(ERR, \"Fail: SQ is full and no free WQE to send\");\n \t\treturn 0;\n \t}\n-\tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_QUERY);\n+\tMLX5_ASO_CT_UPDATE_STATE(ct,\n+\t\t\tuser_data ? ASO_CONNTRACK_WAIT_ASYNC : ASO_CONNTRACK_QUERY);\n \twqe = &sq->sq_obj.aso_wqes[sq->head & mask];\n \t/* Confirm the location and address of the prefetch instruction. */\n \trte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);\n \t/* Fill next WQE. */\n \twqe_idx = sq->head & mask;\n-\tsq->elts[wqe_idx].ct = ct;\n-\tsq->elts[wqe_idx].query_data = data;\n+\t/* Check if this is async mode. */\n+\tif (user_data) {\n+\t\tstruct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)user_data;\n+\n+\t\tsq->elts[wqe_idx].ct = user_data;\n+\t\tjob->out_data = (char *)((uintptr_t)sq->mr.addr + wqe_idx * 64);\n+\t} else {\n+\t\tsq->elts[wqe_idx].query_data = data;\n+\t\tsq->elts[wqe_idx].ct = ct;\n+\t}\n \tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \t/* Each WQE will have a single CT object. 
*/\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n@@ -1317,9 +1419,13 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \t * data segment is not used in this case.\n \t */\n \tsq->pi += 2;\n-\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n-\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n-\t\t\t   !sh->tx_uar.dbnc);\n+\tif (push) {\n+\t\tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n+\t\t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t\t   !sh->tx_uar.dbnc);\n+\t\tsq->db_pi = sq->pi;\n+\t}\n+\tsq->db = wqe;\n \tif (need_lock)\n \t\trte_spinlock_unlock(&sq->sqsl);\n \treturn 1;\n@@ -1405,20 +1511,29 @@ int\n mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\t\t  uint32_t queue,\n \t\t\t  struct mlx5_aso_ct_action *ct,\n-\t\t\t  const struct rte_flow_action_conntrack *profile)\n+\t\t\t  const struct rte_flow_action_conntrack *profile,\n+\t\t\t  void *user_data,\n+\t\t\t  bool push)\n {\n \tuint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n \tstruct mlx5_aso_ct_pool *pool = __mlx5_aso_ct_get_pool(sh, ct);\n \tstruct mlx5_aso_sq *sq;\n \tbool need_lock = !!(queue == MLX5_HW_INV_QUEUE);\n+\tint ret;\n \n \tif (sh->config.dv_flow_en == 2)\n \t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n \telse\n \t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n+\tif (queue != MLX5_HW_INV_QUEUE) {\n+\t\tret = mlx5_aso_ct_sq_enqueue_single(sh, sq, ct, profile,\n+\t\t\t\t\t\t    need_lock, user_data, push);\n+\t\treturn ret > 0 ? 0 : -1;\n+\t}\n \tdo {\n-\t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n-\t\tif (mlx5_aso_ct_sq_enqueue_single(sh, sq, ct, profile, need_lock))\n+\t\tmlx5_aso_ct_completion_handle(sh, sq,  need_lock);\n+\t\tif (mlx5_aso_ct_sq_enqueue_single(sh, sq, ct, profile,\n+\t\t\t\t\t\t  need_lock, NULL, true))\n \t\t\treturn 0;\n \t\t/* Waiting for wqe resource. */\n \t\trte_delay_us_sleep(10u);\n@@ -1478,7 +1593,7 @@ mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n  * @param[in] wdata\n  *   Pointer to data fetched from hardware.\n  */\n-static inline void\n+void\n mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,\n \t\t\tchar *wdata)\n {\n@@ -1562,7 +1677,8 @@ int\n mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\t\t uint32_t queue,\n \t\t\t struct mlx5_aso_ct_action *ct,\n-\t\t\t struct rte_flow_action_conntrack *profile)\n+\t\t\t struct rte_flow_action_conntrack *profile,\n+\t\t\t void *user_data, bool push)\n {\n \tuint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n \tstruct mlx5_aso_ct_pool *pool = __mlx5_aso_ct_get_pool(sh, ct);\n@@ -1575,9 +1691,15 @@ mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n \telse\n \t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n+\tif (queue != MLX5_HW_INV_QUEUE) {\n+\t\tret = mlx5_aso_ct_sq_query_single(sh, sq, ct, out_data,\n+\t\t\t\t\t\t  need_lock, user_data, push);\n+\t\treturn ret > 0 ? 
0 : -1;\n+\t}\n \tdo {\n \t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n-\t\tret = mlx5_aso_ct_sq_query_single(sh, sq, ct, out_data, need_lock);\n+\t\tret = mlx5_aso_ct_sq_query_single(sh, sq, ct, out_data,\n+\t\t\t\tneed_lock, NULL, true);\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t\telse if (ret > 0)\n@@ -1628,7 +1750,8 @@ mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,\n \t\trte_errno = ENXIO;\n \t\treturn -rte_errno;\n \t} else if (state == ASO_CONNTRACK_READY ||\n-\t\t   state == ASO_CONNTRACK_QUERY) {\n+\t\t   state == ASO_CONNTRACK_QUERY ||\n+\t\t   state == ASO_CONNTRACK_WAIT_ASYNC) {\n \t\treturn 0;\n \t}\n \tdo {\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 1146e13cfa..d31838e26e 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -13103,7 +13103,7 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"Failed to allocate CT object\");\n \tct = flow_aso_ct_get_by_dev_idx(dev, idx);\n-\tif (mlx5_aso_ct_update_by_wqe(sh, MLX5_HW_INV_QUEUE, ct, pro)) {\n+\tif (mlx5_aso_ct_update_by_wqe(sh, MLX5_HW_INV_QUEUE, ct, pro, NULL, true)) {\n \t\tflow_dv_aso_ct_dev_release(dev, idx);\n \t\trte_flow_error_set(error, EBUSY,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n@@ -15917,7 +15917,7 @@ __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,\n \t\tif (ret)\n \t\t\treturn ret;\n \t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,\n-\t\t\t\t\t\tct, new_prf);\n+\t\t\t\t\t\tct, new_prf, NULL, true);\n \t\tif (ret)\n \t\t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -16753,7 +16753,8 @@ flow_dv_action_query(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\tct->peer;\n \t\t((struct rte_flow_action_conntrack *)data)->is_original_dir =\n \t\t\t\t\t\t\tct->is_original;\n-\t\tif (mlx5_aso_ct_query_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ct, data))\n+\t\tif (mlx5_aso_ct_query_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ct,\n+\t\t\t\t\tdata, NULL, true))\n \t\t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\tNULL,\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 161b96cd87..9f70637fcf 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -1178,9 +1178,9 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n }\n \n static __rte_always_inline struct mlx5_aso_mtr *\n-flow_hw_meter_mark_alloc(struct rte_eth_dev *dev,\n-\t\t\t   const struct rte_flow_action *action,\n-\t\t\t   uint32_t queue)\n+flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t const struct rte_flow_action *action,\n+\t\t\t void *user_data, bool push)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n@@ -1200,13 +1200,14 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev,\n \tfm->is_enable = meter_mark->state;\n \tfm->color_aware = meter_mark->color_mode;\n \taso_mtr->pool = pool;\n-\taso_mtr->state = ASO_METER_WAIT;\n+\taso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?\n+\t\t\t  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;\n \taso_mtr->offset = mtr_id - 1;\n \taso_mtr->init_color = (meter_mark->color_mode) ?\n \t\tmeter_mark->init_color : RTE_COLOR_GREEN;\n \t/* Update ASO flow meter by wqe. 
*/\n \tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,\n-\t\t\t\t\t &priv->mtr_bulk)) {\n+\t\t\t\t\t &priv->mtr_bulk, user_data, push)) {\n \t\tmlx5_ipool_free(pool->idx_pool, mtr_id);\n \t\treturn NULL;\n \t}\n@@ -1231,7 +1232,7 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \n-\taso_mtr = flow_hw_meter_mark_alloc(dev, action, queue);\n+\taso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true);\n \tif (!aso_mtr)\n \t\treturn -1;\n \n@@ -2295,9 +2296,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t\trte_col_2_mlx5_col(aso_mtr->init_color);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n+\t\t\t/*\n+\t\t\t * Allocate meter directly will slow down flow\n+\t\t\t * insertion rate.\n+\t\t\t */\n \t\t\tret = flow_hw_meter_mark_compile(dev,\n \t\t\t\tact_data->action_dst, action,\n-\t\t\t\trule_acts, &job->flow->mtr_id, queue);\n+\t\t\t\trule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE);\n \t\t\tif (ret != 0)\n \t\t\t\treturn ret;\n \t\t\tbreak;\n@@ -2604,6 +2609,74 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,\n \t}\n }\n \n+static inline int\n+__flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,\n+\t\t\t\t uint32_t queue,\n+\t\t\t\t struct rte_flow_op_result res[],\n+\t\t\t\t uint16_t n_res)\n+\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_ring *r = priv->hw_q[queue].indir_cq;\n+\tstruct mlx5_hw_q_job *job;\n+\tvoid *user_data = NULL;\n+\tuint32_t type, idx;\n+\tstruct mlx5_aso_mtr *aso_mtr;\n+\tstruct mlx5_aso_ct_action *aso_ct;\n+\tint ret_comp, i;\n+\n+\tret_comp = (int)rte_ring_count(r);\n+\tif (ret_comp > n_res)\n+\t\tret_comp = n_res;\n+\tfor (i = 0; i < ret_comp; i++) {\n+\t\trte_ring_dequeue(r, &user_data);\n+\t\tres[i].user_data = user_data;\n+\t\tres[i].status = RTE_FLOW_OP_SUCCESS;\n+\t}\n+\tif (ret_comp < n_res && priv->hws_mpool)\n+\t\tret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],\n+\t\t\t\t&res[ret_comp], n_res - ret_comp);\n+\tif (ret_comp < n_res && priv->hws_ctpool)\n+\t\tret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],\n+\t\t\t\t&res[ret_comp], n_res - ret_comp);\n+\tfor (i = 0; i <  ret_comp; i++) {\n+\t\tjob = (struct mlx5_hw_q_job *)res[i].user_data;\n+\t\t/* Restore user data. 
*/\n+\t\tres[i].user_data = job->user_data;\n+\t\tif (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {\n+\t\t\ttype = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);\n+\t\t\tif (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {\n+\t\t\t\tidx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);\n+\t\t\t\tmlx5_ipool_free(priv->hws_mpool->idx_pool, idx);\n+\t\t\t}\n+\t\t} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {\n+\t\t\ttype = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);\n+\t\t\tif (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {\n+\t\t\t\tidx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);\n+\t\t\t\taso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);\n+\t\t\t\taso_mtr->state = ASO_METER_READY;\n+\t\t\t} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {\n+\t\t\t\tidx = MLX5_ACTION_CTX_CT_GET_IDX\n+\t\t\t\t\t((uint32_t)(uintptr_t)job->action);\n+\t\t\t\taso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);\n+\t\t\t\taso_ct->state = ASO_CONNTRACK_READY;\n+\t\t\t}\n+\t\t} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {\n+\t\t\ttype = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);\n+\t\t\tif (type == MLX5_INDIRECT_ACTION_TYPE_CT) {\n+\t\t\t\tidx = MLX5_ACTION_CTX_CT_GET_IDX\n+\t\t\t\t\t((uint32_t)(uintptr_t)job->action);\n+\t\t\t\taso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);\n+\t\t\t\tmlx5_aso_ct_obj_analyze(job->profile,\n+\t\t\t\t\t\t\tjob->out_data);\n+\t\t\t\taso_ct->state = ASO_CONNTRACK_READY;\n+\t\t\t}\n+\t\t}\n+\t\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n+\t}\n+\treturn ret_comp;\n+}\n+\n /**\n  * Pull the enqueued flows.\n  *\n@@ -2636,6 +2709,7 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \tstruct mlx5_hw_q_job *job;\n \tint ret, i;\n \n+\t/* 1. Pull the flow completion. */\n \tret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);\n \tif (ret < 0)\n \t\treturn rte_flow_error_set(error, rte_errno,\n@@ -2661,9 +2735,34 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \t\t}\n \t\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n \t}\n+\t/* 2. Pull indirect action comp. 
*/\n+\tif (ret < n_res)\n+\t\tret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],\n+\t\t\t\t\t\t\tn_res - ret);\n \treturn ret;\n }\n \n+static inline void\n+__flow_hw_push_action(struct rte_eth_dev *dev,\n+\t\t    uint32_t queue)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_ring *iq = priv->hw_q[queue].indir_iq;\n+\tstruct rte_ring *cq = priv->hw_q[queue].indir_cq;\n+\tvoid *job = NULL;\n+\tuint32_t ret, i;\n+\n+\tret = rte_ring_count(iq);\n+\tfor (i = 0; i < ret; i++) {\n+\t\trte_ring_dequeue(iq, &job);\n+\t\trte_ring_enqueue(cq, job);\n+\t}\n+\tif (priv->hws_ctpool)\n+\t\tmlx5_aso_push_wqe(priv->sh, &priv->ct_mng->aso_sqs[queue]);\n+\tif (priv->hws_mpool)\n+\t\tmlx5_aso_push_wqe(priv->sh, &priv->hws_mpool->sq[queue]);\n+}\n+\n /**\n  * Push the enqueued flows to HW.\n  *\n@@ -2687,6 +2786,7 @@ flow_hw_push(struct rte_eth_dev *dev,\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tint ret;\n \n+\t__flow_hw_push_action(dev, queue);\n \tret = mlx5dr_send_queue_action(priv->dr_ctx, queue,\n \t\t\t\t       MLX5DR_SEND_QUEUE_ACTION_DRAIN);\n \tif (ret) {\n@@ -5944,7 +6044,7 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t/* Adds one queue to be used by PMD.\n \t * The last queue will be used by the PMD.\n \t */\n-\tuint16_t nb_q_updated;\n+\tuint16_t nb_q_updated = 0;\n \tstruct rte_flow_queue_attr **_queue_attr = NULL;\n \tstruct rte_flow_queue_attr ctrl_queue_attr = {0};\n \tbool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);\n@@ -6011,6 +6111,7 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\tgoto err;\n \t}\n \tfor (i = 0; i < nb_q_updated; i++) {\n+\t\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n \t\tuint8_t *encap = NULL;\n \t\tstruct mlx5_modification_cmd *mhdr_cmd = NULL;\n \t\tstruct rte_flow_item *items = NULL;\n@@ -6038,6 +6139,22 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\t\tjob[j].items = &items[j * MLX5_HW_MAX_ITEMS];\n \t\t\tpriv->hw_q[i].job[j] = &job[j];\n \t\t}\n+\t\tsnprintf(mz_name, sizeof(mz_name), \"port_%u_indir_act_cq_%u\",\n+\t\t\t dev->data->port_id, i);\n+\t\tpriv->hw_q[i].indir_cq = rte_ring_create(mz_name,\n+\t\t\t\t_queue_attr[i]->size, SOCKET_ID_ANY,\n+\t\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ |\n+\t\t\t\tRING_F_EXACT_SZ);\n+\t\tif (!priv->hw_q[i].indir_cq)\n+\t\t\tgoto err;\n+\t\tsnprintf(mz_name, sizeof(mz_name), \"port_%u_indir_act_iq_%u\",\n+\t\t\t dev->data->port_id, i);\n+\t\tpriv->hw_q[i].indir_iq = rte_ring_create(mz_name,\n+\t\t\t\t_queue_attr[i]->size, SOCKET_ID_ANY,\n+\t\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ |\n+\t\t\t\tRING_F_EXACT_SZ);\n+\t\tif (!priv->hw_q[i].indir_iq)\n+\t\t\tgoto err;\n \t}\n \tdr_ctx_attr.pd = priv->sh->cdev->pd;\n \tdr_ctx_attr.queues = nb_q_updated;\n@@ -6155,6 +6272,12 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \tflow_hw_destroy_vlan(dev);\n \tif (dr_ctx)\n \t\tclaim_zero(mlx5dr_context_close(dr_ctx));\n+\tfor (i = 0; i < nb_q_updated; i++) {\n+\t\tif (priv->hw_q[i].indir_iq)\n+\t\t\trte_ring_free(priv->hw_q[i].indir_iq);\n+\t\tif (priv->hw_q[i].indir_cq)\n+\t\t\trte_ring_free(priv->hw_q[i].indir_cq);\n+\t}\n \tmlx5_free(priv->hw_q);\n \tpriv->hw_q = NULL;\n \tif (priv->acts_ipool) {\n@@ -6184,7 +6307,7 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \tstruct rte_flow_template_table *tbl;\n \tstruct rte_flow_pattern_template *it;\n \tstruct rte_flow_actions_template *at;\n-\tint i;\n+\tuint32_t i;\n \n \tif (!priv->dr_ctx)\n \t\treturn;\n@@ -6230,6 +6353,10 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \t\tflow_hw_ct_mng_destroy(dev, priv->ct_mng);\n 
\t\tpriv->ct_mng = NULL;\n \t}\n+\tfor (i = 0; i < priv->nb_queue; i++) {\n+\t\trte_ring_free(priv->hw_q[i].indir_iq);\n+\t\trte_ring_free(priv->hw_q[i].indir_cq);\n+\t}\n \tmlx5_free(priv->hw_q);\n \tpriv->hw_q = NULL;\n \tclaim_zero(mlx5dr_context_close(priv->dr_ctx));\n@@ -6418,8 +6545,9 @@ flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,\n }\n \n static int\n-flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t idx,\n+flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,\n \t\t\tstruct rte_flow_action_conntrack *profile,\n+\t\t\tvoid *user_data, bool push,\n \t\t\tstruct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -6443,7 +6571,7 @@ flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t idx,\n \t}\n \tprofile->peer_port = ct->peer;\n \tprofile->is_original_dir = ct->is_original;\n-\tif (mlx5_aso_ct_query_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ct, profile))\n+\tif (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))\n \t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL,\n@@ -6455,7 +6583,8 @@ flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t idx,\n static int\n flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t const struct rte_flow_modify_conntrack *action_conf,\n-\t\t\t uint32_t idx, struct rte_flow_error *error)\n+\t\t\t uint32_t idx, void *user_data, bool push,\n+\t\t\t struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n@@ -6486,7 +6615,8 @@ flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\tret = mlx5_validate_action_ct(dev, new_prf, error);\n \t\tif (ret)\n \t\t\treturn ret;\n-\t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf);\n+\t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,\n+\t\t\t\t\t\tuser_data, push);\n \t\tif (ret)\n \t\t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -6508,6 +6638,7 @@ flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n static struct rte_flow_action_handle *\n flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t const struct rte_flow_action_conntrack *pro,\n+\t\t\t void *user_data, bool push,\n \t\t\t struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -6534,7 +6665,7 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,\n \tct->is_original = !!pro->is_original_dir;\n \tct->peer = pro->peer_port;\n \tct->pool = pool;\n-\tif (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro)) {\n+\tif (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {\n \t\tmlx5_ipool_free(pool->cts, ct_idx);\n \t\trte_flow_error_set(error, EBUSY,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n@@ -6626,15 +6757,29 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t     struct rte_flow_error *error)\n {\n \tstruct rte_flow_action_handle *handle = NULL;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_action_age *age;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tcnt_id_t cnt_id;\n \tuint32_t mtr_id;\n \tuint32_t age_idx;\n+\tbool push = true;\n+\tbool aso = false;\n \n-\tRTE_SET_USED(attr);\n-\tRTE_SET_USED(user_data);\n+\tif (attr) {\n+\t\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\t\tif 
(unlikely(!priv->hw_q[queue].job_idx)) {\n+\t\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\"Flow queue full.\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\t\tjob->type = MLX5_HW_Q_JOB_TYPE_CREATE;\n+\t\tjob->user_data = user_data;\n+\t\tpush = !attr->postpone;\n+\t}\n \tswitch (action->type) {\n \tcase RTE_FLOW_ACTION_TYPE_AGE:\n \t\tage = action->conf;\n@@ -6662,10 +6807,13 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t (uintptr_t)cnt_id;\n \t\tbreak;\n \tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n-\t\thandle = flow_hw_conntrack_create(dev, queue, action->conf, error);\n+\t\taso = true;\n+\t\thandle = flow_hw_conntrack_create(dev, queue, action->conf, job,\n+\t\t\t\t\t\t  push, error);\n \t\tbreak;\n \tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n-\t\taso_mtr = flow_hw_meter_mark_alloc(dev, action, queue);\n+\t\taso = true;\n+\t\taso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push);\n \t\tif (!aso_mtr)\n \t\t\tbreak;\n \t\tmtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<\n@@ -6678,7 +6826,20 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \tdefault:\n \t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t   NULL, \"action type not supported\");\n-\t\treturn NULL;\n+\t\tbreak;\n+\t}\n+\tif (job) {\n+\t\tif (!handle) {\n+\t\t\tpriv->hw_q[queue].job_idx++;\n+\t\t\treturn NULL;\n+\t\t}\n+\t\tjob->action = handle;\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n+\t\tif (aso)\n+\t\t\treturn handle;\n+\t\trte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :\n+\t\t\t\t priv->hw_q[queue].indir_iq, job);\n \t}\n \treturn handle;\n }\n@@ -6712,32 +6873,56 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t     void *user_data,\n \t\t\t     struct rte_flow_error *error)\n {\n-\tRTE_SET_USED(attr);\n-\tRTE_SET_USED(user_data);\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n+\tconst struct rte_flow_modify_conntrack *ct_conf =\n+\t\t(const struct rte_flow_modify_conntrack *)update;\n \tconst struct rte_flow_update_meter_mark *upd_meter_mark =\n \t\t(const struct rte_flow_update_meter_mark *)update;\n \tconst struct rte_flow_action_meter_mark *meter_mark;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tstruct mlx5_flow_meter_info *fm;\n \tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n \tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \tuint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n+\tint ret = 0;\n+\tbool push = true;\n+\tbool aso = false;\n \n+\tif (attr) {\n+\t\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\t\tif (unlikely(!priv->hw_q[queue].job_idx))\n+\t\t\treturn rte_flow_error_set(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\"Action update failed due to queue full.\");\n+\t\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\t\tjob->type = MLX5_HW_Q_JOB_TYPE_UPDATE;\n+\t\tjob->user_data = user_data;\n+\t\tpush = !attr->postpone;\n+\t}\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n-\t\treturn mlx5_hws_age_action_update(priv, idx, update, error);\n+\t\tret = mlx5_hws_age_action_update(priv, idx, update, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n-\t\treturn flow_hw_conntrack_update(dev, queue, update, act_idx, error);\n+\t\tif (ct_conf->state)\n+\t\t\taso = 
true;\n+\t\tret = flow_hw_conntrack_update(dev, queue, update, act_idx,\n+\t\t\t\t\t       job, push, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_METER_MARK:\n+\t\taso = true;\n \t\tmeter_mark = &upd_meter_mark->meter_mark;\n \t\t/* Find ASO object. */\n \t\taso_mtr = mlx5_ipool_get(pool->idx_pool, idx);\n-\t\tif (!aso_mtr)\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\tif (!aso_mtr) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Invalid meter_mark update index\");\n+\t\t\tbreak;\n+\t\t}\n \t\tfm = &aso_mtr->fm;\n \t\tif (upd_meter_mark->profile_valid)\n \t\t\tfm->profile = (struct mlx5_flow_meter_profile *)\n@@ -6751,25 +6936,46 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\tfm->is_enable = meter_mark->state;\n \t\t/* Update ASO flow meter by wqe. */\n \t\tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue,\n-\t\t\t\t\t\t aso_mtr, &priv->mtr_bulk))\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t aso_mtr, &priv->mtr_bulk, job, push)) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to update ASO meter WQE\");\n+\t\t\tbreak;\n+\t\t}\n \t\t/* Wait for ASO object completion. */\n \t\tif (queue == MLX5_HW_INV_QUEUE &&\n-\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to wait for ASO meter CQE\");\n+\t\t}\n \t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n-\t\treturn flow_dv_action_update(dev, handle, update, error);\n+\t\tret = flow_dv_action_update(dev, handle, update, error);\n+\t\tbreak;\n \tdefault:\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\tret = -ENOTSUP;\n+\t\trte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"action type not supported\");\n+\t\tbreak;\n \t}\n-\treturn 0;\n+\tif (job) {\n+\t\tif (ret) {\n+\t\t\tpriv->hw_q[queue].job_idx++;\n+\t\t\treturn ret;\n+\t\t}\n+\t\tjob->action = handle;\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n+\t\tif (aso)\n+\t\t\treturn 0;\n+\t\trte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :\n+\t\t\t\t priv->hw_q[queue].indir_iq, job);\n+\t}\n+\treturn ret;\n }\n \n /**\n@@ -6804,15 +7010,28 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \tuint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tstruct mlx5_flow_meter_info *fm;\n+\tbool push = true;\n+\tbool aso = false;\n+\tint ret = 0;\n \n-\tRTE_SET_USED(queue);\n-\tRTE_SET_USED(attr);\n-\tRTE_SET_USED(user_data);\n+\tif (attr) {\n+\t\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\t\tif (unlikely(!priv->hw_q[queue].job_idx))\n+\t\t\treturn rte_flow_error_set(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\"Action destroy failed due to queue full.\");\n+\t\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\t\tjob->type = MLX5_HW_Q_JOB_TYPE_DESTROY;\n+\t\tjob->user_data = user_data;\n+\t\tpush = !attr->postpone;\n+\t}\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n-\t\treturn mlx5_hws_age_action_destroy(priv, age_idx, error);\n+\t\tret = mlx5_hws_age_action_destroy(priv, age_idx, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n \t\tage_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);\n \t\tif (age_idx != 0)\n@@ -6821,39 +7040,69 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t * time to update the AGE.\n \t\t\t */\n \t\t\tmlx5_hws_age_nb_cnt_decrease(priv, age_idx);\n-\t\treturn mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);\n+\t\tret = mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n-\t\treturn flow_hw_conntrack_destroy(dev, act_idx, error);\n+\t\tret = flow_hw_conntrack_destroy(dev, act_idx, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_METER_MARK:\n \t\taso_mtr = mlx5_ipool_get(pool->idx_pool, idx);\n-\t\tif (!aso_mtr)\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\tif (!aso_mtr) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Invalid meter_mark destroy index\");\n+\t\t\tbreak;\n+\t\t}\n \t\tfm = &aso_mtr->fm;\n \t\tfm->is_enable = 0;\n \t\t/* Update ASO flow meter by wqe. */\n \t\tif (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,\n-\t\t\t\t\t\t &priv->mtr_bulk))\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t &priv->mtr_bulk, job, push)) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to update ASO meter WQE\");\n+\t\t\tbreak;\n+\t\t}\n \t\t/* Wait for ASO object completion. */\n \t\tif (queue == MLX5_HW_INV_QUEUE &&\n-\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {\n+\t\t\tret = -EINVAL;\n+\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to wait for ASO meter CQE\");\n-\t\tmlx5_ipool_free(pool->idx_pool, idx);\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (!job)\n+\t\t\tmlx5_ipool_free(pool->idx_pool, idx);\n+\t\telse\n+\t\t\taso = true;\n \t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n-\t\treturn flow_dv_action_destroy(dev, handle, error);\n+\t\tret = flow_dv_action_destroy(dev, handle, error);\n+\t\tbreak;\n \tdefault:\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\tret = -ENOTSUP;\n+\t\trte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"action type not supported\");\n+\t\tbreak;\n \t}\n-\treturn 0;\n+\tif (job) {\n+\t\tif (ret) {\n+\t\t\tpriv->hw_q[queue].job_idx++;\n+\t\t\treturn ret;\n+\t\t}\n+\t\tjob->action = handle;\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n+\t\tif (aso)\n+\t\t\treturn ret;\n+\t\trte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :\n+\t\t\t\t priv->hw_q[queue].indir_iq, job);\n+\t}\n+\treturn ret;\n }\n \n static int\n@@ -7083,28 +7332,76 @@ flow_hw_action_update(struct rte_eth_dev *dev,\n }\n \n static int\n-flow_hw_action_query(struct rte_eth_dev *dev,\n-\t\t     const struct rte_flow_action_handle *handle, void *data,\n-\t\t     struct rte_flow_error *error)\n+flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t    const struct rte_flow_op_attr *attr,\n+\t\t\t    const struct rte_flow_action_handle *handle,\n+\t\t\t    void *data, void *user_data,\n+\t\t\t    struct rte_flow_error *error)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n \tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \tuint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;\n+\tint ret;\n+\tbool push = true;\n+\tbool aso = false;\n \n+\tif (attr) {\n+\t\tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n+\t\tif (unlikely(!priv->hw_q[queue].job_idx))\n+\t\t\treturn rte_flow_error_set(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\"Action query failed due to queue full.\");\n+\t\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\t\tjob->type = MLX5_HW_Q_JOB_TYPE_QUERY;\n+\t\tjob->user_data = user_data;\n+\t\tpush = !attr->postpone;\n+\t}\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n-\t\treturn flow_hw_query_age(dev, age_idx, data, error);\n+\t\tret = flow_hw_query_age(dev, age_idx, data, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n-\t\treturn flow_hw_query_counter(dev, act_idx, data, error);\n+\t\tret = flow_hw_query_counter(dev, act_idx, data, error);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n-\t\treturn flow_hw_conntrack_query(dev, act_idx, data, error);\n-\tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n-\t\treturn flow_dv_action_query(dev, handle, data, error);\n+\t\taso = true;\n+\t\tif (job)\n+\t\t\tjob->profile = (struct rte_flow_action_conntrack *)data;\n+\t\tret = flow_hw_conntrack_query(dev, queue, act_idx, data,\n+\t\t\t\t\t      job, push, error);\n+\t\tbreak;\n \tdefault:\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\tret = -ENOTSUP;\n+\t\trte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"action type not supported\");\n+\t\tbreak;\n+\t}\n+\tif (job) {\n+\t\tif (ret) {\n+\t\t\tpriv->hw_q[queue].job_idx++;\n+\t\t\treturn ret;\n+\t\t}\n+\t\tjob->action = handle;\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n+\t\tif (aso)\n+\t\t\treturn ret;\n+\t\trte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :\n+\t\t\t\t priv->hw_q[queue].indir_iq, job);\n \t}\n+\treturn ret;\n+}\n+\n+static int\n+flow_hw_action_query(struct rte_eth_dev *dev,\n+\t\t     const struct rte_flow_action_handle *handle, void *data,\n+\t\t     struct rte_flow_error *error)\n+{\n+\treturn flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,\n+\t\t\thandle, data, NULL, error);\n }\n \n /**\n@@ -7219,6 +7516,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.async_action_create = flow_hw_action_handle_create,\n \t.async_action_destroy = flow_hw_action_handle_destroy,\n \t.async_action_update = flow_hw_action_handle_update,\n+\t.async_action_query = flow_hw_action_handle_query,\n \t.action_validate = flow_hw_action_validate,\n \t.action_create = flow_hw_action_create,\n \t.action_destroy = flow_hw_action_destroy,\ndiff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c\nindex fd1337ae73..480ac6c8ec 100644\n--- a/drivers/net/mlx5/mlx5_flow_meter.c\n+++ b/drivers/net/mlx5/mlx5_flow_meter.c\n@@ -1627,7 +1627,7 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv,\n \t\tfm->is_enable = !!is_enable;\n \t\taso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);\n \t\tret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,\n-\t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk);\n+\t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk, NULL, true);\n \t\tif (ret)\n \t\t\treturn ret;\n \t\tret = mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr);\n@@ -1877,7 +1877,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,\n \tif (priv->sh->meter_aso_en) {\n \t\taso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);\n \t\tret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,\n-\t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk);\n+\t\t\t\t\t\t   aso_mtr, &priv->mtr_bulk, NULL, true);\n \t\tif (ret)\n \t\t\tgoto error;\n \t\tif (!priv->mtr_idx_tbl) {\n@@ -1983,7 +1983,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id,\n \tfm->initialized = 1;\n \t/* Update ASO flow meter by wqe. */\n \tret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr,\n-\t\t\t\t\t   &priv->mtr_bulk);\n+\t\t\t\t\t   &priv->mtr_bulk, NULL, true);\n \tif (ret)\n \t\treturn -rte_mtr_error_set(error, ENOTSUP,\n\t\t\tRTE_MTR_ERROR_TYPE_UNSPECIFIED,\n",
    "prefixes": [
        "v3",
        "14/17"
    ]
}
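
For context on the driver ops this patch registers (async_action_query plus the existing push/pull ops): below is a minimal usage sketch, assuming the generic async rte_flow entry points available since DPDK 22.11 (rte_flow_async_action_handle_query() and rte_flow_pull(), which dispatch to the PMD ops wired up above), a started port with flow queues configured via rte_flow_configure(), and an already-created indirect counter handle. The port ID, queue ID, and helper name are illustrative assumptions, not values taken from the patch.

/*
 * Sketch: query an indirect counter through the async flow queue.
 * With op_attr.postpone = 0 the PMD pushes the job to hardware
 * immediately; rte_flow_pull() then retrieves the completion,
 * after which the query result in 'cnt' is valid.
 */
#include <errno.h>
#include <rte_flow.h>

static int
query_counter_async(uint16_t port, uint32_t queue,
		    struct rte_flow_action_handle *handle,
		    struct rte_flow_query_count *cnt)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	struct rte_flow_op_result comp[1];
	struct rte_flow_error err;
	int n;

	/* Enqueue the query job on the given flow queue. */
	if (rte_flow_async_action_handle_query(port, queue, &op_attr,
					       handle, cnt, NULL, &err))
		return -EINVAL;
	/* Pull completions until this job's result arrives. */
	do {
		n = rte_flow_pull(port, queue, comp, 1, &err);
	} while (n == 0);
	if (n < 0 || comp[0].status != RTE_FLOW_OP_SUCCESS)
		return -EIO;
	/* cnt->hits and cnt->bytes now hold the counter values. */
	return 0;
}

Note the asymmetry visible in the diff: for ASO-backed objects (conntrack, meter mark) the mlx5 handlers return before enqueuing the job to indir_cq/indir_iq, because the job is completed only when the ASO CQE arrives, whereas non-ASO jobs are made visible to rte_flow_pull() right away.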