get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
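
GET needs no authentication here, while PUT and PATCH require an authenticated user with maintainer rights and follow the usual REST Framework semantics (PUT replaces the writable fields, PATCH changes only those supplied). Below is a minimal client sketch in Python; it assumes the "requests" package and a Patchwork API token with sufficient rights, and the field names it writes ("state", "archived") are the ones visible in the response further down.

# Minimal sketch (assumptions: "requests" is installed and API_TOKEN is a
# Patchwork token with maintainer rights on the dpdk project).
import requests

BASE = "http://patches.dpdk.org/api"
API_TOKEN = "..."  # hypothetical token; needed for PUT/PATCH only

# GET: fetch the patch shown below as plain JSON.
resp = requests.get(f"{BASE}/patches/92824/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: update only the writable fields supplied in the request body.
update = requests.patch(
    f"{BASE}/patches/92824/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "superseded", "archived": True},
)
update.raise_for_status()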

GET /api/patches/92824/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 92824,
    "url": "http://patches.dpdk.org/api/patches/92824/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210505065008.30680-10-bingz@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210505065008.30680-10-bingz@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210505065008.30680-10-bingz@nvidia.com",
    "date": "2021-05-05T06:50:00",
    "name": "[v3,09/17] net/mlx5: add ASO CT query implementation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a1425f9a6310e309c29c066e229bc3f66734aaa4",
    "submitter": {
        "id": 1976,
        "url": "http://patches.dpdk.org/api/people/1976/?format=api",
        "name": "Bing Zhao",
        "email": "bingz@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210505065008.30680-10-bingz@nvidia.com/mbox/",
    "series": [
        {
            "id": 16818,
            "url": "http://patches.dpdk.org/api/series/16818/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=16818",
            "date": "2021-05-05T06:49:53",
            "name": "[v3,01/17] common/mlx5: add connection tracking object definition",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/16818/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/92824/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/92824/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A6DBCA0524;\n\tWed,  5 May 2021 08:52:19 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A613B41142;\n\tWed,  5 May 2021 08:51:28 +0200 (CEST)",
            "from NAM12-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam12on2057.outbound.protection.outlook.com [40.107.237.57])\n by mails.dpdk.org (Postfix) with ESMTP id 75408410F9\n for <dev@dpdk.org>; Wed,  5 May 2021 08:51:18 +0200 (CEST)",
            "from BN6PR16CA0034.namprd16.prod.outlook.com (2603:10b6:405:14::20)\n by BN8PR12MB4785.namprd12.prod.outlook.com (2603:10b6:408:a2::12)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4087.35; Wed, 5 May\n 2021 06:51:17 +0000",
            "from BN8NAM11FT046.eop-nam11.prod.protection.outlook.com\n (2603:10b6:405:14:cafe::d7) by BN6PR16CA0034.outlook.office365.com\n (2603:10b6:405:14::20) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4108.24 via Frontend\n Transport; Wed, 5 May 2021 06:51:17 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT046.mail.protection.outlook.com (10.13.177.127) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4108.25 via Frontend Transport; Wed, 5 May 2021 06:51:17 +0000",
            "from nvidia.com (172.20.145.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 5 May\n 2021 06:51:06 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=gPIh7qoRmfJ93/ERBbEWjg7M/gK0NP7tTjY+FNVURVEdxe2t/Sx/kIlxXstbqOiviLp/yliYdCbB7m2oeV68PMSgKipiM/Z7mxuEdbQt0VvtEzQ6ag721SyKPbzVp3sa5hws8GqT7BJtxEiw1+z8isXeb2tuNTkTkXwmLJh46pSYjXdjYMEKWP4UUUMPQ7XOx1nscxHxJyCxNFPxc4uW2/RCAC8aDsKZ1S5/Fk0hfVjWo4ePJKsVTQIUXGje/yeGQjUhVW+mJcFTyL/J2hKGz6J+WiPphtCBaQiUpUTAk2f0DTxucsdALRayfOFoEGs0EaFKS+1sumyx9+Cc4Ahncg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=V9oS0w2u0biuFn4RZX9tWQnykRakgzi3zdNKt9UUlcE=;\n b=iKyoYpgq397GFXnSoKq3AGXGsQGMl1rGz4Jpzh+LQjUCSZIwD7miyXchQH0x9wyfttXYJ8u1alknDlomZZQm58gxhW1wWqgXPatNis2Lm8ntK8XoFWhwlDXgBbBMHefZkWA2jadn42qEOr9prDsqTNFK1q9mNc+H1G1goAsF3PvAH+oynCLe8mMnQTq/RLiw1zOYyGO8eNNLuN8EwL+h4bcZzU2u8ObrjTKBjjSzTJayEl/y0UDKGbg/SeDAlmZY+4A1kuvZMpx082XNSNc65D7koLqY/80WvzxdVkt1+ty7VJyPlLJRbIkYDCAOaE9wkhpOvEbrb0HELOPx8wZNwA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=V9oS0w2u0biuFn4RZX9tWQnykRakgzi3zdNKt9UUlcE=;\n b=ePUkA2HLLq6lIEa2Iio6b7qTSUBh7k23h0PtKn6IssFCA2XDlnT8XkAF54e+k+IqVHhS9y5PzawwFKhfMijAgJJ1XuAd2smJWJLRbN5IRv9yO6lEX91yslzN4hF9Id80FHEc1ooZkiRui78LN64f8jm2zoAbuyXum2CkurOXPFoSW1Ev5jZtUoz/duAcx8vxnEtFWMSqzD6qLSC3weHxK5zAlal2wOcHWWC5w37rL0+0gYsdaFl5WQWThSiRrxei0HUOGZj6vET03I9KDZyolvk/F5+aMqrZmwJUn1KqfmSq6MAij66ncAGzw/cNY46BCTJrAt26/buiOfsqW3LuLA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Bing Zhao <bingz@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>, <thomas@monjalon.net>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>, <rasland@nvidia.com>",
        "Date": "Wed, 5 May 2021 09:50:00 +0300",
        "Message-ID": "<20210505065008.30680-10-bingz@nvidia.com>",
        "X-Mailer": "git-send-email 2.27.0",
        "In-Reply-To": "<20210505065008.30680-1-bingz@nvidia.com>",
        "References": "<20210427153811.11554-1-bingz@nvidia.com>\n <20210505065008.30680-1-bingz@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.145.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "c31b63e5-0564-4945-1476-08d90f922ea5",
        "X-MS-TrafficTypeDiagnostic": "BN8PR12MB4785:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <BN8PR12MB478589E0F158FD54E5238555D0599@BN8PR12MB4785.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:112;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n DzdqTg5AjNLVBwZehkK4aKFZqwnhrvfkzRHD/6nKxQ+VDyYs8Mh69wS2qvETop7TmEmh03bZZdjfHU8LjP/gG+fKxbNU3THRagDP9+PNw/GbcA/LW1oOyEjt8dX5Iw3hlEU0hU1Vphxn+oJBTtxr9aID/b3oVUXWKF00Z1M/2M2oFLSS0+G8/qGw3QVEJlD0q43fx/ia2oChTl37aAGnL2HSPXc6Yle7Nk4FyrWNPXrc0+xs4wjuxU6BkSQrMehClqVKYZBAyVQCyE5uAYoGQWOY4h11gMlgmQewMYYFzj/9oapWY+q4ToFxdg0Z18TzL57gZu2MfJudRZH1k9Du0XU2jWw8WTFlY13fFKofVxOGXaXA281UdpKRnjVbdrYK4iB/ubX/jANvAQhUAlSxqBbW02Em5cwFtsbanECsJCtpyfpFvjQbXgAowqEvoMoznrjJNzuXwAFvWoGCcIozn2QdiHiE9fYXejG66TdqvHT/OJVO0H8jsBMpwHBo5nTb1+sqJNiY/THWGp/xMARVBnmAgLyLho5b9uwrCGtFBRuKCgfnHMPj3tFtEnSgkxZZYnbLp0w2vUISy93onEQb7Rak2r6tLaI5wbFaXctgmMP1qgEHcUEylu3b5rLL3wyWin1LW8Ly7Us3JdHDWe90L7ybwVn3xnsEswt39U6DmU8=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(346002)(39860400002)(376002)(136003)(396003)(46966006)(36840700001)(55016002)(356005)(1076003)(82740400003)(110136005)(54906003)(36756003)(83380400001)(8676002)(86362001)(336012)(70586007)(7696005)(107886003)(70206006)(47076005)(478600001)(7636003)(2616005)(316002)(30864003)(26005)(5660300002)(426003)(2906002)(36860700001)(6286002)(16526019)(82310400003)(36906005)(8936002)(6666004)(4326008)(186003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "05 May 2021 06:51:17.0159 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n c31b63e5-0564-4945-1476-08d90f922ea5",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT046.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN8PR12MB4785",
        "Subject": "[dpdk-dev] [PATCH v3 09/17] net/mlx5: add ASO CT query\n implementation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "After the connection tracking context is created and being used by\nthe flows, the context will be updated by the HW automatically after\na packet passed the CT validation. E.g., the ACK, SEQ, window and\nstate of CT can be updated with both direction traffic.\n\nIn order to query the updated contents of this context, a WQE should\nbe posted to the SQ with a return buffer. The data will be filled\ninto the buffer. And the profile will be filled with specific value.\n\nDuring the execution of query command, the context may be updated.\nThe result of the query command may not be the latest one.\n\nSigned-off-by: Bing Zhao <bingz@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h          |  10 +-\n drivers/net/mlx5/mlx5_flow_aso.c | 245 +++++++++++++++++++++++++++++++\n drivers/net/mlx5/mlx5_flow_dv.c  |  19 +++\n 3 files changed, 273 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex de18a59c8e..d2827e78d7 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -490,7 +490,10 @@ struct mlx5_aso_sq_elem {\n \t\t\tuint16_t burst_size;\n \t\t};\n \t\tstruct mlx5_aso_mtr *mtr;\n-\t\tstruct mlx5_aso_ct_action *ct;\n+\t\tstruct {\n+\t\t\tstruct mlx5_aso_ct_action *ct;\n+\t\t\tchar *query_data;\n+\t\t};\n \t};\n };\n \n@@ -1702,5 +1705,10 @@ int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,\n int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\t\t      struct mlx5_aso_ct_action *ct,\n \t\t\t      const struct rte_flow_action_conntrack *profile);\n+int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t   struct mlx5_aso_ct_action *ct);\n+int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t     struct mlx5_aso_ct_action *ct,\n+\t\t\t     struct rte_flow_action_conntrack *profile);\n \n #endif /* RTE_PMD_MLX5_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex 3c2350a6b8..3f7ed371bf 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -933,6 +933,7 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_aso_ct_pools_mng *mng,\n \t/* Fill next WQE. */\n \tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);\n \tsq->elts[sq->head & mask].ct = ct;\n+\tsq->elts[sq->head & mask].query_data = NULL;\n \tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n \t/* Each WQE will have a single CT object. */\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n@@ -1048,9 +1049,95 @@ mlx5_aso_ct_status_update(struct mlx5_aso_sq *sq, uint16_t num)\n \t\tct = sq->elts[idx].ct;\n \t\tMLX5_ASSERT(ct);\n \t\tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_READY);\n+\t\tif (sq->elts[idx].query_data)\n+\t\t\trte_memcpy(sq->elts[idx].query_data,\n+\t\t\t\t   (char *)((uintptr_t)sq->mr.addr + idx * 64),\n+\t\t\t\t   64);\n \t}\n }\n \n+/*\n+ * Post a WQE to the ASO CT SQ to query the current context.\n+ *\n+ * @param[in] mng\n+ *   Pointer to the CT pools management structure.\n+ * @param[in] ct\n+ *   Pointer to the generic CT structure related to the context.\n+ * @param[in] data\n+ *   Pointer to data area to be filled.\n+ *\n+ * @return\n+ *   1 on success (WQE number), 0 on failure.\n+ */\n+static int\n+mlx5_aso_ct_sq_query_single(struct mlx5_aso_ct_pools_mng *mng,\n+\t\t\t    struct mlx5_aso_ct_action *ct, char *data)\n+{\n+\tvolatile struct mlx5_aso_wqe *wqe = NULL;\n+\tstruct mlx5_aso_sq *sq = &mng->aso_sq;\n+\tuint16_t size = 1 << sq->log_desc_n;\n+\tuint16_t mask = size - 1;\n+\tuint16_t res;\n+\tuint16_t wqe_idx;\n+\tstruct mlx5_aso_ct_pool *pool;\n+\tenum mlx5_aso_ct_state state =\n+\t\t\t\t__atomic_load_n(&ct->state, __ATOMIC_RELAXED);\n+\n+\tif (state == ASO_CONNTRACK_FREE) {\n+\t\tDRV_LOG(ERR, \"Fail: No context to query\");\n+\t\treturn -1;\n+\t} else if (state == ASO_CONNTRACK_WAIT) {\n+\t\treturn 0;\n+\t}\n+\trte_spinlock_lock(&sq->sqsl);\n+\tres = size - (uint16_t)(sq->head - sq->tail);\n+\tif (unlikely(!res)) {\n+\t\trte_spinlock_unlock(&sq->sqsl);\n+\t\tDRV_LOG(ERR, \"Fail: SQ is full and no free WQE to send\");\n+\t\treturn 0;\n+\t}\n+\tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_QUERY);\n+\twqe = &sq->sq_obj.aso_wqes[sq->head & mask];\n+\t/* Confirm the location and address of the prefetch instruction. */\n+\trte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);\n+\t/* Fill next WQE. 
*/\n+\twqe_idx = sq->head & mask;\n+\tsq->elts[wqe_idx].ct = ct;\n+\tsq->elts[wqe_idx].query_data = data;\n+\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\t/* Each WQE will have a single CT object. */\n+\twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n+\t\t\t\t\t\t  ct->offset);\n+\twqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |\n+\t\t\t(ASO_OPC_MOD_CONNECTION_TRACKING <<\n+\t\t\t WQE_CSEG_OPC_MOD_OFFSET) |\n+\t\t\tsq->pi << WQE_CSEG_WQE_INDEX_OFFSET);\n+\t/*\n+\t * There is no write request is required.\n+\t * ASO_OPER_LOGICAL_AND and ASO_OP_ALWAYS_FALSE are both 0.\n+\t * \"BYTEWISE_64BYTE\" is needed for a whole context.\n+\t * Set to 0 directly to reduce an endian swap. (Modify should rewrite.)\n+\t * \"data_mask\" is ignored.\n+\t * Buffer address was already filled during initialization.\n+\t */\n+\twqe->aso_cseg.operand_masks = rte_cpu_to_be_32(BYTEWISE_64BYTE <<\n+\t\t\t\t\tASO_CSEG_DATA_MASK_MODE_OFFSET);\n+\twqe->aso_cseg.data_mask = 0;\n+\tsq->head++;\n+\t/*\n+\t * Each WQE contains 2 WQEBB's, even though\n+\t * data segment is not used in this case.\n+\t */\n+\tsq->pi += 2;\n+\trte_io_wmb();\n+\tsq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);\n+\trte_wmb();\n+\t*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */\n+\trte_wmb();\n+\trte_spinlock_unlock(&sq->sqsl);\n+\treturn 1;\n+}\n+\n /*\n  * Handle completions from WQEs sent to ASO CT.\n  *\n@@ -1143,3 +1230,161 @@ mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\tct->offset, pool->index);\n \treturn -1;\n }\n+\n+/*\n+ * The routine is used to wait for WQE completion to continue with queried data.\n+ *\n+ * @param[in] sh\n+ *   Pointer to mlx5_dev_ctx_shared object.\n+ * @param[in] ct\n+ *   Pointer to connection tracking offload object.\n+ *\n+ * @return\n+ *   0 on success, -1 on failure.\n+ */\n+int\n+mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,\n+\t\t       struct mlx5_aso_ct_action *ct)\n+{\n+\tstruct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;\n+\tuint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n+\tstruct mlx5_aso_ct_pool *pool;\n+\n+\tif (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==\n+\t    ASO_CONNTRACK_READY)\n+\t\treturn 0;\n+\tdo {\n+\t\tmlx5_aso_ct_completion_handle(mng);\n+\t\tif (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==\n+\t\t    ASO_CONNTRACK_READY)\n+\t\t\treturn 0;\n+\t\t/* Waiting for CQE ready, consider should block or sleep. */\n+\t\trte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n+\t} while (--poll_cqe_times);\n+\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tDRV_LOG(ERR, \"Fail to poll CQE for ASO CT %d in pool %d\",\n+\t\tct->offset, pool->index);\n+\treturn -1;\n+}\n+\n+/*\n+ * Convert the hardware conntrack data format into the profile.\n+ *\n+ * @param[in] profile\n+ *   Pointer to conntrack profile to be filled after query.\n+ * @param[in] wdata\n+ *   Pointer to data fetched from hardware.\n+ */\n+static inline void\n+mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,\n+\t\t\tchar *wdata)\n+{\n+\tvoid *o_dir = MLX5_ADDR_OF(conn_track_aso, wdata, original_dir);\n+\tvoid *r_dir = MLX5_ADDR_OF(conn_track_aso, wdata, reply_dir);\n+\n+\t/* MLX5_GET16 should be taken into consideration. 
*/\n+\tprofile->state = (enum rte_flow_conntrack_state)\n+\t\t\t MLX5_GET(conn_track_aso, wdata, state);\n+\tprofile->enable = !MLX5_GET(conn_track_aso, wdata, freeze_track);\n+\tprofile->selective_ack = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\t  sack_permitted);\n+\tprofile->live_connection = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\t    connection_assured);\n+\tprofile->challenge_ack_passed = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\t\t challenged_acked);\n+\tprofile->max_ack_window = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\t   max_ack_window);\n+\tprofile->retransmission_limit = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\t\t retranmission_limit);\n+\tprofile->last_window = MLX5_GET(conn_track_aso, wdata, last_win);\n+\tprofile->last_direction = MLX5_GET(conn_track_aso, wdata, last_dir);\n+\tprofile->last_index = (enum rte_flow_conntrack_tcp_last_index)\n+\t\t\t      MLX5_GET(conn_track_aso, wdata, last_index);\n+\tprofile->last_seq = MLX5_GET(conn_track_aso, wdata, last_seq);\n+\tprofile->last_ack = MLX5_GET(conn_track_aso, wdata, last_ack);\n+\tprofile->last_end = MLX5_GET(conn_track_aso, wdata, last_end);\n+\tprofile->liberal_mode = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\treply_direction_tcp_liberal_enabled) |\n+\t\t\t\tMLX5_GET(conn_track_aso, wdata,\n+\t\t\t\toriginal_direction_tcp_liberal_enabled);\n+\t/* No liberal in the RTE structure profile. */\n+\tprofile->reply_dir.scale = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\t    reply_direction_tcp_scale);\n+\tprofile->reply_dir.close_initiated = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\treply_direction_tcp_close_initiated);\n+\tprofile->reply_dir.data_unacked = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\treply_direction_tcp_data_unacked);\n+\tprofile->reply_dir.last_ack_seen = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\treply_direction_tcp_max_ack);\n+\tprofile->reply_dir.sent_end = MLX5_GET(tcp_window_params,\n+\t\t\t\t\t       r_dir, sent_end);\n+\tprofile->reply_dir.reply_end = MLX5_GET(tcp_window_params,\n+\t\t\t\t\t\tr_dir, reply_end);\n+\tprofile->reply_dir.max_win = MLX5_GET(tcp_window_params,\n+\t\t\t\t\t      r_dir, max_win);\n+\tprofile->reply_dir.max_ack = MLX5_GET(tcp_window_params,\n+\t\t\t\t\t      r_dir, max_ack);\n+\tprofile->original_dir.scale = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\t       original_direction_tcp_scale);\n+\tprofile->original_dir.close_initiated = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\toriginal_direction_tcp_close_initiated);\n+\tprofile->original_dir.data_unacked = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\toriginal_direction_tcp_data_unacked);\n+\tprofile->original_dir.last_ack_seen = MLX5_GET(conn_track_aso, wdata,\n+\t\t\t\t\toriginal_direction_tcp_max_ack);\n+\tprofile->original_dir.sent_end = MLX5_GET(tcp_window_params,\n+\t\t\t\t\t\t  o_dir, sent_end);\n+\tprofile->original_dir.reply_end = MLX5_GET(tcp_window_params,\n+\t\t\t\t\t\t   o_dir, reply_end);\n+\tprofile->original_dir.max_win = MLX5_GET(tcp_window_params,\n+\t\t\t\t\t\t o_dir, max_win);\n+\tprofile->original_dir.max_ack = MLX5_GET(tcp_window_params,\n+\t\t\t\t\t\t o_dir, max_ack);\n+}\n+\n+/*\n+ * Query connection tracking information parameter by send WQE.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] ct\n+ *   Pointer to connection tracking offload object.\n+ * @param[out] profile\n+ *   Pointer to connection tracking TCP information.\n+ *\n+ * @return\n+ *   0 on success, -1 on failure.\n+ */\n+int\n+mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t struct 
mlx5_aso_ct_action *ct,\n+\t\t\t struct rte_flow_action_conntrack *profile)\n+{\n+\tstruct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;\n+\tuint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n+\tstruct mlx5_aso_ct_pool *pool;\n+\tchar out_data[64 * 2];\n+\tint ret;\n+\n+\tMLX5_ASSERT(ct);\n+\tdo {\n+\t\tmlx5_aso_ct_completion_handle(mng);\n+\t\tret = mlx5_aso_ct_sq_query_single(mng, ct, out_data);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t\telse if (ret > 0)\n+\t\t\tgoto data_handle;\n+\t\t/* Waiting for wqe resource or state. */\n+\t\telse\n+\t\t\trte_delay_us_sleep(10u);\n+\t} while (--poll_wqe_times);\n+\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tDRV_LOG(ERR, \"Fail to send WQE for ASO CT %d in pool %d\",\n+\t\tct->offset, pool->index);\n+\treturn -1;\n+data_handle:\n+\tret = mlx5_aso_ct_wait_ready(sh, ct);\n+\tif (!ret)\n+\t\tmlx5_aso_ct_obj_analyze(profile, out_data);\n+\treturn ret;\n+}\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex c8ff693e4c..84e7f0b3d3 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -13775,6 +13775,8 @@ flow_dv_action_query(struct rte_eth_dev *dev,\n \tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n \tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \tuint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_action *ct;\n \n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n@@ -13788,6 +13790,23 @@ flow_dv_action_query(struct rte_eth_dev *dev,\n \t\t\tresp->sec_since_last_hit = __atomic_load_n\n \t\t\t     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);\n \t\treturn 0;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\tct = flow_aso_ct_get_by_idx(dev, idx);\n+\t\tif (!ct->refcnt)\n+\t\t\treturn rte_flow_error_set(error, EFAULT,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\tNULL,\n+\t\t\t\t\t\"CT object is inactive\");\n+\t\t((struct rte_flow_action_conntrack *)data)->peer_port =\n+\t\t\t\t\t\t\tct->peer;\n+\t\t((struct rte_flow_action_conntrack *)data)->is_original_dir =\n+\t\t\t\t\t\t\tct->is_original;\n+\t\tif (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))\n+\t\t\treturn rte_flow_error_set(error, EIO,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\tNULL,\n+\t\t\t\t\t\"Failed to query CT context\");\n+\t\treturn 0;\n \tdefault:\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n",
    "prefixes": [
        "v3",
        "09/17"
    ]
}
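
The response above links out to the related resources: "mbox" serves the raw patch email, "comments" and "checks" point to the per-patch collections, and the aggregate of the checks is mirrored in the "check" field ("success" here). A short sketch of following those links; the check item fields ("context", "state") are taken from the Patchwork checks API, and the local repository path is an assumption for illustration only.

# Sketch: follow the link fields of the patch object shown above.
import subprocess
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/92824/").json()

# List individual CI results behind the aggregate "check" field.
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])

# Download the raw email (including the diff) and apply it to a local
# DPDK clone; "/path/to/dpdk" is a placeholder.
mbox = requests.get(patch["mbox"]).content
subprocess.run(["git", "-C", "/path/to/dpdk", "am"], input=mbox, check=True)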