get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/116760/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 116760,
    "url": "https://patches.dpdk.org/api/patches/116760/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220923144334.27736-22-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220923144334.27736-22-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220923144334.27736-22-suanmingm@nvidia.com",
    "date": "2022-09-23T14:43:28",
    "name": "[21/27] net/mlx5: add HW steering connection tracking support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "866c5cc3204b3184dfd56118970e3e97f990173b",
    "submitter": {
        "id": 1887,
        "url": "https://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220923144334.27736-22-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 24805,
            "url": "https://patches.dpdk.org/api/series/24805/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=24805",
            "date": "2022-09-23T14:43:07",
            "name": "net/mlx5: HW steering PMD update",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/24805/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/116760/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/116760/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E71B5A054A;\n\tFri, 23 Sep 2022 16:47:10 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0CD6F42C35;\n\tFri, 23 Sep 2022 16:44:49 +0200 (CEST)",
            "from NAM12-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam12on2069.outbound.protection.outlook.com [40.107.237.69])\n by mails.dpdk.org (Postfix) with ESMTP id 0DFA842BAB\n for <dev@dpdk.org>; Fri, 23 Sep 2022 16:44:46 +0200 (CEST)",
            "from MW4PR03CA0076.namprd03.prod.outlook.com (2603:10b6:303:b6::21)\n by DM4PR12MB6086.namprd12.prod.outlook.com (2603:10b6:8:b2::16) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5632.16; Fri, 23 Sep\n 2022 14:44:44 +0000",
            "from CO1NAM11FT034.eop-nam11.prod.protection.outlook.com\n (2603:10b6:303:b6:cafe::d0) by MW4PR03CA0076.outlook.office365.com\n (2603:10b6:303:b6::21) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5654.20 via Frontend\n Transport; Fri, 23 Sep 2022 14:44:43 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n CO1NAM11FT034.mail.protection.outlook.com (10.13.174.248) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5654.14 via Frontend Transport; Fri, 23 Sep 2022 14:44:43 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Fri, 23 Sep\n 2022 07:44:26 -0700",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Fri, 23 Sep\n 2022 07:44:25 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=iu12ZZfkPN+tx4hgBJLcZEXEcUBlgG5HH8pQvjOk8prHQsGJhJovhITO93FBkxEM6YGuKViD4OjVQCZA76NxabvsE8ksVrCkMHtuNol+miVhRO9eigVYWifowBR5U7xMIf14q9xrgXJfWBc0LWiwPmEmKhEBuV5Uje0eO+mppMtwFbOEdwDgXd9S93N7tuCzTZnA/IAULRlpJ1WlBmFrmVKonIuTnPAq5q8J94MKu14JTRMzal6B3oHPBsFg1+x+xb+mhw27LIeO0i148qcqnAhPWnhHJOmY+qkRprGjt4L+gofo+HsPQS2D0hQHRRMTtqCS6WipCUvKhPv9ZDYebA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=6WCBB0jmpjTDj3ZxjsoioeBaW70Y1i+C/3kWR85u/jI=;\n b=Yn7mmxdIpXjvIq37jZZAdoBAYXmcW7u+kUvG0jCBF583ei7XQ+X3w2O+oYUu7rY8VxoM0XTCVR4hwR+Zd57+XtKeY7H5uKAB/tGZk5i1mW9M0IBb93Wk/n356fzbinLqaLvmw91a2a5dB5JVjewi744dnBAKGxpNszZzAEn99vP5DSIIOl08DeBtN64AMokokzz3Cnf3U0+xfLRjgPGlD7UmFYDd/d7YyDEvx2v0E8jHsS84ZN8r0+a/KX0HA/kxABaM5SlX21iLSO7TqMaGEQBACdGhCXIaKTYajuZZO5QfgOLZgpAnaKUaupqYW7hzndBaw9awGXiBfp+pxYSSnA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=6WCBB0jmpjTDj3ZxjsoioeBaW70Y1i+C/3kWR85u/jI=;\n b=VAcOy4p+QV8wPeXlF8/qvo+VNzrpkwz6V/CezztgLaPS4QjMjUBBE/jvq/+I0fQE4ZvT3anv54wS/eMnJwpNKHMyqpQCwcNP6DtHWVZ5xUb1dfa+ZwnoMGmDuk2JcGnmD1KJmk3JD2bnlqfNBLqOehIRVlEietvyakUOMCogrkrBzJmzs6wetnm/YCWTdtXfMJ3fpvrhe+uC6zcY+6yuUddZW9f+/iLCj1OEM3hDSL7id3GqUnyM2swANvPhpT8TAjwWUixuhbgpLPLSc9ovaDno8i930NPRURtAbXUImzHnnm2sN02fRi6DlJsm59iPZRkDKYA73YKRj1+j5Mf03w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>",
        "CC": "<dev@dpdk.org>",
        "Subject": "[PATCH 21/27] net/mlx5: add HW steering connection tracking support",
        "Date": "Fri, 23 Sep 2022 17:43:28 +0300",
        "Message-ID": "<20220923144334.27736-22-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220923144334.27736-1-suanmingm@nvidia.com>",
        "References": "<20220923144334.27736-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "CO1NAM11FT034:EE_|DM4PR12MB6086:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "17de5242-0f41-417f-00ac-08da9d722748",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n iP8zqUkAUJb3pPRRsSCBtcurSYOnsfKvRNlGPghbK3az7sxH4PWxAspJVKq1PO+1kFwnXGO6aavf0AzhYK/fep4abIdnaayiTetDd7QrOWBHMCZykAIfeGOshoYJzfLFsqFsQjzxlzhdKP+S9tmtanj2rlpNAwNI7QBnAs5nkjllCtEGd0P6bZqtTMQRf/tRODOewvirSugXgLnvyjkDSijDu3eozsgTvDKqH8MgNzaBmwzYmX3nxa+BSCXV7yr/ZYAcdjpOPrdO26WpR5yKcFvgNGYh2Xk5Ybt1MdsNdYVWcxS6jD+8B1SErdqYI5viFPuLkCe+2l+9Ehh/nmTYXvRu1PXvZMXAnmiVEc2OjZdnAAXGpHZiGVgrQpyR/jUWR2HUke4YPu+fQd+OZ6JA/t3/Fo2RJWLtOpCS4UtNnwTPYtuuGMcNZG/fJnue01aewfz5c6+Nis+gIoZQIoyyhbfLF8Ht5s3sdjW5eslErtxgYCUsQJtB/JOqa4bIh7dw0ZgriBK3/v8L68WhMu6crXF2scWNXXv6Ev+b+nI6tb95ddxDxctaU3DOt72nDp28CumgnVYxzDj61DhmL8QGH53i4eqgiJayuq05K49EM1rqKrOsXJRbXbia9xPqA0qQA1a/HpU+6p14PAsb/OGmypgnDZQSF0zHTAVEYUzS2HL3hjWbQ2ZS4yyd7bY2+ua4L/hy436MK3Sp7pM+n7wJJRGZEXBUlJBHc6/puEu9kx3OQMDNh8PYS+qKnRP7vLhT7/ERx9wNaJUymrv5EnVpVTuMc+GvOHnHRhjVse0eURU=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(376002)(136003)(39860400002)(396003)(346002)(451199015)(46966006)(36840700001)(40470700004)(55016003)(66899012)(86362001)(2906002)(40480700001)(41300700001)(4326008)(70586007)(36756003)(70206006)(8676002)(8936002)(6636002)(6286002)(47076005)(83380400001)(7696005)(26005)(30864003)(110136005)(316002)(40460700003)(356005)(82310400005)(478600001)(186003)(16526019)(1076003)(2616005)(336012)(426003)(36860700001)(5660300002)(82740400003)(7636003)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "23 Sep 2022 14:44:43.5803 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 17de5242-0f41-417f-00ac-08da9d722748",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT034.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM4PR12MB6086",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This commit adds the support of connection tracking to HW steering as\nSW steering did before.\n\nDifferent with SW steering implementation, take advantage of HW steering\nbulk action allocation support, in HW steering only one single CT pool\nis needed.\n\nAn indexed pool is introduced to record allocated actions from bulk and\nCT action state etc. Once one CT action is allocated from bulk, one\nindexed object will also be allocated from the indexed pool, similar for\ndeallocate. That makes mlx5_aso_ct_action can also be managed by that\nindexed pool, no need to be reserved from mlx5_aso_ct_pool. The single\nCT pool is also saved to mlx5_aso_ct_action struct directly.\n\nThe ASO operation functions are shared with SW steering implementation.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h          |  27 ++-\n drivers/net/mlx5/mlx5_flow.h     |   4 +\n drivers/net/mlx5/mlx5_flow_aso.c |  19 +-\n drivers/net/mlx5/mlx5_flow_dv.c  |   6 +-\n drivers/net/mlx5/mlx5_flow_hw.c  | 342 ++++++++++++++++++++++++++++++-\n 5 files changed, 388 insertions(+), 10 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex be60038810..ee4823f649 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1159,7 +1159,12 @@ enum mlx5_aso_ct_state {\n \n /* Generic ASO connection tracking structure. */\n struct mlx5_aso_ct_action {\n-\tLIST_ENTRY(mlx5_aso_ct_action) next; /* Pointer to the next ASO CT. */\n+\tunion {\n+\t\tLIST_ENTRY(mlx5_aso_ct_action) next;\n+\t\t/* Pointer to the next ASO CT. Used only in SWS. */\n+\t\tstruct mlx5_aso_ct_pool *pool;\n+\t\t/* Pointer to action pool. Used only in HWS. */\n+\t};\n \tvoid *dr_action_orig; /* General action object for original dir. */\n \tvoid *dr_action_rply; /* General action object for reply dir. */\n \tuint32_t refcnt; /* Action used count in device flows. */\n@@ -1173,15 +1178,30 @@ struct mlx5_aso_ct_action {\n #define MLX5_ASO_CT_UPDATE_STATE(c, s) \\\n \t__atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)\n \n+#ifdef PEDANTIC\n+#pragma GCC diagnostic ignored \"-Wpedantic\"\n+#endif\n+\n /* ASO connection tracking software pool definition. */\n struct mlx5_aso_ct_pool {\n \tuint16_t index; /* Pool index in pools array. */\n+\t/* Free ASO CT index in the pool. Used by HWS. */\n+\tstruct mlx5_indexed_pool *cts;\n \tstruct mlx5_devx_obj *devx_obj;\n-\t/* The first devx object in the bulk, used for freeing (not yet). */\n-\tstruct mlx5_aso_ct_action actions[MLX5_ASO_CT_ACTIONS_PER_POOL];\n+\tunion {\n+\t\tvoid *dummy_action;\n+\t\t/* Dummy action to increase the reference count in the driver. */\n+\t\tstruct mlx5dr_action *dr_action;\n+\t\t/* HWS action. */\n+\t};\n+\tstruct mlx5_aso_ct_action actions[0];\n \t/* CT action structures bulk. */\n };\n \n+#ifdef PEDANTIC\n+#pragma GCC diagnostic error \"-Wpedantic\"\n+#endif\n+\n LIST_HEAD(aso_ct_list, mlx5_aso_ct_action);\n \n /* Pools management structure for ASO connection tracking pools. 
*/\n@@ -1647,6 +1667,7 @@ struct mlx5_priv {\n \tLIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo;\n \tstruct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */\n \tstruct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */\n+\tstruct mlx5_aso_ct_pool *hws_ctpool; /* HW steering's CT pool. */\n #endif\n };\n \ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex a6bd002dca..f7bedd9605 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -83,6 +83,10 @@ enum {\n #define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \\\n \t((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))\n \n+#define MLX5_ACTION_CTX_CT_GET_IDX  MLX5_INDIRECT_ACT_CT_GET_IDX\n+#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER\n+#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX\n+\n /* Matches on selected register. */\n struct mlx5_rte_flow_item_tag {\n \tenum modify_reg id;\ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex ed9272e583..34fed3f4b8 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -903,6 +903,15 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,\n \treturn -1;\n }\n \n+static inline struct mlx5_aso_ct_pool*\n+__mlx5_aso_ct_get_pool(struct mlx5_dev_ctx_shared *sh,\n+\t\t       struct mlx5_aso_ct_action *ct)\n+{\n+\tif (likely(sh->config.dv_flow_en == 2))\n+\t\treturn ct->pool;\n+\treturn container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+}\n+\n /*\n  * Post a WQE to the ASO CT SQ to modify the context.\n  *\n@@ -945,7 +954,7 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);\n \tsq->elts[sq->head & mask].ct = ct;\n \tsq->elts[sq->head & mask].query_data = NULL;\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \t/* Each WQE will 
have a single CT object. */\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n \t\t\t\t\t\t  ct->offset);\n@@ -1113,7 +1122,7 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \twqe_idx = sq->head & mask;\n \tsq->elts[wqe_idx].ct = ct;\n \tsq->elts[wqe_idx].query_data = data;\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \t/* Each WQE will have a single CT object. */\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n \t\t\t\t\t\t  ct->offset);\n@@ -1231,7 +1240,7 @@ mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\t/* Waiting for wqe resource. */\n \t\trte_delay_us_sleep(10u);\n \t} while (--poll_wqe_times);\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \tDRV_LOG(ERR, \"Fail to send WQE for ASO CT %d in pool %d\",\n \t\tct->offset, pool->index);\n \treturn -1;\n@@ -1267,7 +1276,7 @@ mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,\n \t\t/* Waiting for CQE ready, consider should block or sleep. 
*/\n \t\trte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n \t} while (--poll_cqe_times);\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \tDRV_LOG(ERR, \"Fail to poll CQE for ASO CT %d in pool %d\",\n \t\tct->offset, pool->index);\n \treturn -1;\n@@ -1383,7 +1392,7 @@ mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\telse\n \t\t\trte_delay_us_sleep(10u);\n \t} while (--poll_wqe_times);\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \tDRV_LOG(ERR, \"Fail to send WQE for ASO CT %d in pool %d\",\n \t\tct->offset, pool->index);\n \treturn -1;\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 80539fd75d..e2794c1d26 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -12790,6 +12790,7 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,\n \tstruct mlx5_devx_obj *obj = NULL;\n \tuint32_t i;\n \tuint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);\n+\tsize_t mem_size;\n \n \tobj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,\n \t\t\t\t\t\t\t  priv->sh->cdev->pdn,\n@@ -12799,7 +12800,10 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,\n \t\tDRV_LOG(ERR, \"Failed to create conn_track_offload_obj using DevX.\");\n \t\treturn NULL;\n \t}\n-\tpool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);\n+\tmem_size = sizeof(struct mlx5_aso_ct_action) *\n+\t\t   MLX5_ASO_CT_ACTIONS_PER_POOL +\n+\t\t   sizeof(*pool);\n+\tpool = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);\n \tif (!pool) {\n \t\trte_errno = ENOMEM;\n \t\tclaim_zero(mlx5_devx_cmd_destroy(obj));\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex b9d4402aed..a4a0882d15 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -14,9 +14,19 @@\n #include 
\"mlx5dr_send.h\"\n #include \"mlx5_hws_cnt.h\"\n \n+#define MLX5_HW_INV_QUEUE UINT32_MAX\n+\n /* The maximum actions support in the flow. */\n #define MLX5_HW_MAX_ACTS 16\n \n+/*\n+ * The default ipool threshold value indicates which per_core_cache\n+ * value to set.\n+ */\n+#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)\n+/* The default min local cache size. */\n+#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)\n+\n /* Default push burst threshold. */\n #define BURST_THR 32u\n \n@@ -323,6 +333,24 @@ flow_hw_tir_action_register(struct rte_eth_dev *dev,\n \treturn hrxq;\n }\n \n+static __rte_always_inline int\n+flow_hw_ct_compile(struct rte_eth_dev *dev, uint32_t idx,\n+\t\t   struct mlx5dr_rule_action *rule_act)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_action *ct;\n+\n+\tct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);\n+\tif (!ct || mlx5_aso_ct_available(priv->sh, ct))\n+\t\treturn -1;\n+\trule_act->action = priv->hws_ctpool->dr_action;\n+\trule_act->aso_ct.offset = ct->offset;\n+\trule_act->aso_ct.direction = ct->is_original ?\n+\t\tMLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :\n+\t\tMLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;\n+\treturn 0;\n+}\n+\n /**\n  * Destroy DR actions created by action template.\n  *\n@@ -622,6 +650,10 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,\n \t\t\taction_src, action_dst, act_idx))\n \t\t\treturn -1;\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\tif (flow_hw_ct_compile(dev, idx, &acts->rule_acts[action_dst]))\n+\t\t\treturn -1;\n+\t\tbreak;\n \tdefault:\n \t\tDRV_LOG(WARNING, \"Unsupported shared action type:%d\", type);\n \t\tbreak;\n@@ -1057,6 +1089,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \tbool reformat_used = false;\n \tuint16_t action_pos;\n \tuint16_t jump_pos;\n+\tuint32_t ct_idx;\n \tint err;\n \n \tflow_hw_modify_field_init(&mhdr, at);\n@@ -1279,6 +1312,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t\tgoto err;\n \t\t\t}\n 
\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\t\taction_pos = at->actions_off[actions - action_start];\n+\t\t\tif (masks->conf) {\n+\t\t\t\tct_idx = MLX5_ACTION_CTX_CT_GET_IDX\n+\t\t\t\t\t ((uint32_t)(uintptr_t)actions->conf);\n+\t\t\t\tif (flow_hw_ct_compile(dev, ct_idx,\n+\t\t\t\t\t\t       &acts->rule_acts[action_pos]))\n+\t\t\t\t\tgoto err;\n+\t\t\t} else if (__flow_hw_act_data_general_append\n+\t\t\t\t\t(priv, acts, actions->type,\n+\t\t\t\t\t actions - action_start, action_pos)) {\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n \t\t\tbreak;\n@@ -1506,6 +1553,10 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev,\n \t\t\t\t&rule_act->counter.offset))\n \t\t\treturn -1;\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\tif (flow_hw_ct_compile(dev, idx, rule_act))\n+\t\t\treturn -1;\n+\t\tbreak;\n \tdefault:\n \t\tDRV_LOG(WARNING, \"Unsupported shared action type:%d\", type);\n \t\tbreak;\n@@ -1691,6 +1742,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\tuint64_t item_flags;\n \t\tstruct mlx5_hw_jump_action *jump;\n \t\tstruct mlx5_hrxq *hrxq;\n+\t\tuint32_t ct_idx;\n \t\tcnt_id_t cnt_id;\n \n \t\taction = &actions[act_data->action_src];\n@@ -1824,6 +1876,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t\treturn ret;\n \t\t\tjob->flow->cnt_id = act_data->shared_counter.id;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\t\tct_idx = MLX5_ACTION_CTX_CT_GET_IDX\n+\t\t\t\t ((uint32_t)(uintptr_t)action->conf);\n+\t\t\tif (flow_hw_ct_compile(dev, ct_idx,\n+\t\t\t\t\t       &rule_acts[act_data->action_dst]))\n+\t\t\t\treturn -1;\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n@@ -2348,6 +2407,8 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \tif (nb_flows < cfg.trunk_size) {\n \t\tcfg.per_core_cache = 0;\n \t\tcfg.trunk_size = nb_flows;\n+\t} else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {\n+\t\tcfg.per_core_cache = 
MLX5_HW_IPOOL_CACHE_MIN;\n \t}\n \t/* Check if we requires too many templates. */\n \tif (nb_item_templates > max_tpl ||\n@@ -2867,6 +2928,9 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n \t\t\t/* TODO: Validation logic */\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\t\t/* TODO: Validation logic */\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n \t\t\tbreak;\n@@ -2893,6 +2957,7 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {\n \t[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,\n \t[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,\n \t[RTE_FLOW_ACTION_TYPE_COUNT] = MLX5DR_ACTION_TYP_CTR,\n+\t[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,\n };\n \n static int\n@@ -2921,6 +2986,11 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,\n \t\taction_types[*curr_off] = MLX5DR_ACTION_TYP_CTR;\n \t\t*curr_off = *curr_off + 1;\n \t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\tat->actions_off[action_src] = *curr_off;\n+\t\taction_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;\n+\t\t*curr_off = *curr_off + 1;\n+\t\tbreak;\n \tdefault:\n \t\tDRV_LOG(WARNING, \"Unsupported shared action type: %d\", type);\n \t\treturn -EINVAL;\n@@ -3375,6 +3445,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ITEM_TYPE_GRE_OPTION:\n \t\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n \t\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n+\t\tcase RTE_FLOW_ITEM_TYPE_CONNTRACK:\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_END:\n \t\t\titems_end = true;\n@@ -4570,6 +4641,84 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n \treturn -EINVAL;\n }\n \n+static void\n+flow_hw_ct_pool_destroy(struct rte_eth_dev *dev __rte_unused,\n+\t\t\tstruct mlx5_aso_ct_pool *pool)\n+{\n+\tif (pool->dr_action)\n+\t\tmlx5dr_action_destroy(pool->dr_action);\n+\tif 
(pool->devx_obj)\n+\t\tclaim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));\n+\tif (pool->cts)\n+\t\tmlx5_ipool_destroy(pool->cts);\n+\tmlx5_free(pool);\n+}\n+\n+static struct mlx5_aso_ct_pool *\n+flow_hw_ct_pool_create(struct rte_eth_dev *dev,\n+\t\t       const struct rte_flow_port_attr *port_attr)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool;\n+\tstruct mlx5_devx_obj *obj;\n+\tuint32_t nb_cts = rte_align32pow2(port_attr->nb_cts);\n+\tuint32_t log_obj_size = rte_log2_u32(nb_cts);\n+\tstruct mlx5_indexed_pool_config cfg = {\n+\t\t.size = sizeof(struct mlx5_aso_ct_action),\n+\t\t.trunk_size = 1 << 12,\n+\t\t.per_core_cache = 1 << 13,\n+\t\t.need_lock = 1,\n+\t\t.release_mem_en = !!priv->sh->config.reclaim_mode,\n+\t\t.malloc = mlx5_malloc,\n+\t\t.free = mlx5_free,\n+\t\t.type = \"mlx5_hw_ct_action\",\n+\t};\n+\tint reg_id;\n+\tuint32_t flags;\n+\n+\tpool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);\n+\tif (!pool) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tobj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,\n+\t\t\t\t\t\t\t  priv->sh->cdev->pdn,\n+\t\t\t\t\t\t\t  log_obj_size);\n+\tif (!obj) {\n+\t\trte_errno = ENODATA;\n+\t\tDRV_LOG(ERR, \"Failed to create conn_track_offload_obj using DevX.\");\n+\t\tgoto err;\n+\t}\n+\tpool->devx_obj = obj;\n+\treg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);\n+\tflags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;\n+\tif (priv->sh->config.dv_esw_en && priv->master)\n+\t\tflags |= MLX5DR_ACTION_FLAG_HWS_FDB;\n+\tpool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,\n+\t\t\t\t\t\t      (struct mlx5dr_devx_obj *)obj,\n+\t\t\t\t\t\t      reg_id - REG_C_0, flags);\n+\tif (!pool->dr_action)\n+\t\tgoto err;\n+\t/*\n+\t * No need for local cache if CT number is a small number. Since\n+\t * flow insertion rate will be very limited in that case. 
Here let's\n+\t * set the number to less than default trunk size 4K.\n+\t */\n+\tif (nb_cts <= cfg.trunk_size) {\n+\t\tcfg.per_core_cache = 0;\n+\t\tcfg.trunk_size = nb_cts;\n+\t} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {\n+\t\tcfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;\n+\t}\n+\tpool->cts = mlx5_ipool_create(&cfg);\n+\tif (!pool->cts)\n+\t\tgoto err;\n+\treturn pool;\n+err:\n+\tflow_hw_ct_pool_destroy(dev, pool);\n+\treturn NULL;\n+}\n+\n /**\n  * Configure port HWS resources.\n  *\n@@ -4755,6 +4904,11 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t}\n \tif (_queue_attr)\n \t\tmlx5_free(_queue_attr);\n+\tif (port_attr->nb_cts) {\n+\t\tpriv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);\n+\t\tif (!priv->hws_ctpool)\n+\t\t\tgoto err;\n+\t}\n \tif (port_attr->nb_counters) {\n \t\tpriv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,\n \t\t\t\tnb_queue);\n@@ -4763,6 +4917,10 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t}\n \treturn 0;\n err:\n+\tif (priv->hws_ctpool) {\n+\t\tflow_hw_ct_pool_destroy(dev, priv->hws_ctpool);\n+\t\tpriv->hws_ctpool = NULL;\n+\t}\n \tflow_hw_free_vport_actions(priv);\n \tfor (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {\n \t\tif (priv->hw_drop[i])\n@@ -4835,6 +4993,10 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \t}\n \tif (priv->hws_cpool)\n \t\tmlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);\n+\tif (priv->hws_ctpool) {\n+\t\tflow_hw_ct_pool_destroy(dev, priv->hws_ctpool);\n+\t\tpriv->hws_ctpool = NULL;\n+\t}\n \tmlx5_free(priv->hw_q);\n \tpriv->hw_q = NULL;\n \tclaim_zero(mlx5dr_context_close(priv->dr_ctx));\n@@ -4997,6 +5159,169 @@ flow_hw_clear_flow_metadata_config(void)\n \tmlx5_flow_hw_flow_metadata_xmeta_en = 0;\n }\n \n+static int\n+flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,\n+\t\t\t  uint32_t idx,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n+\tuint32_t ct_idx = 
MLX5_ACTION_CTX_CT_GET_IDX(idx);\n+\tstruct rte_eth_dev *owndev = &rte_eth_devices[owner];\n+\tstruct mlx5_priv *priv = owndev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n+\tstruct mlx5_aso_ct_action *ct;\n+\n+\tct = mlx5_ipool_get(pool->cts, ct_idx);\n+\tif (!ct) {\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Invalid CT destruction index\");\n+\t}\n+\t__atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,\n+\t\t\t\t __ATOMIC_RELAXED);\n+\tmlx5_ipool_free(pool->cts, ct_idx);\n+\treturn 0;\n+}\n+\n+static int\n+flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t idx,\n+\t\t\tstruct rte_flow_action_conntrack *profile,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n+\tstruct mlx5_aso_ct_action *ct;\n+\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n+\tuint32_t ct_idx;\n+\n+\tif (owner != PORT_ID(priv))\n+\t\treturn rte_flow_error_set(error, EACCES,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Can't query CT object owned by another port\");\n+\tct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);\n+\tct = mlx5_ipool_get(pool->cts, ct_idx);\n+\tif (!ct) {\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Invalid CT query index\");\n+\t}\n+\tprofile->peer_port = ct->peer;\n+\tprofile->is_original_dir = ct->is_original;\n+\tif (mlx5_aso_ct_query_by_wqe(priv->sh, ct, profile))\n+\t\treturn rte_flow_error_set(error, EIO,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Failed to query CT context\");\n+\treturn 0;\n+}\n+\n+\n+static int\n+flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t const struct rte_flow_modify_conntrack *action_conf,\n+\t\t\t uint32_t idx, struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = 
dev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n+\tstruct mlx5_aso_ct_action *ct;\n+\tconst struct rte_flow_action_conntrack *new_prf;\n+\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n+\tuint32_t ct_idx;\n+\tint ret = 0;\n+\n+\tif (PORT_ID(priv) != owner)\n+\t\treturn rte_flow_error_set(error, EACCES,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"Can't update CT object owned by another port\");\n+\tct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);\n+\tct = mlx5_ipool_get(pool->cts, ct_idx);\n+\tif (!ct) {\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Invalid CT update index\");\n+\t}\n+\tnew_prf = &action_conf->new_ct;\n+\tif (action_conf->direction)\n+\t\tct->is_original = !!new_prf->is_original_dir;\n+\tif (action_conf->state) {\n+\t\t/* Only validate the profile when it needs to be updated. */\n+\t\tret = mlx5_validate_action_ct(dev, new_prf, error);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);\n+\t\tif (ret)\n+\t\t\treturn rte_flow_error_set(error, EIO,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\tNULL,\n+\t\t\t\t\t\"Failed to send CT context update WQE\");\n+\t\tif (queue != MLX5_HW_INV_QUEUE)\n+\t\t\treturn 0;\n+\t\t/* Block until ready or a failure in synchronous mode. 
*/\n+\t\tret = mlx5_aso_ct_available(priv->sh, ct);\n+\t\tif (ret)\n+\t\t\trte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"Timeout to get the CT update\");\n+\t}\n+\treturn ret;\n+}\n+\n+static struct rte_flow_action_handle *\n+flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t const struct rte_flow_action_conntrack *pro,\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n+\tstruct mlx5_aso_ct_action *ct;\n+\tuint32_t ct_idx = 0;\n+\tint ret;\n+\n+\tif (!pool) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t   \"CT is not enabled\");\n+\t\treturn 0;\n+\t}\n+\tct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);\n+\tif (!ct) {\n+\t\trte_flow_error_set(error, rte_errno,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t   \"Failed to allocate CT object\");\n+\t\treturn 0;\n+\t}\n+\tct->offset = ct_idx - 1;\n+\tct->is_original = !!pro->is_original_dir;\n+\tct->peer = pro->peer_port;\n+\tct->pool = pool;\n+\tif (mlx5_aso_ct_update_by_wqe(priv->sh, ct, pro)) {\n+\t\tmlx5_ipool_free(pool->cts, ct_idx);\n+\t\trte_flow_error_set(error, EBUSY,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t   \"Failed to update CT\");\n+\t\treturn 0;\n+\t}\n+\tif (queue == MLX5_HW_INV_QUEUE) {\n+\t\tret = mlx5_aso_ct_available(priv->sh, ct);\n+\t\tif (ret) {\n+\t\t\tmlx5_ipool_free(pool->cts, ct_idx);\n+\t\t\trte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"Timeout to get the CT update\");\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\treturn (struct rte_flow_action_handle *)(uintptr_t)\n+\t\tMLX5_ACTION_CTX_CT_GEN_IDX(PORT_ID(priv), ct_idx);\n+}\n+\n /**\n  * Create shared action.\n  *\n@@ -5044,6 +5369,9 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t 
queue,\n \t\t\thandle = (struct rte_flow_action_handle *)\n \t\t\t\t (uintptr_t)cnt_id;\n \t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\thandle = flow_hw_conntrack_create(dev, queue, action->conf, error);\n+\t\tbreak;\n \tdefault:\n \t\thandle = flow_dv_action_create(dev, conf, action, error);\n \t}\n@@ -5079,10 +5407,18 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t     void *user_data,\n \t\t\t     struct rte_flow_error *error)\n {\n+\tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n+\tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n+\n \tRTE_SET_USED(queue);\n \tRTE_SET_USED(attr);\n \tRTE_SET_USED(user_data);\n-\treturn flow_dv_action_update(dev, handle, update, error);\n+\tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\treturn flow_hw_conntrack_update(dev, queue, update, act_idx, error);\n+\tdefault:\n+\t\treturn flow_dv_action_update(dev, handle, update, error);\n+\t}\n }\n \n /**\n@@ -5121,6 +5457,8 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n \t\treturn mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\treturn flow_hw_conntrack_destroy(dev, act_idx, error);\n \tdefault:\n \t\treturn flow_dv_action_destroy(dev, handle, error);\n \t}\n@@ -5274,6 +5612,8 @@ flow_hw_action_query(struct rte_eth_dev *dev,\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n \t\treturn flow_hw_query_counter(dev, act_idx, data, error);\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\treturn flow_hw_conntrack_query(dev, act_idx, data, error);\n \tdefault:\n \t\treturn flow_dv_action_query(dev, handle, data, error);\n \t}\n",
    "prefixes": [
        "21/27"
    ]
}