get:
Show a patch.

patch:
Partially update a patch (only the fields sent in the request body are changed).

put:
Update a patch.
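
As an illustrative sketch (not part of the API page itself), the endpoint can be driven from Python with the requests library. The GET below is anonymous; the PATCH performs a partial update and assumes token authentication with a valid Patchwork API token (the value shown is a placeholder) and maintainer rights on the project:

import requests

BASE = "https://patches.dpdk.org/api"

# Anonymous read: fetch the patch record shown below as JSON.
patch = requests.get(f"{BASE}/patches/117220/").json()
print(patch["name"], patch["state"])

# Partial update: PATCH changes only the fields sent in the body.
# Assumes token auth; replace the placeholder with a real API token.
resp = requests.patch(
    f"{BASE}/patches/117220/",
    headers={"Authorization": "Token <your-api-token>"},  # placeholder token
    json={"state": "superseded"},
)
resp.raise_for_status()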

GET /api/patches/117220/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117220,
    "url": "http://patches.dpdk.org/api/patches/117220/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220930125315.5079-11-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220930125315.5079-11-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220930125315.5079-11-suanmingm@nvidia.com",
    "date": "2022-09-30T12:53:08",
    "name": "[v3,10/17] net/mlx5: add HW steering connection tracking support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a9dade9b8e85a4b6cc38009d5ad527ff9bc773ce",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220930125315.5079-11-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 24935,
            "url": "http://patches.dpdk.org/api/series/24935/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24935",
            "date": "2022-09-30T12:52:58",
            "name": "net/mlx5: HW steering PMD update",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/24935/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/117220/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/117220/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DBE49A00C4;\n\tFri, 30 Sep 2022 14:55:05 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B3C1D42B99;\n\tFri, 30 Sep 2022 14:54:09 +0200 (CEST)",
            "from NAM12-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam12on2051.outbound.protection.outlook.com [40.107.244.51])\n by mails.dpdk.org (Postfix) with ESMTP id 291AF42B96\n for <dev@dpdk.org>; Fri, 30 Sep 2022 14:54:05 +0200 (CEST)",
            "from DS7PR03CA0121.namprd03.prod.outlook.com (2603:10b6:5:3b4::6) by\n MW3PR12MB4521.namprd12.prod.outlook.com (2603:10b6:303:53::13) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5676.23; Fri, 30 Sep 2022 12:54:02 +0000",
            "from DM6NAM11FT102.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:3b4:cafe::ad) by DS7PR03CA0121.outlook.office365.com\n (2603:10b6:5:3b4::6) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5676.19 via Frontend\n Transport; Fri, 30 Sep 2022 12:54:02 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n DM6NAM11FT102.mail.protection.outlook.com (10.13.173.172) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5676.17 via Frontend Transport; Fri, 30 Sep 2022 12:54:02 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Fri, 30 Sep\n 2022 05:53:56 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Fri, 30 Sep\n 2022 05:53:54 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=FLKTFXi8t3OsXicv6q57HkB1yPxCWn4Z8uyWvkknX788C8sBXXEoffmACMV7iCq1gXCaca6wat5YfeiqnBbSHtQUCMIdZz/nQxlUxxnEnBA0MiJH0aPAtomJCZ8rMr3ki2ElEbONEiGm/4JEJmI+QI1V+7dhU47Y1iZvGIy1hIBsLELboXdAU74Y1VsyFHQuLCTGtsYVjjK35GMIrPjdWDBXzl8ymP5wgeSbdWvyzaT/UO/ehjKx05Eyo91XCN5faexKpmk4b4pbAc8gK9s1gfItHEU4YdkOeH6fUnUVSJ8KeCuZN24y+erqLtADMwnaWkV/M/XPpu2jSWLtSUvuuQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=dsOrsHy9HWYPPMg3U+os6l+XQ55slRmppe/7Dlf5NFg=;\n b=DpahlyDERgqhQTEufkz9AQmPN0O4f5IYoeN6yuz7WHumNumCUL31JElm7bdu/P2psHU8kyPXu8BcFM14fy97WqtPsK70wtb1qiMx+S3qGYYsmFLvrKJxo+0JffZWNTRaA2s5RBeen7b2OviGA/hqQoSldpLVGdhRQcWU+P3x2mFiJJORxMJ4GkUT5sewoU3Pno80mI5fXtwraSfHGh5Zq5vIiCFxDwQAdhJjoAFZuupRaW5R/kviiey8/jhUos+RsSkG1wMlOyKHUPVF2xzDkC7tc59/mcwUNZxz1MCC4F1tuqUgeEJp2t9VeQAwPE7DLScw3W6qb4Z4pQKvOFyUWw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=dsOrsHy9HWYPPMg3U+os6l+XQ55slRmppe/7Dlf5NFg=;\n b=KUvV5F3w3VUqPTDvSVQ+NV6TVJ0TsmSU8nALRqITay9eXqqmEckmSAaQlyQ/HxM680wWJP1Zh8sQAl4BUUMNEz4cE5uRrZ1ZbvzuOxD+bR0y9nvvhaS7Z6swOMviMAxqrFUcH+NyDk7q/I4D0ujgEaq/Tr4vIZKnWWdXjCnAhw0x2DrSD4EVzTiOukfcrfdHzccdHvOOAymQRIgKS1vGmP0LcyMt/jHwYaUC5TZpXurH3/Gi2pYr+Hl/SvpVahQZCYgOHHCfV5kMxXq7WroLK9HqXvmR3y9Lk/5PxOPpZxHT/TBR3EjqXXQ0+MQhcAD+GITw8D6M10iUCmJtmMzclQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>",
        "CC": "<dev@dpdk.org>, <rasland@nvidia.com>, <orika@nvidia.com>",
        "Subject": "[PATCH v3 10/17] net/mlx5: add HW steering connection tracking\n support",
        "Date": "Fri, 30 Sep 2022 15:53:08 +0300",
        "Message-ID": "<20220930125315.5079-11-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220930125315.5079-1-suanmingm@nvidia.com>",
        "References": "<20220923144334.27736-1-suanmingm@nvidia.com>\n <20220930125315.5079-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DM6NAM11FT102:EE_|MW3PR12MB4521:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "41d820d4-5670-407d-7a00-08daa2e2d9a8",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n fqOTO3KAqxx8miopdbTUPzKTKUEP1QNsEZuE0yyQM6SmeoaXZIURumHFKVrAIBiYYX3QApklx27haGxfUWwWZCECoTMSpuJoObYU0OTRLF9AWX9GZ2eQJYx1tBJb8qLmmeIH+cnZixGZo7dyPxp8lAYmAESheXBIe93yYfl7rBY+X3Z6RaG6oRyDKsZITnPHrlwgY64+mlt/ljPXvwVRazxXPbeI7yvV8hBeA3kpW1+TglyYWSayyBrjwSePboi8CV2D/MbeQ+yQUJoAo/pG92J9lQFkIbLQAyqdGlhuMAQqGdEvDxRj6UjV0YO5PxPk8H7KB7JkaamKkjhI0PFpusL7WOCvwroImJLTaoOYXEhT2T2fqgM5mfl0s0IxiyBDJQnZcyFL3lyMqtuNWzKzfUZ5TAGi0nSegP7GhpEC+qaL8VV8Fb6ugAMFWd/0fW8eEcqlB7NquWWHz3UYeimAn9cG/Czms/XZ4mUXSs/VgOqJYP/AU3iFrHgWF/wGWWEpgYV/Tfr7UDxrPWomfTBb/KZI1m9PnwDXn7JawS4MDHEoB/HWq4I9D3XAU6qkuwsGae+iK2JkyGRMtFo7a7CVcg5TmZX2rTPNsihxD7NkxFLz9g9kEUsaJRCTocdy98sJO/F09c+NbDzw7QUAnAqw6F8KoXldtMb60J3YG15KmdAaoDky7NYLnH+0mqzHRoC8243VFIFwz6WuIa12sCuRLx8QBKPkUmWnDfeLUPnU0PtoSCqDnyVPWQiopd9LiBHOQ63VjOnIkKT88lNSY65s4enzZxhOhC7Eo38Y0Af6/+s=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(396003)(39860400002)(136003)(376002)(346002)(451199015)(46966006)(40470700004)(36840700001)(7636003)(40480700001)(66899015)(356005)(83380400001)(478600001)(55016003)(82740400003)(82310400005)(47076005)(36756003)(6286002)(26005)(2616005)(8936002)(2906002)(107886003)(6666004)(5660300002)(1076003)(186003)(41300700001)(30864003)(316002)(16526019)(7696005)(54906003)(8676002)(36860700001)(4326008)(70206006)(70586007)(426003)(86362001)(40460700003)(6636002)(110136005)(336012)(579004)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Sep 2022 12:54:02.2789 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 41d820d4-5670-407d-7a00-08daa2e2d9a8",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT102.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MW3PR12MB4521",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This commit adds the support of connection tracking to HW steering as\nSW steering did before.\n\nDifferent with SW steering implementation, take advantage of HW steering\nbulk action allocation support, in HW steering only one single CT pool\nis needed.\n\nAn indexed pool is introduced to record allocated actions from bulk and\nCT action state etc. Once one CT action is allocated from bulk, one\nindexed object will also be allocated from the indexed pool, similar for\ndeallocate. That makes mlx5_aso_ct_action can also be managed by that\nindexed pool, no need to be reserved from mlx5_aso_ct_pool. The single\nCT pool is also saved to mlx5_aso_ct_action struct directly.\n\nThe ASO operation functions are shared with SW steering implementation.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c |   8 +-\n drivers/net/mlx5/mlx5.c          |   3 +-\n drivers/net/mlx5/mlx5.h          |  54 ++++-\n drivers/net/mlx5/mlx5_flow.c     |   1 +\n drivers/net/mlx5/mlx5_flow.h     |   7 +\n drivers/net/mlx5/mlx5_flow_aso.c | 212 +++++++++++++----\n drivers/net/mlx5/mlx5_flow_dv.c  |  28 ++-\n drivers/net/mlx5/mlx5_flow_hw.c  | 381 ++++++++++++++++++++++++++++++-\n 8 files changed, 617 insertions(+), 77 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex 65795da516..60a1a391fb 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -1349,9 +1349,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\tDRV_LOG(DEBUG, \"Flow Hit ASO is supported.\");\n \t\t}\n #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */\n-#if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO) && \\\n-\tdefined(HAVE_MLX5_DR_ACTION_ASO_CT)\n-\t\tif (hca_attr->ct_offload && priv->mtr_color_reg == REG_C_3) {\n+#if defined (HAVE_MLX5_DR_CREATE_ACTION_ASO) && \\\n+    defined (HAVE_MLX5_DR_ACTION_ASO_CT)\n+\t\t/* HWS create CT ASO SQ based on HWS configure queue number. */\n+\t\tif (sh->config.dv_flow_en != 2 &&\n+\t\t    hca_attr->ct_offload && priv->mtr_color_reg == REG_C_3) {\n \t\t\terr = mlx5_flow_aso_ct_mng_init(sh);\n \t\t\tif (err) {\n \t\t\t\terr = -err;\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex cf7b7b7158..925e19bcd5 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -755,7 +755,8 @@ mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh)\n \n \tif (sh->ct_mng)\n \t\treturn 0;\n-\tsh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng),\n+\tsh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng) +\n+\t\t\t\t sizeof(struct mlx5_aso_sq) * MLX5_ASO_CT_SQ_NUM,\n \t\t\t\t RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n \tif (!sh->ct_mng) {\n \t\tDRV_LOG(ERR, \"ASO CT management allocation failed.\");\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex c0835e725f..0578a41675 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -39,6 +39,8 @@\n \n #define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh)\n \n+#define MLX5_HW_INV_QUEUE UINT32_MAX\n+\n /*\n  * Number of modification commands.\n  * The maximal actions amount in FW is some constant, and it is 16 in the\n@@ -1159,7 +1161,12 @@ enum mlx5_aso_ct_state {\n \n /* Generic ASO connection tracking structure. */\n struct mlx5_aso_ct_action {\n-\tLIST_ENTRY(mlx5_aso_ct_action) next; /* Pointer to the next ASO CT. */\n+\tunion {\n+\t\tLIST_ENTRY(mlx5_aso_ct_action) next;\n+\t\t/* Pointer to the next ASO CT. Used only in SWS. */\n+\t\tstruct mlx5_aso_ct_pool *pool;\n+\t\t/* Pointer to action pool. Used only in HWS. */\n+\t};\n \tvoid *dr_action_orig; /* General action object for original dir. */\n \tvoid *dr_action_rply; /* General action object for reply dir. */\n \tuint32_t refcnt; /* Action used count in device flows. */\n@@ -1173,28 +1180,48 @@ struct mlx5_aso_ct_action {\n #define MLX5_ASO_CT_UPDATE_STATE(c, s) \\\n \t__atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)\n \n+#ifdef PEDANTIC\n+#pragma GCC diagnostic ignored \"-Wpedantic\"\n+#endif\n+\n /* ASO connection tracking software pool definition. */\n struct mlx5_aso_ct_pool {\n \tuint16_t index; /* Pool index in pools array. */\n+\t/* Free ASO CT index in the pool. Used by HWS. */\n+\tstruct mlx5_indexed_pool *cts;\n \tstruct mlx5_devx_obj *devx_obj;\n-\t/* The first devx object in the bulk, used for freeing (not yet). */\n-\tstruct mlx5_aso_ct_action actions[MLX5_ASO_CT_ACTIONS_PER_POOL];\n+\tunion {\n+\t\tvoid *dummy_action;\n+\t\t/* Dummy action to increase the reference count in the driver. */\n+\t\tstruct mlx5dr_action *dr_action;\n+\t\t/* HWS action. */\n+\t};\n+\tstruct mlx5_aso_sq *sq; /* Async ASO SQ. */\n+\tstruct mlx5_aso_sq *shared_sq; /* Shared ASO SQ. 
*/\n+\tstruct mlx5_aso_ct_action actions[0];\n \t/* CT action structures bulk. */\n };\n \n LIST_HEAD(aso_ct_list, mlx5_aso_ct_action);\n \n+#define MLX5_ASO_CT_SQ_NUM 16\n+\n /* Pools management structure for ASO connection tracking pools. */\n struct mlx5_aso_ct_pools_mng {\n \tstruct mlx5_aso_ct_pool **pools;\n \tuint16_t n; /* Total number of pools. */\n \tuint16_t next; /* Number of pools in use, index of next free pool. */\n+\tuint32_t nb_sq; /* Number of ASO SQ. */\n \trte_spinlock_t ct_sl; /* The ASO CT free list lock. */\n \trte_rwlock_t resize_rwl; /* The ASO CT pool resize lock. */\n \tstruct aso_ct_list free_cts; /* Free ASO CT objects list. */\n-\tstruct mlx5_aso_sq aso_sq; /* ASO queue objects. */\n+\tstruct mlx5_aso_sq aso_sqs[0]; /* ASO queue objects. */\n };\n \n+#ifdef PEDANTIC\n+#pragma GCC diagnostic error \"-Wpedantic\"\n+#endif\n+\n /* LAG attr. */\n struct mlx5_lag {\n \tuint8_t tx_remap_affinity[16]; /* The PF port number of affinity */\n@@ -1332,8 +1359,7 @@ struct mlx5_dev_ctx_shared {\n \trte_spinlock_t geneve_tlv_opt_sl; /* Lock for geneve tlv resource */\n \tstruct mlx5_flow_mtr_mng *mtrmng;\n \t/* Meter management structure. */\n-\tstruct mlx5_aso_ct_pools_mng *ct_mng;\n-\t/* Management data for ASO connection tracking. */\n+\tstruct mlx5_aso_ct_pools_mng *ct_mng; /* Management data for ASO CT in HWS only. */\n \tstruct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */\n \tunsigned int flow_max_priority;\n \tenum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];\n@@ -1647,6 +1673,9 @@ struct mlx5_priv {\n \t/* HW steering create ongoing rte flow table list header. */\n \tLIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo;\n \tstruct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */\n+\tstruct mlx5_aso_ct_pools_mng *ct_mng;\n+\t/* Management data for ASO connection tracking. */\n+\tstruct mlx5_aso_ct_pool *hws_ctpool; /* HW steering's CT pool. 
*/\n #endif\n };\n \n@@ -2046,15 +2075,15 @@ int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\tstruct mlx5_aso_mtr *mtr, struct mlx5_mtr_bulk *bulk);\n int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,\n \t\tstruct mlx5_aso_mtr *mtr);\n-int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n+int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t      struct mlx5_aso_ct_action *ct,\n \t\t\t      const struct rte_flow_action_conntrack *profile);\n-int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,\n+int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t   struct mlx5_aso_ct_action *ct);\n-int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n+int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t     struct mlx5_aso_ct_action *ct,\n \t\t\t     struct rte_flow_action_conntrack *profile);\n-int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,\n+int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t\t  struct mlx5_aso_ct_action *ct);\n uint32_t\n mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);\n@@ -2065,6 +2094,11 @@ int mlx5_aso_cnt_queue_init(struct mlx5_dev_ctx_shared *sh);\n void mlx5_aso_cnt_queue_uninit(struct mlx5_dev_ctx_shared *sh);\n int mlx5_aso_cnt_query(struct mlx5_dev_ctx_shared *sh,\n \t\tstruct mlx5_hws_cnt_pool *cpool);\n+int mlx5_aso_ct_queue_init(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t   struct mlx5_aso_ct_pools_mng *ct_mng,\n+\t\t\t   uint32_t nb_queues);\n+int mlx5_aso_ct_queue_uninit(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t     struct mlx5_aso_ct_pools_mng *ct_mng);\n \n /* mlx5_flow_flex.c */\n \ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 658cc69750..cbf9c31984 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -49,6 +49,7 @@ struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];\n  */\n uint32_t mlx5_flow_hw_avl_tags_init_cnt;\n enum modify_reg mlx5_flow_hw_avl_tags[MLX5_FLOW_HW_TAGS_MAX] = {REG_NON};\n+enum modify_reg mlx5_flow_hw_aso_tag;\n \n struct tunnel_default_miss_ctx {\n \tuint16_t *queue;\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex ae1417f10e..f75a56a57b 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -82,6 +82,10 @@ enum {\n #define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \\\n \t((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))\n \n+#define MLX5_ACTION_CTX_CT_GET_IDX  MLX5_INDIRECT_ACT_CT_GET_IDX\n+#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER\n+#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX\n+\n /* Matches on selected register. 
*/\n struct mlx5_rte_flow_item_tag {\n \tenum modify_reg id;\n@@ -1444,6 +1448,7 @@ extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];\n #define MLX5_FLOW_HW_TAGS_MAX 8\n extern uint32_t mlx5_flow_hw_avl_tags_init_cnt;\n extern enum modify_reg mlx5_flow_hw_avl_tags[];\n+extern enum modify_reg mlx5_flow_hw_aso_tag;\n \n /*\n  * Get metadata match tag and mask for given rte_eth_dev port.\n@@ -1518,6 +1523,8 @@ flow_hw_get_reg_id(enum rte_flow_item_type type, uint32_t id)\n \t\t * REG_B case should be rejected on pattern template validation.\n \t\t */\n \t\treturn REG_A;\n+\tcase RTE_FLOW_ITEM_TYPE_CONNTRACK:\n+\t\treturn mlx5_flow_hw_aso_tag;\n \tcase RTE_FLOW_ITEM_TYPE_TAG:\n \t\tMLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);\n \t\treturn mlx5_flow_hw_avl_tags[id];\ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex ed9272e583..c00c07b891 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -313,16 +313,8 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,\n \t\tmlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);\n \t\tbreak;\n \tcase ASO_OPC_MOD_CONNECTION_TRACKING:\n-\t\t/* 64B per object for query. */\n-\t\tif (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,\n-\t\t\t\t    &sh->ct_mng->aso_sq.mr))\n+\t\tif (mlx5_aso_ct_queue_init(sh, sh->ct_mng, MLX5_ASO_CT_SQ_NUM))\n \t\t\treturn -1;\n-\t\tif (mlx5_aso_sq_create(cdev, &sh->ct_mng->aso_sq,\n-\t\t\t\t       sh->tx_uar.obj, MLX5_ASO_QUEUE_LOG_DESC)) {\n-\t\t\tmlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);\n-\t\t\treturn -1;\n-\t\t}\n-\t\tmlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);\n \t\tbreak;\n \tdefault:\n \t\tDRV_LOG(ERR, \"Unknown ASO operation mode\");\n@@ -343,7 +335,7 @@ void\n mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,\n \t\t      enum mlx5_access_aso_opc_mod aso_opc_mod)\n {\n-\tstruct mlx5_aso_sq *sq;\n+\tstruct mlx5_aso_sq *sq = NULL;\n \n \tswitch (aso_opc_mod) {\n \tcase ASO_OPC_MOD_FLOW_HIT:\n@@ -354,14 +346,14 @@ mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,\n \t\tsq = &sh->mtrmng->pools_mng.sq;\n \t\tbreak;\n \tcase ASO_OPC_MOD_CONNECTION_TRACKING:\n-\t\tmlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr);\n-\t\tsq = &sh->ct_mng->aso_sq;\n+\t\tmlx5_aso_ct_queue_uninit(sh, sh->ct_mng);\n \t\tbreak;\n \tdefault:\n \t\tDRV_LOG(ERR, \"Unknown ASO operation mode\");\n \t\treturn;\n \t}\n-\tmlx5_aso_destroy_sq(sq);\n+\tif (sq)\n+\t\tmlx5_aso_destroy_sq(sq);\n }\n \n /**\n@@ -903,6 +895,89 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,\n \treturn -1;\n }\n \n+static inline struct mlx5_aso_sq*\n+__mlx5_aso_ct_get_sq_in_hws(uint32_t queue,\n+\t\t\t    struct mlx5_aso_ct_pool *pool)\n+{\n+\treturn (queue == MLX5_HW_INV_QUEUE) ?\n+\t\tpool->shared_sq : &pool->sq[queue];\n+}\n+\n+static inline struct mlx5_aso_sq*\n+__mlx5_aso_ct_get_sq_in_sws(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t    struct mlx5_aso_ct_action *ct)\n+{\n+\treturn &sh->ct_mng->aso_sqs[ct->offset & (MLX5_ASO_CT_SQ_NUM - 1)];\n+}\n+\n+static inline struct mlx5_aso_ct_pool*\n+__mlx5_aso_ct_get_pool(struct mlx5_dev_ctx_shared *sh,\n+\t\t       struct mlx5_aso_ct_action *ct)\n+{\n+\tif (likely(sh->config.dv_flow_en == 2))\n+\t\treturn ct->pool;\n+\treturn container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+}\n+\n+int\n+mlx5_aso_ct_queue_uninit(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t struct mlx5_aso_ct_pools_mng *ct_mng)\n+{\n+\tuint32_t i;\n+\n+\t/* 64B per object for query. 
*/\n+\tfor (i = 0; i < ct_mng->nb_sq; i++) {\n+\t\tif (ct_mng->aso_sqs[i].mr.addr)\n+\t\t\tmlx5_aso_dereg_mr(sh->cdev, &ct_mng->aso_sqs[i].mr);\n+\t\tmlx5_aso_destroy_sq(&ct_mng->aso_sqs[i]);\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * API to create and initialize CT Send Queue used for ASO access.\n+ *\n+ * @param[in] sh\n+ *   Pointer to shared device context.\n+ * @param[in] ct_mng\n+ *   Pointer to the CT management struct.\n+ * *param[in] nb_queues\n+ *   Number of queues to be allocated.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_aso_ct_queue_init(struct mlx5_dev_ctx_shared *sh,\n+\t\t       struct mlx5_aso_ct_pools_mng *ct_mng,\n+\t\t       uint32_t nb_queues)\n+{\n+\tuint32_t i;\n+\n+\t/* 64B per object for query. */\n+\tfor (i = 0; i < nb_queues; i++) {\n+\t\tif (mlx5_aso_reg_mr(sh->cdev, 64 * (1 << MLX5_ASO_QUEUE_LOG_DESC),\n+\t\t\t\t    &ct_mng->aso_sqs[i].mr))\n+\t\t\tgoto error;\n+\t\tif (mlx5_aso_sq_create(sh->cdev, &ct_mng->aso_sqs[i],\n+\t\t\t\t       sh->tx_uar.obj,\n+\t\t\t\t       MLX5_ASO_QUEUE_LOG_DESC))\n+\t\t\tgoto error;\n+\t\tmlx5_aso_ct_init_sq(&ct_mng->aso_sqs[i]);\n+\t}\n+\tct_mng->nb_sq = nb_queues;\n+\treturn 0;\n+error:\n+\tdo {\n+\t\tif (ct_mng->aso_sqs[i].mr.addr)\n+\t\t\tmlx5_aso_dereg_mr(sh->cdev, &ct_mng->aso_sqs[i].mr);\n+\t\tif (&ct_mng->aso_sqs[i])\n+\t\t\tmlx5_aso_destroy_sq(&ct_mng->aso_sqs[i]);\n+\t} while (i--);\n+\tct_mng->nb_sq = 0;\n+\treturn -1;\n+}\n+\n /*\n  * Post a WQE to the ASO CT SQ to modify the context.\n  *\n@@ -918,11 +993,12 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,\n  */\n static uint16_t\n mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t      struct mlx5_aso_sq *sq,\n \t\t\t      struct mlx5_aso_ct_action *ct,\n-\t\t\t      const struct rte_flow_action_conntrack *profile)\n+\t\t\t      const struct rte_flow_action_conntrack *profile,\n+\t\t\t      bool need_lock)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n-\tstruct mlx5_aso_sq *sq = &sh->ct_mng->aso_sq;\n \tuint16_t size = 1 << sq->log_desc_n;\n \tuint16_t mask = size - 1;\n \tuint16_t res;\n@@ -931,11 +1007,13 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \tvoid *orig_dir;\n \tvoid *reply_dir;\n \n-\trte_spinlock_lock(&sq->sqsl);\n+\tif (need_lock)\n+\t\trte_spinlock_lock(&sq->sqsl);\n \t/* Prevent other threads to update the index. */\n \tres = size - (uint16_t)(sq->head - sq->tail);\n \tif (unlikely(!res)) {\n-\t\trte_spinlock_unlock(&sq->sqsl);\n+\t\tif (need_lock)\n+\t\t\trte_spinlock_unlock(&sq->sqsl);\n \t\tDRV_LOG(ERR, \"Fail: SQ is full and no free WQE to send\");\n \t\treturn 0;\n \t}\n@@ -945,7 +1023,7 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \tMLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);\n \tsq->elts[sq->head & mask].ct = ct;\n \tsq->elts[sq->head & mask].query_data = NULL;\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \t/* Each WQE will have a single CT object. 
*/\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n \t\t\t\t\t\t  ct->offset);\n@@ -1028,7 +1106,8 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,\n \tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n \t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n \t\t\t   !sh->tx_uar.dbnc);\n-\trte_spinlock_unlock(&sq->sqsl);\n+\tif (need_lock)\n+\t\trte_spinlock_unlock(&sq->sqsl);\n \treturn 1;\n }\n \n@@ -1080,10 +1159,11 @@ mlx5_aso_ct_status_update(struct mlx5_aso_sq *sq, uint16_t num)\n  */\n static int\n mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n-\t\t\t    struct mlx5_aso_ct_action *ct, char *data)\n+\t\t\t    struct mlx5_aso_sq *sq,\n+\t\t\t    struct mlx5_aso_ct_action *ct, char *data,\n+\t\t\t    bool need_lock)\n {\n \tvolatile struct mlx5_aso_wqe *wqe = NULL;\n-\tstruct mlx5_aso_sq *sq = &sh->ct_mng->aso_sq;\n \tuint16_t size = 1 << sq->log_desc_n;\n \tuint16_t mask = size - 1;\n \tuint16_t res;\n@@ -1098,10 +1178,12 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \t} else if (state == ASO_CONNTRACK_WAIT) {\n \t\treturn 0;\n \t}\n-\trte_spinlock_lock(&sq->sqsl);\n+\tif (need_lock)\n+\t\trte_spinlock_lock(&sq->sqsl);\n \tres = size - (uint16_t)(sq->head - sq->tail);\n \tif (unlikely(!res)) {\n-\t\trte_spinlock_unlock(&sq->sqsl);\n+\t\tif (need_lock)\n+\t\t\trte_spinlock_unlock(&sq->sqsl);\n \t\tDRV_LOG(ERR, \"Fail: SQ is full and no free WQE to send\");\n \t\treturn 0;\n \t}\n@@ -1113,7 +1195,7 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \twqe_idx = sq->head & mask;\n \tsq->elts[wqe_idx].ct = ct;\n \tsq->elts[wqe_idx].query_data = data;\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n+\tpool = __mlx5_aso_ct_get_pool(sh, ct);\n \t/* Each WQE will have a single CT object. 
*/\n \twqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +\n \t\t\t\t\t\t  ct->offset);\n@@ -1141,7 +1223,8 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n \tmlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,\n \t\t\t   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],\n \t\t\t   !sh->tx_uar.dbnc);\n-\trte_spinlock_unlock(&sq->sqsl);\n+\tif (need_lock)\n+\t\trte_spinlock_unlock(&sq->sqsl);\n \treturn 1;\n }\n \n@@ -1152,9 +1235,10 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,\n  *   Pointer to the CT pools management structure.\n  */\n static void\n-mlx5_aso_ct_completion_handle(struct mlx5_aso_ct_pools_mng *mng)\n+mlx5_aso_ct_completion_handle(struct mlx5_dev_ctx_shared *sh __rte_unused,\n+\t\t\t      struct mlx5_aso_sq *sq,\n+\t\t\t      bool need_lock)\n {\n-\tstruct mlx5_aso_sq *sq = &mng->aso_sq;\n \tstruct mlx5_aso_cq *cq = &sq->cq;\n \tvolatile struct mlx5_cqe *restrict cqe;\n \tconst uint32_t cq_size = 1 << cq->log_desc_n;\n@@ -1165,10 +1249,12 @@ mlx5_aso_ct_completion_handle(struct mlx5_aso_ct_pools_mng *mng)\n \tuint16_t n = 0;\n \tint ret;\n \n-\trte_spinlock_lock(&sq->sqsl);\n+\tif (need_lock)\n+\t\trte_spinlock_lock(&sq->sqsl);\n \tmax = (uint16_t)(sq->head - sq->tail);\n \tif (unlikely(!max)) {\n-\t\trte_spinlock_unlock(&sq->sqsl);\n+\t\tif (need_lock)\n+\t\t\trte_spinlock_unlock(&sq->sqsl);\n \t\treturn;\n \t}\n \tnext_idx = cq->cq_ci & mask;\n@@ -1199,7 +1285,8 @@ mlx5_aso_ct_completion_handle(struct mlx5_aso_ct_pools_mng *mng)\n \t\trte_io_wmb();\n \t\tcq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);\n \t}\n-\trte_spinlock_unlock(&sq->sqsl);\n+\tif (need_lock)\n+\t\trte_spinlock_unlock(&sq->sqsl);\n }\n \n /*\n@@ -1207,6 +1294,8 @@ mlx5_aso_ct_completion_handle(struct mlx5_aso_ct_pools_mng *mng)\n  *\n  * @param[in] sh\n  *   Pointer to mlx5_dev_ctx_shared object.\n+ * @param[in] queue\n+ *   The queue index.\n  * @param[in] ct\n  *   Pointer to connection tracking offload object.\n  * @param[in] profile\n@@ -1217,21 +1306,26 @@ mlx5_aso_ct_completion_handle(struct mlx5_aso_ct_pools_mng *mng)\n  */\n int\n mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t  uint32_t queue,\n \t\t\t  struct mlx5_aso_ct_action *ct,\n \t\t\t  const struct rte_flow_action_conntrack *profile)\n {\n \tuint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n-\tstruct mlx5_aso_ct_pool *pool;\n+\tstruct mlx5_aso_ct_pool *pool = __mlx5_aso_ct_get_pool(sh, ct);\n+\tstruct mlx5_aso_sq *sq;\n+\tbool need_lock = !!(queue == MLX5_HW_INV_QUEUE);\n \n-\tMLX5_ASSERT(ct);\n+\tif (sh->config.dv_flow_en == 2)\n+\t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n+\telse\n+\t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n \tdo {\n-\t\tmlx5_aso_ct_completion_handle(sh->ct_mng);\n-\t\tif (mlx5_aso_ct_sq_enqueue_single(sh, ct, profile))\n+\t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n+\t\tif (mlx5_aso_ct_sq_enqueue_single(sh, sq, ct, profile, need_lock))\n \t\t\treturn 0;\n \t\t/* Waiting for wqe resource. 
*/\n \t\trte_delay_us_sleep(10u);\n \t} while (--poll_wqe_times);\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n \tDRV_LOG(ERR, \"Fail to send WQE for ASO CT %d in pool %d\",\n \t\tct->offset, pool->index);\n \treturn -1;\n@@ -1242,6 +1336,8 @@ mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n  *\n  * @param[in] sh\n  *   Pointer to mlx5_dev_ctx_shared object.\n+ * @param[in] queue\n+ *   The queue which CT works on..\n  * @param[in] ct\n  *   Pointer to connection tracking offload object.\n  *\n@@ -1249,25 +1345,29 @@ mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,\n  *   0 on success, -1 on failure.\n  */\n int\n-mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,\n+mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh, uint32_t queue,\n \t\t       struct mlx5_aso_ct_action *ct)\n {\n-\tstruct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;\n \tuint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n-\tstruct mlx5_aso_ct_pool *pool;\n+\tstruct mlx5_aso_ct_pool *pool = __mlx5_aso_ct_get_pool(sh, ct);\n+\tstruct mlx5_aso_sq *sq;\n+\tbool need_lock = !!(queue == MLX5_HW_INV_QUEUE);\n \n+\tif (sh->config.dv_flow_en == 2)\n+\t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n+\telse\n+\t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n \tif (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==\n \t    ASO_CONNTRACK_READY)\n \t\treturn 0;\n \tdo {\n-\t\tmlx5_aso_ct_completion_handle(mng);\n+\t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n \t\tif (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==\n \t\t    ASO_CONNTRACK_READY)\n \t\t\treturn 0;\n \t\t/* Waiting for CQE ready, consider should block or sleep. */\n \t\trte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n \t} while (--poll_cqe_times);\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n \tDRV_LOG(ERR, \"Fail to poll CQE for ASO CT %d in pool %d\",\n \t\tct->offset, pool->index);\n \treturn -1;\n@@ -1363,18 +1463,24 @@ mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,\n  */\n int\n mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t uint32_t queue,\n \t\t\t struct mlx5_aso_ct_action *ct,\n \t\t\t struct rte_flow_action_conntrack *profile)\n {\n \tuint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n-\tstruct mlx5_aso_ct_pool *pool;\n+\tstruct mlx5_aso_ct_pool *pool = __mlx5_aso_ct_get_pool(sh, ct);\n+\tstruct mlx5_aso_sq *sq;\n+\tbool need_lock = !!(queue == MLX5_HW_INV_QUEUE);\n \tchar out_data[64 * 2];\n \tint ret;\n \n-\tMLX5_ASSERT(ct);\n+\tif (sh->config.dv_flow_en == 2)\n+\t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n+\telse\n+\t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n \tdo {\n-\t\tmlx5_aso_ct_completion_handle(sh->ct_mng);\n-\t\tret = mlx5_aso_ct_sq_query_single(sh, ct, out_data);\n+\t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n+\t\tret = mlx5_aso_ct_sq_query_single(sh, sq, ct, out_data, need_lock);\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t\telse if (ret > 0)\n@@ -1383,12 +1489,11 @@ mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n \t\telse\n \t\t\trte_delay_us_sleep(10u);\n \t} while (--poll_wqe_times);\n-\tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n \tDRV_LOG(ERR, \"Fail to send WQE for ASO CT %d in pool %d\",\n \t\tct->offset, pool->index);\n \treturn -1;\n data_handle:\n-\tret = mlx5_aso_ct_wait_ready(sh, ct);\n+\tret = mlx5_aso_ct_wait_ready(sh, queue, ct);\n \tif (!ret)\n \t\tmlx5_aso_ct_obj_analyze(profile, out_data);\n \treturn ret;\n@@ -1408,13 +1513,20 @@ 
mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,\n  */\n int\n mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,\n+\t\t      uint32_t queue,\n \t\t      struct mlx5_aso_ct_action *ct)\n {\n-\tstruct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;\n+\tstruct mlx5_aso_ct_pool *pool = __mlx5_aso_ct_get_pool(sh, ct);\n+\tstruct mlx5_aso_sq *sq;\n+\tbool need_lock = !!(queue == MLX5_HW_INV_QUEUE);\n \tuint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n \tenum mlx5_aso_ct_state state =\n \t\t\t\t__atomic_load_n(&ct->state, __ATOMIC_RELAXED);\n \n+\tif (sh->config.dv_flow_en == 2)\n+\t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n+\telse\n+\t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n \tif (state == ASO_CONNTRACK_FREE) {\n \t\trte_errno = ENXIO;\n \t\treturn -rte_errno;\n@@ -1423,13 +1535,13 @@ mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,\n \t\treturn 0;\n \t}\n \tdo {\n-\t\tmlx5_aso_ct_completion_handle(mng);\n+\t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n \t\tstate = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);\n \t\tif (state == ASO_CONNTRACK_READY ||\n \t\t    state == ASO_CONNTRACK_QUERY)\n \t\t\treturn 0;\n-\t\t/* Waiting for CQE ready, consider should block or sleep. */\n-\t\trte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n+\t\t/* Waiting for CQE ready, consider should block or sleep.  */\n+\t\trte_delay_us_block(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n \t} while (--poll_cqe_times);\n \trte_errno = EBUSY;\n \treturn -rte_errno;\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 1eb1ce659f..9bede7c04f 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -12813,6 +12813,7 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,\n \tstruct mlx5_devx_obj *obj = NULL;\n \tuint32_t i;\n \tuint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);\n+\tsize_t mem_size;\n \n \tobj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,\n \t\t\t\t\t\t\t  priv->sh->cdev->pdn,\n@@ -12822,7 +12823,10 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,\n \t\tDRV_LOG(ERR, \"Failed to create conn_track_offload_obj using DevX.\");\n \t\treturn NULL;\n \t}\n-\tpool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);\n+\tmem_size = sizeof(struct mlx5_aso_ct_action) *\n+\t\t   MLX5_ASO_CT_ACTIONS_PER_POOL +\n+\t\t   sizeof(*pool);\n+\tpool = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);\n \tif (!pool) {\n \t\trte_errno = ENOMEM;\n \t\tclaim_zero(mlx5_devx_cmd_destroy(obj));\n@@ -12962,10 +12966,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"Failed to allocate CT object\");\n \tct = flow_aso_ct_get_by_dev_idx(dev, idx);\n-\tif (mlx5_aso_ct_update_by_wqe(sh, ct, pro))\n-\t\treturn rte_flow_error_set(error, EBUSY,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n-\t\t\t\t\t  \"Failed to update CT\");\n+\tif (mlx5_aso_ct_update_by_wqe(sh, MLX5_HW_INV_QUEUE, ct, pro)) {\n+\t\tflow_dv_aso_ct_dev_release(dev, idx);\n+\t\trte_flow_error_set(error, EBUSY,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t   \"Failed to update CT\");\n+\t\treturn 0;\n+\t}\n \tct->is_original = !!pro->is_original_dir;\n \tct->peer = pro->peer_port;\n \treturn idx;\n@@ -14160,7 +14167,7 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t\tNULL,\n \t\t\t\t\t\t\"Failed to get CT object.\");\n-\t\t\tif (mlx5_aso_ct_available(priv->sh, ct))\n+\t\t\tif 
(mlx5_aso_ct_available(priv->sh, MLX5_HW_INV_QUEUE, ct))\n \t\t\t\treturn rte_flow_error_set(error, rte_errno,\n \t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t\tNULL,\n@@ -15768,14 +15775,15 @@ __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,\n \t\tret = mlx5_validate_action_ct(dev, new_prf, error);\n \t\tif (ret)\n \t\t\treturn ret;\n-\t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);\n+\t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE,\n+\t\t\t\t\t\tct, new_prf);\n \t\tif (ret)\n \t\t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\tNULL,\n \t\t\t\t\t\"Failed to send CT context update WQE\");\n-\t\t/* Block until ready or a failure. */\n-\t\tret = mlx5_aso_ct_available(priv->sh, ct);\n+\t\t/* Block until ready or a failure, default is asynchronous. */\n+\t\tret = mlx5_aso_ct_available(priv->sh, MLX5_HW_INV_QUEUE, ct);\n \t\tif (ret)\n \t\t\trte_flow_error_set(error, rte_errno,\n \t\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -16604,7 +16612,7 @@ flow_dv_action_query(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\tct->peer;\n \t\t((struct rte_flow_action_conntrack *)data)->is_original_dir =\n \t\t\t\t\t\t\tct->is_original;\n-\t\tif (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))\n+\t\tif (mlx5_aso_ct_query_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ct, data))\n \t\t\treturn rte_flow_error_set(error, EIO,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\tNULL,\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 91835cd024..f4340c475d 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -15,6 +15,14 @@\n /* The maximum actions support in the flow. */\n #define MLX5_HW_MAX_ACTS 16\n \n+/*\n+ * The default ipool threshold value indicates which per_core_cache\n+ * value to set.\n+ */\n+#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)\n+/* The default min local cache size. */\n+#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)\n+\n /* Default push burst threshold. 
*/\n #define BURST_THR 32u\n \n@@ -324,6 +332,25 @@ flow_hw_tir_action_register(struct rte_eth_dev *dev,\n \treturn hrxq;\n }\n \n+static __rte_always_inline int\n+flow_hw_ct_compile(struct rte_eth_dev *dev,\n+\t\t   uint32_t queue, uint32_t idx,\n+\t\t   struct mlx5dr_rule_action *rule_act)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_action *ct;\n+\n+\tct = mlx5_ipool_get(priv->hws_ctpool->cts, MLX5_ACTION_CTX_CT_GET_IDX(idx));\n+\tif (!ct || mlx5_aso_ct_available(priv->sh, queue, ct))\n+\t\treturn -1;\n+\trule_act->action = priv->hws_ctpool->dr_action;\n+\trule_act->aso_ct.offset = ct->offset;\n+\trule_act->aso_ct.direction = ct->is_original ?\n+\t\tMLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :\n+\t\tMLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;\n+\treturn 0;\n+}\n+\n /**\n  * Destroy DR actions created by action template.\n  *\n@@ -640,6 +667,11 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,\n \t\t\taction_src, action_dst, act_idx))\n \t\t\treturn -1;\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\tif (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,\n+\t\t\t\t       idx, &acts->rule_acts[action_dst]))\n+\t\t\treturn -1;\n+\t\tbreak;\n \tdefault:\n \t\tDRV_LOG(WARNING, \"Unsupported shared action type:%d\", type);\n \t\tbreak;\n@@ -1083,6 +1115,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \tbool reformat_used = false;\n \tuint16_t action_pos;\n \tuint16_t jump_pos;\n+\tuint32_t ct_idx;\n \tint err;\n \n \tflow_hw_modify_field_init(&mhdr, at);\n@@ -1305,6 +1338,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t\tgoto err;\n \t\t\t}\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\t\taction_pos = at->actions_off[actions - at->actions];\n+\t\t\tif (masks->conf) {\n+\t\t\t\tct_idx = MLX5_ACTION_CTX_CT_GET_IDX\n+\t\t\t\t\t ((uint32_t)(uintptr_t)actions->conf);\n+\t\t\t\tif (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,\n+\t\t\t\t\t\t       &acts->rule_acts[action_pos]))\n+\t\t\t\t\tgoto err;\n+\t\t\t} else if (__flow_hw_act_data_general_append\n+\t\t\t\t\t(priv, acts, actions->type,\n+\t\t\t\t\t actions - action_start, action_pos)) {\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n \t\t\tbreak;\n@@ -1479,6 +1526,8 @@ flow_hw_shared_action_get(struct rte_eth_dev *dev,\n  *\n  * @param[in] dev\n  *   Pointer to the rte_eth_dev data structure.\n+ * @param[in] queue\n+ *   The flow creation queue index.\n  * @param[in] action\n  *   Pointer to the shared indirect rte_flow action.\n  * @param[in] table\n@@ -1492,7 +1541,7 @@ flow_hw_shared_action_get(struct rte_eth_dev *dev,\n  *    0 on success, negative value otherwise and rte_errno is set.\n  */\n static __rte_always_inline int\n-flow_hw_shared_action_construct(struct rte_eth_dev *dev,\n+flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\tconst struct rte_flow_action *action,\n \t\t\t\tstruct rte_flow_template_table *table,\n \t\t\t\tconst uint8_t it_idx,\n@@ -1532,6 +1581,10 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev,\n \t\t\t\t&rule_act->counter.offset))\n \t\t\treturn -1;\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\tif (flow_hw_ct_compile(dev, queue, idx, rule_act))\n+\t\t\treturn -1;\n+\t\tbreak;\n \tdefault:\n \t\tDRV_LOG(WARNING, \"Unsupported shared action type:%d\", type);\n \t\tbreak;\n@@ -1727,6 +1780,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\tuint64_t item_flags;\n \t\tstruct mlx5_hw_jump_action 
*jump;\n \t\tstruct mlx5_hrxq *hrxq;\n+\t\tuint32_t ct_idx;\n \t\tcnt_id_t cnt_id;\n \n \t\taction = &actions[act_data->action_src];\n@@ -1735,7 +1789,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\tswitch (act_data->type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tif (flow_hw_shared_action_construct\n-\t\t\t\t\t(dev, action, table, it_idx,\n+\t\t\t\t\t(dev, queue, action, table, it_idx,\n \t\t\t\t\t &rule_acts[act_data->action_dst]))\n \t\t\t\treturn -1;\n \t\t\tbreak;\n@@ -1860,6 +1914,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t\treturn ret;\n \t\t\tjob->flow->cnt_id = act_data->shared_counter.id;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\t\tct_idx = MLX5_ACTION_CTX_CT_GET_IDX\n+\t\t\t\t ((uint32_t)(uintptr_t)action->conf);\n+\t\t\tif (flow_hw_ct_compile(dev, queue, ct_idx,\n+\t\t\t\t\t       &rule_acts[act_data->action_dst]))\n+\t\t\t\treturn -1;\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n@@ -2391,6 +2452,8 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \tif (nb_flows < cfg.trunk_size) {\n \t\tcfg.per_core_cache = 0;\n \t\tcfg.trunk_size = nb_flows;\n+\t} else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {\n+\t\tcfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;\n \t}\n \t/* Check if we requires too many templates. */\n \tif (nb_item_templates > max_tpl ||\n@@ -2927,6 +2990,9 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n \t\t\t/* TODO: Validation logic */\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\t\t/* TODO: Validation logic */\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n \t\t\tbreak;\n@@ -2953,6 +3019,7 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {\n \t[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,\n \t[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,\n \t[RTE_FLOW_ACTION_TYPE_COUNT] = MLX5DR_ACTION_TYP_CTR,\n+\t[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,\n };\n \n static int\n@@ -2981,6 +3048,11 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,\n \t\taction_types[*curr_off] = MLX5DR_ACTION_TYP_CTR;\n \t\t*curr_off = *curr_off + 1;\n \t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\tat->actions_off[action_src] = *curr_off;\n+\t\taction_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;\n+\t\t*curr_off = *curr_off + 1;\n+\t\tbreak;\n \tdefault:\n \t\tDRV_LOG(WARNING, \"Unsupported shared action type: %d\", type);\n \t\treturn -EINVAL;\n@@ -3435,6 +3507,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ITEM_TYPE_GRE_OPTION:\n \t\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n \t\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n+\t\tcase RTE_FLOW_ITEM_TYPE_CONNTRACK:\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_END:\n \t\t\titems_end = true;\n@@ -4630,6 +4703,97 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n \treturn -EINVAL;\n }\n \n+static void\n+flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,\n+\t\t       struct mlx5_aso_ct_pools_mng *ct_mng)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tmlx5_aso_ct_queue_uninit(priv->sh, ct_mng);\n+\tmlx5_free(ct_mng);\n+}\n+\n+static void\n+flow_hw_ct_pool_destroy(struct rte_eth_dev *dev __rte_unused,\n+\t\t\tstruct mlx5_aso_ct_pool *pool)\n+{\n+\tif (pool->dr_action)\n+\t\tmlx5dr_action_destroy(pool->dr_action);\n+\tif (pool->devx_obj)\n+\t\tclaim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));\n+\tif 
(pool->cts)\n+\t\tmlx5_ipool_destroy(pool->cts);\n+\tmlx5_free(pool);\n+}\n+\n+static struct mlx5_aso_ct_pool *\n+flow_hw_ct_pool_create(struct rte_eth_dev *dev,\n+\t\t       const struct rte_flow_port_attr *port_attr)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool;\n+\tstruct mlx5_devx_obj *obj;\n+\tuint32_t nb_cts = rte_align32pow2(port_attr->nb_conn_tracks);\n+\tuint32_t log_obj_size = rte_log2_u32(nb_cts);\n+\tstruct mlx5_indexed_pool_config cfg = {\n+\t\t.size = sizeof(struct mlx5_aso_ct_action),\n+\t\t.trunk_size = 1 << 12,\n+\t\t.per_core_cache = 1 << 13,\n+\t\t.need_lock = 1,\n+\t\t.release_mem_en = !!priv->sh->config.reclaim_mode,\n+\t\t.malloc = mlx5_malloc,\n+\t\t.free = mlx5_free,\n+\t\t.type = \"mlx5_hw_ct_action\",\n+\t};\n+\tint reg_id;\n+\tuint32_t flags;\n+\n+\tpool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);\n+\tif (!pool) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tobj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,\n+\t\t\t\t\t\t\t  priv->sh->cdev->pdn,\n+\t\t\t\t\t\t\t  log_obj_size);\n+\tif (!obj) {\n+\t\trte_errno = ENODATA;\n+\t\tDRV_LOG(ERR, \"Failed to create conn_track_offload_obj using DevX.\");\n+\t\tgoto err;\n+\t}\n+\tpool->devx_obj = obj;\n+\treg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);\n+\tflags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;\n+\tif (priv->sh->config.dv_esw_en && priv->master)\n+\t\tflags |= MLX5DR_ACTION_FLAG_HWS_FDB;\n+\tpool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,\n+\t\t\t\t\t\t      (struct mlx5dr_devx_obj *)obj,\n+\t\t\t\t\t\t      reg_id - REG_C_0, flags);\n+\tif (!pool->dr_action)\n+\t\tgoto err;\n+\t/*\n+\t * No need for local cache if CT number is a small number. Since\n+\t * flow insertion rate will be very limited in that case. Here let's\n+\t * set the number to less than default trunk size 4K.\n+\t */\n+\tif (nb_cts <= cfg.trunk_size) {\n+\t\tcfg.per_core_cache = 0;\n+\t\tcfg.trunk_size = nb_cts;\n+\t} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {\n+\t\tcfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;\n+\t}\n+\tpool->cts = mlx5_ipool_create(&cfg);\n+\tif (!pool->cts)\n+\t\tgoto err;\n+\tpool->sq = priv->ct_mng->aso_sqs;\n+\t/* Assign the last extra ASO SQ as public SQ. 
*/\n+\tpool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];\n+\treturn pool;\n+err:\n+\tflow_hw_ct_pool_destroy(dev, pool);\n+\treturn NULL;\n+}\n+\n /**\n  * Configure port HWS resources.\n  *\n@@ -4815,6 +4979,20 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t}\n \tif (_queue_attr)\n \t\tmlx5_free(_queue_attr);\n+\tif (port_attr->nb_conn_tracks) {\n+\t\tmem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +\n+\t\t\t   sizeof(*priv->ct_mng);\n+\t\tpriv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,\n+\t\t\t\t\t   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\tif (!priv->ct_mng)\n+\t\t\tgoto err;\n+\t\tif (mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng, nb_q_updated))\n+\t\t\tgoto err;\n+\t\tpriv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);\n+\t\tif (!priv->hws_ctpool)\n+\t\t\tgoto err;\n+\t\tpriv->sh->ct_aso_en = 1;\n+\t}\n \tif (port_attr->nb_counters) {\n \t\tpriv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,\n \t\t\t\tnb_queue);\n@@ -4823,6 +5001,14 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t}\n \treturn 0;\n err:\n+\tif (priv->hws_ctpool) {\n+\t\tflow_hw_ct_pool_destroy(dev, priv->hws_ctpool);\n+\t\tpriv->hws_ctpool = NULL;\n+\t}\n+\tif (priv->ct_mng) {\n+\t\tflow_hw_ct_mng_destroy(dev, priv->ct_mng);\n+\t\tpriv->ct_mng = NULL;\n+\t}\n \tflow_hw_free_vport_actions(priv);\n \tfor (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {\n \t\tif (priv->hw_drop[i])\n@@ -4896,6 +5082,14 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \t}\n \tif (priv->hws_cpool)\n \t\tmlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);\n+\tif (priv->hws_ctpool) {\n+\t\tflow_hw_ct_pool_destroy(dev, priv->hws_ctpool);\n+\t\tpriv->hws_ctpool = NULL;\n+\t}\n+\tif (priv->ct_mng) {\n+\t\tflow_hw_ct_mng_destroy(dev, priv->ct_mng);\n+\t\tpriv->ct_mng = NULL;\n+\t}\n \tmlx5_free(priv->hw_q);\n \tpriv->hw_q = NULL;\n \tclaim_zero(mlx5dr_context_close(priv->dr_ctx));\n@@ -4964,6 +5158,7 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev)\n \t\tunset |= 1 << (REG_C_1 - REG_C_0);\n \tmasks &= ~unset;\n \tif (mlx5_flow_hw_avl_tags_init_cnt) {\n+\t\tMLX5_ASSERT(mlx5_flow_hw_aso_tag == priv->mtr_color_reg);\n \t\tfor (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) {\n \t\t\tif (mlx5_flow_hw_avl_tags[i] != REG_NON && !!((1 << i) & masks)) {\n \t\t\t\tcopy[mlx5_flow_hw_avl_tags[i] - REG_C_0] =\n@@ -4986,6 +5181,7 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev)\n \t\t}\n \t}\n \tpriv->sh->hws_tags = 1;\n+\tmlx5_flow_hw_aso_tag = (enum modify_reg)priv->mtr_color_reg;\n \tmlx5_flow_hw_avl_tags_init_cnt++;\n }\n \n@@ -5056,6 +5252,170 @@ flow_hw_clear_flow_metadata_config(void)\n \tmlx5_flow_hw_flow_metadata_xmeta_en = 0;\n }\n \n+static int\n+flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,\n+\t\t\t  uint32_t idx,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n+\tuint32_t ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);\n+\tstruct rte_eth_dev *owndev = &rte_eth_devices[owner];\n+\tstruct mlx5_priv *priv = owndev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n+\tstruct mlx5_aso_ct_action *ct;\n+\n+\tct = mlx5_ipool_get(pool->cts, ct_idx);\n+\tif (!ct) {\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Invalid CT destruction index\");\n+\t}\n+\t__atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,\n+\t\t\t\t __ATOMIC_RELAXED);\n+\tmlx5_ipool_free(pool->cts, ct_idx);\n+\treturn 0;\n+}\n+\n+static int\n+flow_hw_conntrack_query(struct 
rte_eth_dev *dev, uint32_t idx,\n+\t\t\tstruct rte_flow_action_conntrack *profile,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n+\tstruct mlx5_aso_ct_action *ct;\n+\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n+\tuint32_t ct_idx;\n+\n+\tif (owner != PORT_ID(priv))\n+\t\treturn rte_flow_error_set(error, EACCES,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Can't query CT object owned by another port\");\n+\tct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);\n+\tct = mlx5_ipool_get(pool->cts, ct_idx);\n+\tif (!ct) {\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Invalid CT query index\");\n+\t}\n+\tprofile->peer_port = ct->peer;\n+\tprofile->is_original_dir = ct->is_original;\n+\tif (mlx5_aso_ct_query_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ct, profile))\n+\t\treturn rte_flow_error_set(error, EIO,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Failed to query CT context\");\n+\treturn 0;\n+}\n+\n+\n+static int\n+flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t const struct rte_flow_modify_conntrack *action_conf,\n+\t\t\t uint32_t idx, struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n+\tstruct mlx5_aso_ct_action *ct;\n+\tconst struct rte_flow_action_conntrack *new_prf;\n+\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n+\tuint32_t ct_idx;\n+\tint ret = 0;\n+\n+\tif (PORT_ID(priv) != owner)\n+\t\treturn rte_flow_error_set(error, EACCES,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"Can't update CT object owned by another port\");\n+\tct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);\n+\tct = mlx5_ipool_get(pool->cts, ct_idx);\n+\tif (!ct) {\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Invalid CT update index\");\n+\t}\n+\tnew_prf = &action_conf->new_ct;\n+\tif (action_conf->direction)\n+\t\tct->is_original = !!new_prf->is_original_dir;\n+\tif (action_conf->state) {\n+\t\t/* Only validate the profile when it needs to be updated. */\n+\t\tret = mlx5_validate_action_ct(dev, new_prf, error);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf);\n+\t\tif (ret)\n+\t\t\treturn rte_flow_error_set(error, EIO,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\tNULL,\n+\t\t\t\t\t\"Failed to send CT context update WQE\");\n+\t\tif (queue != MLX5_HW_INV_QUEUE)\n+\t\t\treturn 0;\n+\t\t/* Block until ready or a failure in synchronous mode. 
*/\n+\t\tret = mlx5_aso_ct_available(priv->sh, queue, ct);\n+\t\tif (ret)\n+\t\t\trte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"Timeout to get the CT update\");\n+\t}\n+\treturn ret;\n+}\n+\n+static struct rte_flow_action_handle *\n+flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t const struct rte_flow_action_conntrack *pro,\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n+\tstruct mlx5_aso_ct_action *ct;\n+\tuint32_t ct_idx = 0;\n+\tint ret;\n+\tbool async = !!(queue != MLX5_HW_INV_QUEUE);\n+\n+\tif (!pool) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t   \"CT is not enabled\");\n+\t\treturn 0;\n+\t}\n+\tct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);\n+\tif (!ct) {\n+\t\trte_flow_error_set(error, rte_errno,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t   \"Failed to allocate CT object\");\n+\t\treturn 0;\n+\t}\n+\tct->offset = ct_idx - 1;\n+\tct->is_original = !!pro->is_original_dir;\n+\tct->peer = pro->peer_port;\n+\tct->pool = pool;\n+\tif (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro)) {\n+\t\tmlx5_ipool_free(pool->cts, ct_idx);\n+\t\trte_flow_error_set(error, EBUSY,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t   \"Failed to update CT\");\n+\t\treturn 0;\n+\t}\n+\tif (!async) {\n+\t\tret = mlx5_aso_ct_available(priv->sh, queue, ct);\n+\t\tif (ret) {\n+\t\t\tmlx5_ipool_free(pool->cts, ct_idx);\n+\t\t\trte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"Timeout to get the CT update\");\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\treturn (struct rte_flow_action_handle *)(uintptr_t)\n+\t\tMLX5_ACTION_CTX_CT_GEN_IDX(PORT_ID(priv), ct_idx);\n+}\n+\n /**\n  * Create shared action.\n  *\n@@ -5103,6 +5463,9 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\thandle = (struct rte_flow_action_handle *)\n \t\t\t\t (uintptr_t)cnt_id;\n \t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\thandle = flow_hw_conntrack_create(dev, queue, action->conf, error);\n+\t\tbreak;\n \tdefault:\n \t\thandle = flow_dv_action_create(dev, conf, action, error);\n \t}\n@@ -5138,10 +5501,18 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t     void *user_data,\n \t\t\t     struct rte_flow_error *error)\n {\n+\tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n+\tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n+\n \tRTE_SET_USED(queue);\n \tRTE_SET_USED(attr);\n \tRTE_SET_USED(user_data);\n-\treturn flow_dv_action_update(dev, handle, update, error);\n+\tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\treturn flow_hw_conntrack_update(dev, queue, update, act_idx, error);\n+\tdefault:\n+\t\treturn flow_dv_action_update(dev, handle, update, error);\n+\t}\n }\n \n /**\n@@ -5180,6 +5551,8 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n \t\treturn mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\treturn flow_hw_conntrack_destroy(dev, act_idx, error);\n \tdefault:\n \t\treturn flow_dv_action_destroy(dev, handle, error);\n \t}\n@@ -5333,6 +5706,8 @@ flow_hw_action_query(struct rte_eth_dev *dev,\n \tswitch (type) {\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n 
\t\treturn flow_hw_query_counter(dev, act_idx, data, error);\n+\tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n+\t\treturn flow_hw_conntrack_query(dev, act_idx, data, error);\n \tdefault:\n \t\treturn flow_dv_action_query(dev, handle, data, error);\n \t}\n",
    "prefixes": [
        "v3",
        "10/17"
    ]
}
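
As a consumption sketch (assuming the Python requests library), every URL in the payload above is itself fetchable; for example, the mbox link returns the raw patch email, which can be applied with git am:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/117220/").json()

# Scalar fields map 1:1 onto the JSON document above.
print(patch["state"])              # "superseded"
print(patch["check"])              # "warning" (aggregate CI check result)
print(patch["submitter"]["name"])  # "Suanming Mou"
print(patch["series"][0]["name"])  # "net/mlx5: HW steering PMD update"

# The mbox URL serves the patch as a raw email, suitable for `git am`.
with open("patch.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)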