get:
Show a patch.

patch:
Partially update a patch; only the fields supplied are changed.

put:
Fully update a patch; a complete representation must be supplied.
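
Both operations can be driven from a client script; below is a minimal sketch using Python's requests library and Patchwork's token authentication scheme. The TOKEN value and the state change are illustrative assumptions, since write access requires an API token from an account with maintainer rights on the project.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 137103

# get: read access needs no authentication.
patch = requests.get(f"{BASE}/patches/{PATCH_ID}/").json()
print(patch["name"], "->", patch["state"])

# patch: partial update -- send only the fields to change.
# TOKEN is a hypothetical maintainer API token.
TOKEN = "REPLACE_ME"
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()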

GET /api/patches/137103/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 137103,
    "url": "http://patches.dpdk.org/api/patches/137103/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20240223142320.49470-4-dsosnowski@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240223142320.49470-4-dsosnowski@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240223142320.49470-4-dsosnowski@nvidia.com",
    "date": "2024-02-23T14:23:19",
    "name": "[v2,3/4] net/mlx5: add cross port CT object sharing",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "bec128d4099792b0022773113224232eba07db21",
    "submitter": {
        "id": 2386,
        "url": "http://patches.dpdk.org/api/people/2386/?format=api",
        "name": "Dariusz Sosnowski",
        "email": "dsosnowski@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20240223142320.49470-4-dsosnowski@nvidia.com/mbox/",
    "series": [
        {
            "id": 31203,
            "url": "http://patches.dpdk.org/api/series/31203/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31203",
            "date": "2024-02-23T14:23:16",
            "name": "net/mlx5: connection tracking changes",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/31203/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/137103/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/137103/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 08E5443BA9;\n\tFri, 23 Feb 2024 15:24:25 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 35D7742D26;\n\tFri, 23 Feb 2024 15:24:13 +0100 (CET)",
            "from NAM12-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam12on2052.outbound.protection.outlook.com [40.107.244.52])\n by mails.dpdk.org (Postfix) with ESMTP id AA1104281D\n for <dev@dpdk.org>; Fri, 23 Feb 2024 15:24:10 +0100 (CET)",
            "from PH8PR20CA0016.namprd20.prod.outlook.com (2603:10b6:510:23c::20)\n by BN9PR12MB5196.namprd12.prod.outlook.com (2603:10b6:408:11d::17)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7316.24; Fri, 23 Feb\n 2024 14:24:07 +0000",
            "from SN1PEPF000252A4.namprd05.prod.outlook.com\n (2603:10b6:510:23c:cafe::8d) by PH8PR20CA0016.outlook.office365.com\n (2603:10b6:510:23c::20) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7292.43 via Frontend\n Transport; Fri, 23 Feb 2024 14:24:06 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n SN1PEPF000252A4.mail.protection.outlook.com (10.167.242.11) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7292.25 via Frontend Transport; Fri, 23 Feb 2024 14:24:06 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Fri, 23 Feb\n 2024 06:23:46 -0800",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Fri, 23 Feb\n 2024 06:23:41 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=c6II/6WNJceiBIwxuoeR+NWFtawy1h0AyYEAf8fYKvrgk1SOEqaPgqmETVgMDpI1mwm6rOhJ3/XlHt1/f8NnjaEeY+zYT3V7E8KO/lffILCQ6ShKTohY1sy6xdAeieL6rVm1glMKgchuPUyvkc/LNUDyWt1rlja3VwaVI3PE1O9HWFXdOAF4v2zrtrHesl9S7Fx1a1kjCp+7bnIM6vr5A2ZmdM4K+R4sylD+9wdB9TRATM5HGoWYhpQ1iJLA0SPWFzu/1T+MzTGOFf3p8fgeBBBg1CcA6Fe3Dvh8M5LcpS0niG/Am+tWPc88xlAfsk7i0A1CzeNsL9fpd+2m6TdBmw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=dAirZCofblIwXLu/3zZuf1fT0Tc2O9120q7779IRhVE=;\n b=O8cJ662VNq3pulnkqaozIBdOwjZdiQWmbLkSdyE4s+X54DeUCMvbl91oKkENu56wTgLB44JKnh2U5d5/8G198+9OHkAjNlTnzKyWWXMeT8mcDWoNMzH2tenDIANklLOfrTZJUDPJbd7F/1nV+YhGOet5PV94x6OD3VhzllG9VTXMsECxvr5+XAROMRUDjrrv74joO0koaxwtmIPm6urOcISXFugB118907Ws1MK5csVITNkBaneMgYH28CanTRrv6/cZ5kbEjRjl+SPcQwR/gdHkWJjxMXlmqPPUpllLUNWtjznzJDvxGi16pnzav6Tz4prm9BaUfX+hxvxoJUQY3w==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=dAirZCofblIwXLu/3zZuf1fT0Tc2O9120q7779IRhVE=;\n b=hYLM6h91YscZMlNN0HH3lhXM+sRGUL073UIj3ifGHoWBPScVo1gi9yIppeFcscKw7S8b56V63M/fIcWsR3HTykMJo4ub6TYLM6xldMm3TxghPYO8puQ52P9fA7rgoCnXQUliKSh4Xc6Hha8z2WdS5/0IRlkmxXY5Q2vTPpOfswRQC17vpMXcZTfv5l5vi+dvUPcQUJzzAQ/B/548WT2zSRlwVH43I1OcdkGdKlE7S58iiYOD828aSYBLN4eYPdxPWe/lIzCBwjxcwJ1ounkf0/2WorbngovoJ6P7hd4ZLHO0vQBJmSTU1ITnQ/K5TzQBK3vw6sOzL5UtfHo/sjqMUQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Dariusz Sosnowski <dsosnowski@nvidia.com>",
        "To": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>",
        "Subject": "[PATCH v2 3/4] net/mlx5: add cross port CT object sharing",
        "Date": "Fri, 23 Feb 2024 16:23:19 +0200",
        "Message-ID": "<20240223142320.49470-4-dsosnowski@nvidia.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20240223142320.49470-1-dsosnowski@nvidia.com>",
        "References": "<20240221100145.8234-1-dsosnowski@nvidia.com>\n <20240223142320.49470-1-dsosnowski@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "SN1PEPF000252A4:EE_|BN9PR12MB5196:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "b2be1cde-860f-448a-490f-08dc347b17b9",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 5ZXxSFx/6RIDGEg4IXGY1FYjgYV8mPeFtHph00TEDFV4ncEc9EJperYMoNF1sWUpWz6JRvLY/0gLamELUuCN1z8FAljuekAZ0CWXgVZDAZtSHk8q1yeHgOUwX6/Xy0J9d23TK5++AkH6BryznOQ/VieVAqqt/paBxT5RCWIdVAs2G7IJesQUoJgDMiDs1tX/OOy79uHBW8etvJVsVNP6fkoqiQp1yAxh5i+JvJCle96ezUdkg5ETrhWNgCthL5q2A9UdXtbJdxOC99Mp2ZWDbsBOfzn4SOVIMxn5RUNy8zwLXtenz/aQan/98liO2RiTpW3fz4IREPb1uWKHFKGCAsZsIKJ4/4qhTxMqzs3l+C9cxcLJh8gHIONQprK4Qm8Ep51lQfM0R9WTpbmRA4KErgypxg6nX1oSlCdVxs7Aj5MVAqCY+fWMVEePdTU8CRTCkwRsoX7LAd5aOOg1ZFCfMeVWAzPGVxrkRw5oAylzpD6PqVSJACUcNxtwZoFUKfRSiCQX2YCX7dLu5Y6Ar0RrD/HqWYccLOS54/GVRcq+CXBfj2f053wKnZPtBP27J2aMpaF5AUjr6hgK59nFnAxpiMegduBZ/qR/NxpCKewcps1vlL3Dv8h+c+WLt+5SRnigFVSgVKXo78HKZr9fuEKy15C2XQAvJjgbgT8kiWMIvl0=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230031)(36860700004)(40470700004)(46966006); DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "23 Feb 2024 14:24:06.1533 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n b2be1cde-860f-448a-490f-08dc347b17b9",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n SN1PEPF000252A4.namprd05.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN9PR12MB5196",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Suanming Mou <suanmingm@nvidia.com>\n\nThis commit adds cross port CT object sharing.\n\nShared CT object shares the same DevX objects, but allocate port's\nown action locally. Once the CT object is shared between two flows\nin different ports, the two flows use their own local action with\nthe same offset index.\n\nThe shared CT object can only be created/updated/queried/destroyed\nby host port.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nSigned-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>\nAcked-by: Ori Kam <orika@nvidia.com>\n---\n doc/guides/rel_notes/release_24_03.rst |   2 +\n drivers/net/mlx5/mlx5_flow_hw.c        | 145 ++++++++++++++-----------\n 2 files changed, 85 insertions(+), 62 deletions(-)",
    "diff": "diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst\nindex 879bb4944c..b660c2c7cf 100644\n--- a/doc/guides/rel_notes/release_24_03.rst\n+++ b/doc/guides/rel_notes/release_24_03.rst\n@@ -130,6 +130,8 @@ New Features\n   * Added support for matching a random value.\n   * Added support for comparing result between packet fields or value.\n   * Added support for accumulating value of field into another one.\n+  * Added support for sharing indirect action objects of type ``RTE_FLOW_ACTION_TYPE_CONNTRACK``\n+    with HW steering flow engine.\n \n * **Updated Marvell cnxk crypto driver.**\n \ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 366a6956d2..f53ed1144b 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -564,7 +564,7 @@ flow_hw_ct_compile(struct rte_eth_dev *dev,\n \tstruct mlx5_aso_ct_action *ct;\n \n \tct = mlx5_ipool_get(priv->hws_ctpool->cts, MLX5_ACTION_CTX_CT_GET_IDX(idx));\n-\tif (!ct || mlx5_aso_ct_available(priv->sh, queue, ct))\n+\tif (!ct || (!priv->shared_host && mlx5_aso_ct_available(priv->sh, queue, ct)))\n \t\treturn -1;\n \trule_act->action = priv->hws_ctpool->dr_action;\n \trule_act->aso_ct.offset = ct->offset;\n@@ -3835,9 +3835,11 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,\n \tif (ret_comp < n_res && priv->hws_mpool)\n \t\tret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],\n \t\t\t\t&res[ret_comp], n_res - ret_comp);\n-\tif (ret_comp < n_res && priv->hws_ctpool)\n-\t\tret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],\n-\t\t\t\t&res[ret_comp], n_res - ret_comp);\n+\tif (!priv->shared_host) {\n+\t\tif (ret_comp < n_res && priv->hws_ctpool)\n+\t\t\tret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],\n+\t\t\t\t\t&res[ret_comp], n_res - ret_comp);\n+\t}\n \tif (ret_comp < n_res && priv->quota_ctx.sq)\n \t\tret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],\n \t\t\t\t\t\t     &res[ret_comp],\n@@ -8797,15 +8799,19 @@ flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,\n }\n \n static void\n-flow_hw_ct_pool_destroy(struct rte_eth_dev *dev __rte_unused,\n+flow_hw_ct_pool_destroy(struct rte_eth_dev *dev,\n \t\t\tstruct mlx5_aso_ct_pool *pool)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n \tif (pool->dr_action)\n \t\tmlx5dr_action_destroy(pool->dr_action);\n-\tif (pool->devx_obj)\n-\t\tclaim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));\n-\tif (pool->cts)\n-\t\tmlx5_ipool_destroy(pool->cts);\n+\tif (!priv->shared_host) {\n+\t\tif (pool->devx_obj)\n+\t\t\tclaim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));\n+\t\tif (pool->cts)\n+\t\t\tmlx5_ipool_destroy(pool->cts);\n+\t}\n \tmlx5_free(pool);\n }\n \n@@ -8829,51 +8835,56 @@ flow_hw_ct_pool_create(struct rte_eth_dev *dev,\n \t\t.type = \"mlx5_hw_ct_action\",\n \t};\n \tint reg_id;\n-\tuint32_t flags;\n+\tuint32_t flags = 0;\n \n-\tif (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {\n-\t\tDRV_LOG(ERR, \"Connection tracking is not supported \"\n-\t\t\t     \"in cross vHCA sharing mode\");\n-\t\trte_errno = ENOTSUP;\n-\t\treturn NULL;\n-\t}\n \tpool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);\n \tif (!pool) {\n \t\trte_errno = ENOMEM;\n \t\treturn NULL;\n \t}\n-\tobj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,\n-\t\t\t\t\t\t\t  priv->sh->cdev->pdn,\n-\t\t\t\t\t\t\t  log_obj_size);\n-\tif (!obj) {\n-\t\trte_errno = ENODATA;\n-\t\tDRV_LOG(ERR, \"Failed to create 
conn_track_offload_obj using DevX.\");\n-\t\tgoto err;\n+\tif (!priv->shared_host) {\n+\t\t/*\n+\t\t * No need for local cache if CT number is a small number. Since\n+\t\t * flow insertion rate will be very limited in that case. Here let's\n+\t\t * set the number to less than default trunk size 4K.\n+\t\t */\n+\t\tif (nb_cts <= cfg.trunk_size) {\n+\t\t\tcfg.per_core_cache = 0;\n+\t\t\tcfg.trunk_size = nb_cts;\n+\t\t} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {\n+\t\t\tcfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;\n+\t\t}\n+\t\tcfg.max_idx = nb_cts;\n+\t\tpool->cts = mlx5_ipool_create(&cfg);\n+\t\tif (!pool->cts)\n+\t\t\tgoto err;\n+\t\tobj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,\n+\t\t\t\t\t\t\t\t  priv->sh->cdev->pdn,\n+\t\t\t\t\t\t\t\t  log_obj_size);\n+\t\tif (!obj) {\n+\t\t\trte_errno = ENODATA;\n+\t\t\tDRV_LOG(ERR, \"Failed to create conn_track_offload_obj using DevX.\");\n+\t\t\tgoto err;\n+\t\t}\n+\t\tpool->devx_obj = obj;\n+\t} else {\n+\t\tstruct rte_eth_dev *host_dev = priv->shared_host;\n+\t\tstruct mlx5_priv *host_priv = host_dev->data->dev_private;\n+\n+\t\tpool->devx_obj = host_priv->hws_ctpool->devx_obj;\n+\t\tpool->cts = host_priv->hws_ctpool->cts;\n+\t\tMLX5_ASSERT(pool->cts);\n+\t\tMLX5_ASSERT(!port_attr->nb_conn_tracks);\n \t}\n-\tpool->devx_obj = obj;\n \treg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);\n-\tflags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;\n+\tflags |= MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;\n \tif (priv->sh->config.dv_esw_en && priv->master)\n \t\tflags |= MLX5DR_ACTION_FLAG_HWS_FDB;\n \tpool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,\n-\t\t\t\t\t\t      (struct mlx5dr_devx_obj *)obj,\n+\t\t\t\t\t\t      (struct mlx5dr_devx_obj *)pool->devx_obj,\n \t\t\t\t\t\t      reg_id - REG_C_0, flags);\n \tif (!pool->dr_action)\n \t\tgoto err;\n-\t/*\n-\t * No need for local cache if CT number is a small number. Since\n-\t * flow insertion rate will be very limited in that case. Here let's\n-\t * set the number to less than default trunk size 4K.\n-\t */\n-\tif (nb_cts <= cfg.trunk_size) {\n-\t\tcfg.per_core_cache = 0;\n-\t\tcfg.trunk_size = nb_cts;\n-\t} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {\n-\t\tcfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;\n-\t}\n-\tpool->cts = mlx5_ipool_create(&cfg);\n-\tif (!pool->cts)\n-\t\tgoto err;\n \tpool->sq = priv->ct_mng->aso_sqs;\n \t/* Assign the last extra ASO SQ as public SQ. 
*/\n \tpool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];\n@@ -9686,14 +9697,16 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \tif (!priv->shared_host)\n \t\tflow_hw_create_send_to_kernel_actions(priv);\n \tif (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {\n-\t\tmem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +\n-\t\t\t   sizeof(*priv->ct_mng);\n-\t\tpriv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,\n-\t\t\t\t\t   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n-\t\tif (!priv->ct_mng)\n-\t\t\tgoto err;\n-\t\tif (mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng, nb_q_updated))\n-\t\t\tgoto err;\n+\t\tif (!priv->shared_host) {\n+\t\t\tmem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +\n+\t\t\t\tsizeof(*priv->ct_mng);\n+\t\t\tpriv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\t\tif (!priv->ct_mng)\n+\t\t\t\tgoto err;\n+\t\t\tif (mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng, nb_q_updated))\n+\t\t\t\tgoto err;\n+\t\t}\n \t\tpriv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);\n \t\tif (!priv->hws_ctpool)\n \t\t\tgoto err;\n@@ -9914,17 +9927,20 @@ flow_hw_clear_port_info(struct rte_eth_dev *dev)\n }\n \n static int\n-flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,\n+flow_hw_conntrack_destroy(struct rte_eth_dev *dev,\n \t\t\t  uint32_t idx,\n \t\t\t  struct rte_flow_error *error)\n {\n-\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n \tuint32_t ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);\n-\tstruct rte_eth_dev *owndev = &rte_eth_devices[owner];\n-\tstruct mlx5_priv *priv = owndev->data->dev_private;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n \tstruct mlx5_aso_ct_action *ct;\n \n+\tif (priv->shared_host)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"CT destruction is not allowed to guest port\");\n \tct = mlx5_ipool_get(pool->cts, ct_idx);\n \tif (!ct) {\n \t\treturn rte_flow_error_set(error, EINVAL,\n@@ -9947,14 +9963,13 @@ flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n \tstruct mlx5_aso_ct_action *ct;\n-\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n \tuint32_t ct_idx;\n \n-\tif (owner != PORT_ID(priv))\n-\t\treturn rte_flow_error_set(error, EACCES,\n+\tif (priv->shared_host)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL,\n-\t\t\t\t\"Can't query CT object owned by another port\");\n+\t\t\t\t\"CT query is not allowed to guest port\");\n \tct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);\n \tct = mlx5_ipool_get(pool->cts, ct_idx);\n \tif (!ct) {\n@@ -9984,15 +9999,14 @@ flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,\n \tstruct mlx5_aso_ct_pool *pool = priv->hws_ctpool;\n \tstruct mlx5_aso_ct_action *ct;\n \tconst struct rte_flow_action_conntrack *new_prf;\n-\tuint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);\n \tuint32_t ct_idx;\n \tint ret = 0;\n \n-\tif (PORT_ID(priv) != owner)\n-\t\treturn rte_flow_error_set(error, EACCES,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"Can't update CT object owned by another port\");\n+\tif (priv->shared_host)\n+\t\treturn rte_flow_error_set(error, 
ENOTSUP,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"CT update is not allowed to guest port\");\n \tct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);\n \tct = mlx5_ipool_get(pool->cts, ct_idx);\n \tif (!ct) {\n@@ -10042,6 +10056,13 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,\n \tint ret;\n \tbool async = !!(queue != MLX5_HW_INV_QUEUE);\n \n+\tif (priv->shared_host) {\n+\t\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"CT create is not allowed to guest port\");\n+\t\treturn NULL;\n+\t}\n \tif (!pool) {\n \t\trte_flow_error_set(error, EINVAL,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n",
    "prefixes": [
        "v2",
        "3/4"
    ]
}
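
As a closing sketch of consuming a response like the one above (only field names visible in the payload are used; the local filename is an arbitrary choice), a client would typically follow the hyperlinks embedded in the JSON rather than construct URLs by hand:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/137103/").json()

# Hyperlinked fields ("url", "mbox", "comments", "checks") can be followed directly.
for series in patch["series"]:
    print("series v{}: {}".format(series["version"], series["name"]))

# The mbox link serves the raw patch email, suitable for `git am`.
mbox = requests.get(patch["mbox"]).text
with open("patch.mbox", "w") as f:
    f.write(mbox)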