Patch Detail
get:
Show a patch.
patch:
Partially update a patch.
put:
Update a patch.
GET /api/patches/134754/?format=api
https://patches.dpdk.org/api/patches/134754/?format=api", "web_url": "https://patches.dpdk.org/project/dpdk/patch/20231203112543.844014-13-michaelba@nvidia.com/", "project": { "id": 1, "url": "https://patches.dpdk.org/api/projects/1/?format=api", "name": "DPDK", "link_name": "dpdk", "list_id": "dev.dpdk.org", "list_email": "dev@dpdk.org", "web_url": "http://core.dpdk.org", "scm_url": "git://dpdk.org/dpdk", "webscm_url": "http://git.dpdk.org/dpdk", "list_archive_url": "https://inbox.dpdk.org/dev", "list_archive_url_format": "https://inbox.dpdk.org/dev/{}", "commit_url_format": "" }, "msgid": "<20231203112543.844014-13-michaelba@nvidia.com>", "list_archive_url": "https://inbox.dpdk.org/dev/20231203112543.844014-13-michaelba@nvidia.com", "date": "2023-12-03T11:25:32", "name": "[v1,12/23] net/mlx5: add physical device handle", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": true, "hash": "4659a3106824bd3b8b4fcc9ff10ac8f7e4159bff", "submitter": { "id": 1949, "url": "https://patches.dpdk.org/api/people/1949/?format=api", "name": "Michael Baum", "email": "michaelba@nvidia.com" }, "delegate": { "id": 3268, "url": "https://patches.dpdk.org/api/users/3268/?format=api", "username": "rasland", "first_name": "Raslan", "last_name": "Darawsheh", "email": "rasland@nvidia.com" }, "mbox": "https://patches.dpdk.org/project/dpdk/patch/20231203112543.844014-13-michaelba@nvidia.com/mbox/", "series": [ { "id": 30433, "url": "https://patches.dpdk.org/api/series/30433/?format=api", "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=30433", "date": "2023-12-03T11:25:23", "name": "net/mlx5: support Geneve and options for HWS", "version": 1, "mbox": "https://patches.dpdk.org/series/30433/mbox/" } ], "comments": "https://patches.dpdk.org/api/patches/134754/comments/", "check": "warning", "checks": "https://patches.dpdk.org/api/patches/134754/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<dev-bounces@dpdk.org>", "X-Original-To": 
"patchwork@inbox.dpdk.org", "Delivered-To": "patchwork@inbox.dpdk.org", "Received": [ "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id AAD2A4365F;\n\tSun, 3 Dec 2023 12:27:16 +0100 (CET)", "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 954AD406FF;\n\tSun, 3 Dec 2023 12:26:31 +0100 (CET)", "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2071.outbound.protection.outlook.com [40.107.93.71])\n by mails.dpdk.org (Postfix) with ESMTP id B0A74406FF\n for <dev@dpdk.org>; Sun, 3 Dec 2023 12:26:30 +0100 (CET)", "from DM6PR02CA0112.namprd02.prod.outlook.com (2603:10b6:5:1b4::14)\n by MW3PR12MB4505.namprd12.prod.outlook.com (2603:10b6:303:5a::24) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7046.33; Sun, 3 Dec\n 2023 11:26:27 +0000", "from DS1PEPF0001709A.namprd05.prod.outlook.com\n (2603:10b6:5:1b4:cafe::7a) by DM6PR02CA0112.outlook.office365.com\n (2603:10b6:5:1b4::14) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7046.32 via Frontend\n Transport; Sun, 3 Dec 2023 11:26:27 +0000", "from mail.nvidia.com (216.228.117.161) by\n DS1PEPF0001709A.mail.protection.outlook.com (10.167.18.104) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7068.20 via Frontend Transport; Sun, 3 Dec 2023 11:26:27 +0000", "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Sun, 3 Dec 2023\n 03:26:18 -0800", "from rnnvmail205.nvidia.com (10.129.68.10) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Sun, 3 Dec 2023\n 03:26:18 -0800", "from nvidia.com (10.127.8.13) by mail.nvidia.com 
(10.129.68.10) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41 via Frontend\n Transport; Sun, 3 Dec 2023 03:26:16 -0800" ], "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=UQ/iWGhHhDeSfXyGY/4AbG9TJqI8NL8F6nagq0qtL6UVKzatj6oyfq2BAP9Kh9smcvzqJbQn+bmCAZNnlrDeGzG65CAxnz8zMUxSr67dSNgyHAq2u1jdWMJaccAS125cuD1/2OEkZvm14aqzNa/J1btzB2EES67cQ12tZrXL4g/qNtJPeAIo/q7jALLL+vZ25XRHg4mQwSR05pKwwrDvfYJ88QhPJPTDFog6dz3QAG6RIl7EmgJqEL5XijMgzoomlemxqcE4fv1GMy9/i8LBko2NYMCdIKUPLGCGapefU7OjF1wd+1TGgiatHDdUVks3LSYef5aKM0ovcNOjeLI5SA==", "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=i7U4O6jAaafQvmSGv9+5GQyMSdIh9qK1cyO/6ndG8jQ=;\n b=YBwE4G/sQnXRGIY/KQgBYyuMJCt2vkCLlGJc0+U4ETtPrTM1Zth2g3/IIe5d8Srd7qCDBZgTE1ZZiSSe30qkBuXv9tQnlulS85EmD83aUHh26rd34tY9JhUH/tMReGr8nBMPg0KgyiJdEAb4lIE2r1/9z4RoA5Z+0E9yr1rE1wnRLrJJg/vzjE4+2puTTFcD1VpV7Fahxz+nq4DI20SSWHUx8rbUKIjrrwSfjluJkUxoI7IT5QWu/n1Vs7aOOBhxFiGXzP28waJ3VwLE3VHNhqrp+Fcg1zW5yweM0NwCQnHbO+9LQPE0duBYPqiC3hwXmSqsE2e8PrAU9DzWq/j5pA==", "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)", "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=i7U4O6jAaafQvmSGv9+5GQyMSdIh9qK1cyO/6ndG8jQ=;\n 
b=iWleCqKizAnLmlmqRYzY894iYxs4JsX/b2CoJmOVKckEXXCZkbufSrIt/McPuJWhA2DXc11GDwp4rA9U4aDd8URQB6rAhCuq3CK95dmBCpZbrTYwi0ah5qACRlBKhuFxLtiklRTQsoET/+J4CFcfdgGgtZR5Jo9zANUdxazDZWlrmYMloXzwfyJaSelaCbOf69zI5lHaoshred1eKl3I7IsoeyFHmBXAEed7OPplL12snm+OGEpu/pat5m5mEd2FKZbNXyXVCO6JZ79Q4/Pm30Ub8w4EnIkBOIc74cqfLw2tAE9WiZnfFswpDVInkIcMgjQwvvViBLSFA5Ns/fOZ8A==", "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;", "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C", "From": "Michael Baum <michaelba@nvidia.com>", "To": "<dev@dpdk.org>", "CC": "Matan Azrad <matan@nvidia.com>, Raslan Darawsheh <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>", "Subject": "[PATCH v1 12/23] net/mlx5: add physical device handle", "Date": "Sun, 3 Dec 2023 13:25:32 +0200", "Message-ID": "<20231203112543.844014-13-michaelba@nvidia.com>", "X-Mailer": "git-send-email 2.25.1", "In-Reply-To": "<20231203112543.844014-1-michaelba@nvidia.com>", "References": "<20231203112543.844014-1-michaelba@nvidia.com>", "MIME-Version": "1.0", "Content-Transfer-Encoding": "8bit", "Content-Type": "text/plain", "X-NV-OnPremToCloud": "ExternallySecured", "X-EOPAttributedMessage": "0", "X-MS-PublicTrafficType": "Email", "X-MS-TrafficTypeDiagnostic": "DS1PEPF0001709A:EE_|MW3PR12MB4505:EE_", "X-MS-Office365-Filtering-Correlation-Id": "761e45e2-a11c-403b-5595-08dbf3f2b093", "X-MS-Exchange-SenderADCheck": "1", "X-MS-Exchange-AntiSpam-Relay": "0", "X-Microsoft-Antispam": "BCL:0;", "X-Microsoft-Antispam-Message-Info": "\n 
MC8kHHkSos50Ug/aqRtwsfToD/1Z6IPs2PFvvsNsmtCkdFvij58Al9BrbZ5lZWfQNAhZ+Tl5RTGLRszyI9T/WY6JCv9WiAgl/oRftRXh9xeK8dTuAVUuAgXIEZpCIs+r0PjcOXJ1M746SyqJGM6vJt03Nfnrxn6BP3s3BXnvPP9HAwL9RFx/+UZ6LFsXe/7v3HsV3X2FYkq/QVYQCpvk/qioKbKIVbGDtRBfY/qXxieRQk+uPdolyg7NXfTdNYs4UsSXI8LqsJbfngSGmKgtkOUP3PttNRy5rjRrQZsWl7ihDoKLNcWXQ1fcVSy0VPZj8OJ2Ye+2USYtwQwtMKjoO90Zxmiw5o3ufMIuViVPQMF5O68YKV0GYEvNwU0nkFX+wA2tEkuDeBmvIh1SQtE13FYkQxSvqZwdqUBgF7SSjsow18Xq5NH7DkpxHBXMFcoywARW9udKTC5fhCCdnXdqdTU9WxM1tB1ZyIIIgy2CkVFoEDKGbeyutPGx+ha67EncI+R6pDhkxbnqIGs9z2yis4SPVSYqK4djui4Fb70P/hw1PFbHGnTJ1vQnFVag3gF47R6718J7N2m2X1wWmfmfgW0LNjyHbNM3xX9eSqC0cBcxVXyxvTjvDuZ4QRou1wBjwrhh2NYe5q+a/yruTLLh6h3e+6tt601Q52Z9tSvK5uhOx8Y8uBPOWXvKqBxvJPP4X9iwjtElXj2kul2hSCshEmCzMQRBY3rKOjM60/S++9DYEMQWp93e1lyq1W/MmXRD", "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230031)(4636009)(136003)(396003)(39860400002)(346002)(376002)(230922051799003)(186009)(64100799003)(451199024)(82310400011)(1800799012)(36840700001)(46966006)(40470700004)(5660300002)(40460700003)(86362001)(4326008)(8676002)(8936002)(2906002)(41300700001)(36756003)(2616005)(107886003)(40480700001)(1076003)(356005)(82740400003)(426003)(83380400001)(7636003)(26005)(6286002)(336012)(478600001)(7696005)(6666004)(47076005)(36860700001)(55016003)(316002)(54906003)(6916009)(70206006)(70586007);\n DIR:OUT; SFP:1101;", "X-OriginatorOrg": "Nvidia.com", "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "03 Dec 2023 11:26:27.1731 (UTC)", "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 761e45e2-a11c-403b-5595-08dbf3f2b093", "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a", "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]", "X-MS-Exchange-CrossTenant-AuthSource": "\n DS1PEPF0001709A.namprd05.prod.outlook.com", 
"X-MS-Exchange-CrossTenant-AuthAs": "Anonymous", "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem", "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MW3PR12MB4505", "X-BeenThere": "dev@dpdk.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "DPDK patches and discussions <dev.dpdk.org>", "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>", "List-Archive": "<http://mails.dpdk.org/archives/dev/>", "List-Post": "<mailto:dev@dpdk.org>", "List-Help": "<mailto:dev-request@dpdk.org?subject=help>", "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>", "Errors-To": "dev-bounces@dpdk.org" }, "content": "Add structure describing physical device, and manage physical device\nglobal list.\n\nSigned-off-by: Michael Baum <michaelba@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c | 77 ++++++++++++++++++++++++++++++++++++-----\n drivers/net/mlx5/mlx5.h | 13 +++++++\n 2 files changed, 82 insertions(+), 8 deletions(-)", "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 3a182de248..f9fc652136 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -190,9 +190,10 @@ struct mlx5_shared_data *mlx5_shared_data;\n /** Driver-specific log messages type. 
*/\n int mlx5_logtype;\n \n-static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =\n-\t\t\t\t\t\tLIST_HEAD_INITIALIZER();\n+static LIST_HEAD(mlx5_dev_ctx_list, mlx5_dev_ctx_shared) dev_ctx_list = LIST_HEAD_INITIALIZER();\n+static LIST_HEAD(mlx5_phdev_list, mlx5_physical_device) phdev_list = LIST_HEAD_INITIALIZER();\n static pthread_mutex_t mlx5_dev_ctx_list_mutex;\n+\n static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \t[MLX5_IPOOL_DECAP_ENCAP] = {\n@@ -1692,6 +1693,60 @@ mlx5_init_shared_dev_registers(struct mlx5_dev_ctx_shared *sh)\n \tmlx5_init_hws_flow_tags_registers(sh);\n }\n \n+static struct mlx5_physical_device *\n+mlx5_get_physical_device(struct mlx5_common_device *cdev)\n+{\n+\tstruct mlx5_physical_device *phdev;\n+\tstruct mlx5_hca_attr *attr = &cdev->config.hca_attr;\n+\n+\t/* Search for physical device by system_image_guid. */\n+\tLIST_FOREACH(phdev, &phdev_list, next) {\n+\t\tif (phdev->guid == attr->system_image_guid) {\n+\t\t\tphdev->refcnt++;\n+\t\t\treturn phdev;\n+\t\t}\n+\t}\n+\tphdev = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,\n+\t\t\t sizeof(struct mlx5_physical_device),\n+\t\t\t RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\tif (!phdev) {\n+\t\tDRV_LOG(ERR, \"Physical device allocation failure.\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tphdev->guid = attr->system_image_guid;\n+\tphdev->refcnt = 1;\n+\tLIST_INSERT_HEAD(&phdev_list, phdev, next);\n+\tDRV_LOG(DEBUG, \"Physical device is created, guid=%\" PRIu64 \".\",\n+\t\tphdev->guid);\n+\treturn phdev;\n+}\n+\n+static void\n+mlx5_physical_device_destroy(struct mlx5_physical_device *phdev)\n+{\n+#ifdef RTE_LIBRTE_MLX5_DEBUG\n+\t/* Check the object presence in the list. 
*/\n+\tstruct mlx5_physical_device *lphdev;\n+\n+\tLIST_FOREACH(lphdev, &phdev_list, next)\n+\t\tif (lphdev == phdev)\n+\t\t\tbreak;\n+\tMLX5_ASSERT(lphdev);\n+\tif (lphdev != phdev) {\n+\t\tDRV_LOG(ERR, \"Freeing non-existing physical device\");\n+\t\treturn;\n+\t}\n+#endif\n+\tMLX5_ASSERT(phdev);\n+\tMLX5_ASSERT(phdev->refcnt);\n+\tif (--phdev->refcnt)\n+\t\treturn;\n+\t/* Remove physical device from the global device list. */\n+\tLIST_REMOVE(phdev, next);\n+\tmlx5_free(phdev);\n+}\n+\n /**\n * Allocate shared device context. If there is multiport device the\n * master and representors will share this context, if there is single\n@@ -1725,7 +1780,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n \tpthread_mutex_lock(&mlx5_dev_ctx_list_mutex);\n \t/* Search for IB context by device name. */\n-\tLIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {\n+\tLIST_FOREACH(sh, &dev_ctx_list, next) {\n \t\tif (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {\n \t\t\tsh->refcnt++;\n \t\t\tgoto exit;\n@@ -1765,6 +1820,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \t\tsizeof(sh->ibdev_name) - 1);\n \tstrncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),\n \t\tsizeof(sh->ibdev_path) - 1);\n+\tsh->phdev = mlx5_get_physical_device(sh->cdev);\n+\tif (!sh->phdev)\n+\t\tgoto error;\n \t/*\n \t * Setting port_id to max unallowed value means there is no interrupt\n \t * subhandler installed for the given port index i.\n@@ -1798,7 +1856,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n #endif\n \t}\n \tmlx5_os_dev_shared_handler_install(sh);\n-\tif (LIST_EMPTY(&mlx5_dev_ctx_list)) {\n+\tif (LIST_EMPTY(&dev_ctx_list)) {\n \t\terr = mlx5_flow_os_init_workspace_once();\n \t\tif (err)\n \t\t\tgoto error;\n@@ -1811,7 +1869,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \tmlx5_flow_aging_init(sh);\n 
\tmlx5_flow_ipool_create(sh);\n \t/* Add context to the global device list. */\n-\tLIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);\n+\tLIST_INSERT_HEAD(&dev_ctx_list, sh, next);\n \trte_spinlock_init(&sh->geneve_tlv_opt_sl);\n \tmlx5_init_shared_dev_registers(sh);\n \t/* Init counter pool list header and lock. */\n@@ -1833,6 +1891,8 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \t} while (++i <= (uint32_t)sh->bond.n_port);\n \tif (sh->td)\n \t\tclaim_zero(mlx5_devx_cmd_destroy(sh->td));\n+\tif (sh->phdev)\n+\t\tmlx5_physical_device_destroy(sh->phdev);\n \tmlx5_free(sh);\n \trte_errno = err;\n \treturn NULL;\n@@ -1919,7 +1979,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)\n \t/* Check the object presence in the list. */\n \tstruct mlx5_dev_ctx_shared *lctx;\n \n-\tLIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)\n+\tLIST_FOREACH(lctx, &dev_ctx_list, next)\n \t\tif (lctx == sh)\n \t\t\tbreak;\n \tMLX5_ASSERT(lctx);\n@@ -1945,7 +2005,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)\n \t/* Remove context from the global device list. */\n \tLIST_REMOVE(sh, next);\n \t/* Release resources on the last device removal. */\n-\tif (LIST_EMPTY(&mlx5_dev_ctx_list)) {\n+\tif (LIST_EMPTY(&dev_ctx_list)) {\n \t\tmlx5_os_net_cleanup();\n \t\tmlx5_flow_os_release_workspace();\n \t}\n@@ -1985,6 +2045,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)\n \t\tMLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);\n \tpthread_mutex_destroy(&sh->txpp.mutex);\n \tmlx5_lwm_unset(sh);\n+\tmlx5_physical_device_destroy(sh->phdev);\n \tmlx5_free(sh);\n \treturn;\n exit:\n@@ -2929,7 +2990,7 @@ mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,\n \t\treturn 0;\n \tpthread_mutex_lock(&mlx5_dev_ctx_list_mutex);\n \t/* Search for IB context by common device pointer. 
*/\n-\tLIST_FOREACH(sh, &mlx5_dev_ctx_list, next)\n+\tLIST_FOREACH(sh, &dev_ctx_list, next)\n \t\tif (sh->cdev == cdev)\n \t\t\tbreak;\n \tpthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 263ebead7f..6a82c38cf4 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1419,6 +1419,18 @@ struct mlx5_dev_registers {\n #define HAVE_MLX5_DR_CREATE_ACTION_ASO_EXT\n #endif\n \n+/**\n+ * Physical device structure.\n+ * This device is created once per NIC to manage recourses shared by all ports\n+ * under same physical device.\n+ */\n+struct mlx5_physical_device {\n+\tLIST_ENTRY(mlx5_physical_device) next;\n+\tstruct mlx5_dev_ctx_shared *sh; /* Created on sherd context. */\n+\tuint64_t guid; /* System image guid, the uniq ID of physical device. */\n+\tuint32_t refcnt;\n+};\n+\n /*\n * Shared Infiniband device context for Master/Representors\n * which belong to same IB device with multiple IB ports.\n@@ -1449,6 +1461,7 @@ struct mlx5_dev_ctx_shared {\n \tuint32_t max_port; /* Maximal IB device port index. */\n \tstruct mlx5_bond_info bond; /* Bonding information. */\n \tstruct mlx5_common_device *cdev; /* Backend mlx5 device. */\n+\tstruct mlx5_physical_device *phdev; /* Backend physical device. */\n \tuint32_t tdn; /* Transport Domain number. */\n \tchar ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */\n \tchar ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */\n", "prefixes": [ "v1", "12/23" ] }{ "id": 134754, "url": "