get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/102322/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 102322,
    "url": "http://patches.dpdk.org/api/patches/102322/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211019205602.3188203-17-michaelba@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211019205602.3188203-17-michaelba@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211019205602.3188203-17-michaelba@nvidia.com",
    "date": "2021-10-19T20:56:00",
    "name": "[v3,16/18] common/mlx5: share MR management",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "09efbb779cc02c8c48e10252e24255d97ad604ab",
    "submitter": {
        "id": 1949,
        "url": "http://patches.dpdk.org/api/people/1949/?format=api",
        "name": "Michael Baum",
        "email": "michaelba@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211019205602.3188203-17-michaelba@nvidia.com/mbox/",
    "series": [
        {
            "id": 19808,
            "url": "http://patches.dpdk.org/api/series/19808/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=19808",
            "date": "2021-10-19T20:55:44",
            "name": "mlx5: sharing global MR cache between drivers",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/19808/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/102322/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/102322/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1A79CA0C41;\n\tTue, 19 Oct 2021 22:58:32 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 078B6411A4;\n\tTue, 19 Oct 2021 22:57:24 +0200 (CEST)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam11on2056.outbound.protection.outlook.com [40.107.223.56])\n by mails.dpdk.org (Postfix) with ESMTP id D73A1411A2\n for <dev@dpdk.org>; Tue, 19 Oct 2021 22:57:06 +0200 (CEST)",
            "from BN0PR02CA0001.namprd02.prod.outlook.com (2603:10b6:408:e4::6)\n by MN2PR12MB3838.namprd12.prod.outlook.com (2603:10b6:208:16c::11) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.16; Tue, 19 Oct\n 2021 20:57:01 +0000",
            "from BN8NAM11FT033.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:e4:cafe::6a) by BN0PR02CA0001.outlook.office365.com\n (2603:10b6:408:e4::6) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4628.15 via Frontend\n Transport; Tue, 19 Oct 2021 20:57:01 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT033.mail.protection.outlook.com (10.13.177.149) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4608.15 via Frontend Transport; Tue, 19 Oct 2021 20:57:00 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Tue, 19 Oct\n 2021 20:56:50 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=dXClgZ25HVgzOxHqxjJpe+loWU/JZOxkJk4e/owUaAG821e1gE1qazxl0tLbJ1zF5DjI3U892jLaMe/5KvTb/r2rwR9wf4wJEhI0izt3zcFRKYH93EWr6OuysU9PFddduVMgp7kppRkAvt9ij0uIrx/Shy/9RYucyEiAkb5gVksxxC/h8WwSluuPSTkGvxo7TmlFGwpbkuuKgxYnT+pV0XMHwTJHRX2ib47wURG0qLBqpSASKo7+vzXATOz0I3oxFJhTSkbumUApfdG3nuUYGDWw35fOc9nG1OT18xU7sWbx5TtMXiocHCgp8i26eX8140WKVQHS1Qup3gU003TxQA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=HRHHpGZhM2xwjAZmDPUneDXZ61+PF1qVOJuWhtUN7Q4=;\n b=hKNeyv68FISruHOqxHJcj3CODw3Qsy7kR33dxJVIPNcA+wv3ko5X7Mb1Y/Rz7PlKfiux3r1xy5FX0bOlHNYdj9w+2qlbh4trEhTRzVSc3+OOaIROrokGruOXyGcsq5kdLgeaAuL/tpWwlAfc4XMi5zJu1hYtt7ka3M1ROl00ovJZzOGVPPprAPY/UMX+4pFw/SYeXsW8rVZJ3jxIc1z9pdJZZjvU9YptKlDhDb/her2XSioNHTsAyMnMqcDf5ld/pierZBoic9KBP6wUFOz7kZDuuLGZ/bsnB190LN2B8tV/EnurgX7RFCPmUYVUEyMaZhpQu8AGPkNuMq5Ctfp/TQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=HRHHpGZhM2xwjAZmDPUneDXZ61+PF1qVOJuWhtUN7Q4=;\n b=SQglkP5ByaRO0J+7147NnKsHT8mjN6cIOi/nNtAuWzG9LIU3gpRtSK50zMB9tIdyCu9c27BNbTFBs7C9D/qxAlqfnW6DrwzgBh550pXakHtTox56ZqSO/hNPjMZz0vYK22TrXlTF8Nd+gH3o0p3NEuJsVPvk4+3GxnWiEm8NxKQqFQ52WGDFAb481bXw+idg9ULkt9+WzQGCTaNwvmjmTrTeFhxHXfJZt2sP1WfxQe43KEHS1MNXCPqzCfdqU1OWXoWJIa4ueP98QWiRRoNgcVUEpyPmzJ0HFt1BMCfcwM2wSvtbUEMG+1yWB00cw93mmcnKVy0HIps0Rppcby0gPQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; monjalon.net; dkim=none (message not signed)\n header.d=none;monjalon.net; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "<michaelba@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Matan Azrad <matan@nvidia.com>, Thomas Monjalon <thomas@monjalon.net>,\n Michael Baum <michaelba@oss.nvidia.com>",
        "Date": "Tue, 19 Oct 2021 23:56:00 +0300",
        "Message-ID": "<20211019205602.3188203-17-michaelba@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20211019205602.3188203-1-michaelba@nvidia.com>",
        "References": "<20211006220350.2357487-1-michaelba@nvidia.com>\n <20211019205602.3188203-1-michaelba@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "fc696b5a-539d-4f56-d2ca-08d99342ff79",
        "X-MS-TrafficTypeDiagnostic": "MN2PR12MB3838:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <MN2PR12MB3838A4AE5BA08AE7EE07BB53CCBD9@MN2PR12MB3838.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:298;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n JH7nVvuJh6kcQLDb3WutVeQSKlfNwVsF/KQNkeguwYcHpTJL25mmrAj6LRf2bZUw2IhMcPLEuZIJ1/n48+qkOiBlO22eZMFN8O68LQQy/d29SGHNfSjj3P4WNkPyeaXau2oDC4Rc2WbgD6Y0VD2/ymhZBawU2YY1yHRHNqOx+6/tL4CzrfwtZ2LBbxGNKfeVzodgK4msfbX/t8gITSJzQGXM/vjYfpIOeLuRDCLAplXOuaHwHUfoJph2Yv6Gdyjd4d7T9yHZ0YDWWv+cLNxUc3mkgtuOBFGEiMNcal6uPzso0iNu8jvu9SlvNU+CyDEMVnJWD8IAQpEaWjwR+pYcIupAF3jDTPqcRl8hJukkHnMJkc/8Vue/d/3n1om8po/8tT24BDjOSBSCPjuGIc8PuklTyYPRf+B8uFdaTKxcp5xjXCCit7emYFX/q9ZQt/CuqKWWwT1xzr6zcJrdYvqhveuYuHQ1Hl7xftu9+InIR+3OGdr44vz5ewbA8xrr936aML6RIVEXXeKu+BROcsYf49oQzLdWOav8ojmjGiArmcxiPRwAhIWnMIqp+u2B7w+NWz1qDhVKQhlMzyrf46CSZafi6OapqhHUB82enQ5lFp3btsLZ+UgaTO7+0BHWL0VNLtCFaQejG+sqKpnAb+w6XQh9x8tBmvT0KeaV1ARwQMnBSDOKaSCoKQOuFQLUcY2rN28sIl0U4ig5cextVl9qUw==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(70206006)(36756003)(1076003)(36906005)(2616005)(70586007)(7636003)(426003)(336012)(316002)(6916009)(30864003)(47076005)(4326008)(82310400003)(2906002)(36860700001)(83380400001)(5660300002)(55016002)(8676002)(7696005)(86362001)(8936002)(356005)(107886003)(186003)(508600001)(16526019)(54906003)(6666004)(2876002)(6286002)(26005)(579004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "19 Oct 2021 20:57:00.3609 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n fc696b5a-539d-4f56-d2ca-08d99342ff79",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT033.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MN2PR12MB3838",
        "Subject": "[dpdk-dev] [PATCH v3 16/18] common/mlx5: share MR management",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Michael Baum <michaelba@oss.nvidia.com>\n\nAdd global shared MR cache as a field of common device structure.\nMove MR management to use this global cache for all drivers.\n\nSigned-off-by: Michael Baum <michaelba@oss.nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/common/mlx5/mlx5_common.c        | 54 ++++++++++++++++-\n drivers/common/mlx5/mlx5_common.h        |  4 +-\n drivers/common/mlx5/mlx5_common_mr.c     |  7 +--\n drivers/common/mlx5/mlx5_common_mr.h     |  4 --\n drivers/common/mlx5/version.map          |  4 --\n drivers/compress/mlx5/mlx5_compress.c    | 57 +-----------------\n drivers/crypto/mlx5/mlx5_crypto.c        | 56 +----------------\n drivers/crypto/mlx5/mlx5_crypto.h        |  1 -\n drivers/net/mlx5/linux/mlx5_mp_os.c      |  2 +-\n drivers/net/mlx5/linux/mlx5_os.c         |  5 --\n drivers/net/mlx5/mlx5.c                  | 36 ++---------\n drivers/net/mlx5/mlx5.h                  |  3 -\n drivers/net/mlx5/mlx5_flow_aso.c         | 28 ++++-----\n drivers/net/mlx5/mlx5_mr.c               | 76 +++++++-----------------\n drivers/net/mlx5/mlx5_mr.h               | 26 --------\n drivers/net/mlx5/mlx5_rx.c               |  1 -\n drivers/net/mlx5/mlx5_rx.h               |  6 +-\n drivers/net/mlx5/mlx5_rxq.c              |  4 +-\n drivers/net/mlx5/mlx5_rxtx.c             |  1 -\n drivers/net/mlx5/mlx5_rxtx.h             |  1 -\n drivers/net/mlx5/mlx5_rxtx_vec.h         |  1 -\n drivers/net/mlx5/mlx5_trigger.c          |  3 +-\n drivers/net/mlx5/mlx5_tx.c               |  1 -\n drivers/net/mlx5/mlx5_tx.h               |  1 -\n drivers/net/mlx5/mlx5_txq.c              |  2 +-\n drivers/net/mlx5/windows/mlx5_os.c       | 14 -----\n drivers/regex/mlx5/mlx5_regex.c          | 63 --------------------\n drivers/regex/mlx5/mlx5_regex.h          |  3 -\n drivers/regex/mlx5/mlx5_regex_control.c  |  2 +-\n drivers/regex/mlx5/mlx5_regex_fastpath.c |  2 +-\n 30 files changed, 110 insertions(+), 358 deletions(-)\n delete mode 100644 drivers/net/mlx5/mlx5_mr.h",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c\nindex 17a54acf1e..d6acf87493 100644\n--- a/drivers/common/mlx5/mlx5_common.c\n+++ b/drivers/common/mlx5/mlx5_common.c\n@@ -308,6 +308,41 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)\n #endif\n }\n \n+/**\n+ * Callback for memory event.\n+ *\n+ * @param event_type\n+ *   Memory event type.\n+ * @param addr\n+ *   Address of memory.\n+ * @param len\n+ *   Size of memory.\n+ */\n+static void\n+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,\n+\t\t     size_t len, void *arg __rte_unused)\n+{\n+\tstruct mlx5_common_device *cdev;\n+\n+\t/* Must be called from the primary process. */\n+\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n+\tswitch (event_type) {\n+\tcase RTE_MEM_EVENT_FREE:\n+\t\tpthread_mutex_lock(&devices_list_lock);\n+\t\t/* Iterate all the existing mlx5 devices. */\n+\t\tTAILQ_FOREACH(cdev, &devices_list, next)\n+\t\t\tmlx5_free_mr_by_addr(&cdev->mr_scache,\n+\t\t\t\t\t     mlx5_os_get_ctx_device_name\n+\t\t\t\t\t\t\t\t    (cdev->ctx),\n+\t\t\t\t\t     addr, len);\n+\t\tpthread_mutex_unlock(&devices_list_lock);\n+\t\tbreak;\n+\tcase RTE_MEM_EVENT_ALLOC:\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n /**\n  * Uninitialize all HW global of device context.\n  *\n@@ -376,8 +411,13 @@ mlx5_common_dev_release(struct mlx5_common_device *cdev)\n \tpthread_mutex_lock(&devices_list_lock);\n \tTAILQ_REMOVE(&devices_list, cdev, next);\n \tpthread_mutex_unlock(&devices_list_lock);\n-\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n+\t\tif (TAILQ_EMPTY(&devices_list))\n+\t\t\trte_mem_event_callback_unregister(\"MLX5_MEM_EVENT_CB\",\n+\t\t\t\t\t\t\t  NULL);\n+\t\tmlx5_mr_release_cache(&cdev->mr_scache);\n \t\tmlx5_dev_hw_global_release(cdev);\n+\t}\n \trte_free(cdev);\n }\n \n@@ -412,6 +452,18 @@ mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)\n \t\trte_free(cdev);\n \t\treturn NULL;\n \t}\n+\t/* Initialize global MR cache resources and update its functions. */\n+\tret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to initialize global MR share cache.\");\n+\t\tmlx5_dev_hw_global_release(cdev);\n+\t\trte_free(cdev);\n+\t\treturn NULL;\n+\t}\n+\t/* Register callback function for global shared MR cache management. */\n+\tif (TAILQ_EMPTY(&devices_list))\n+\t\trte_mem_event_callback_register(\"MLX5_MEM_EVENT_CB\",\n+\t\t\t\t\t\tmlx5_mr_mem_event_cb, NULL);\n exit:\n \tpthread_mutex_lock(&devices_list_lock);\n \tTAILQ_INSERT_HEAD(&devices_list, cdev, next);\ndiff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h\nindex 8df4f32aa2..1a6b8c0f52 100644\n--- a/drivers/common/mlx5/mlx5_common.h\n+++ b/drivers/common/mlx5/mlx5_common.h\n@@ -350,6 +350,7 @@ struct mlx5_common_device {\n \tvoid *ctx; /* Verbs/DV/DevX context. */\n \tvoid *pd; /* Protection Domain. */\n \tuint32_t pdn; /* Protection Domain Number. */\n+\tstruct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */\n \tstruct mlx5_common_dev_config config; /* Device configuration. 
*/\n };\n \n@@ -453,8 +454,7 @@ mlx5_dev_is_pci(const struct rte_device *dev);\n __rte_internal\n uint32_t\n mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,\n-\t      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf,\n-\t      struct mlx5_mr_share_cache *share_cache);\n+\t      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf);\n \n /* mlx5_common_os.c */\n \ndiff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c\nindex 4de1c25f2a..d63e973b60 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.c\n+++ b/drivers/common/mlx5/mlx5_common_mr.c\n@@ -1848,16 +1848,13 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,\n  *   Pointer to per-queue MR control structure.\n  * @param mbuf\n  *   Pointer to mbuf.\n- * @param share_cache\n- *   Pointer to a global shared MR cache.\n  *\n  * @return\n  *   Searched LKey on success, UINT32_MAX on no match.\n  */\n uint32_t\n mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,\n-\t      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf,\n-\t      struct mlx5_mr_share_cache *share_cache)\n+\t      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)\n {\n \tuint32_t lkey;\n \tuintptr_t addr = (uintptr_t)mbuf->buf_addr;\n@@ -1871,6 +1868,6 @@ mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,\n \tif (likely(lkey != UINT32_MAX))\n \t\treturn lkey;\n \t/* Take slower bottom-half on miss. */\n-\treturn mlx5_mr_addr2mr_bh(cdev->pd, mp_id, share_cache, mr_ctrl,\n+\treturn mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,\n \t\t\t\t  addr, cdev->config.mr_ext_memseg_en);\n }\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h\nindex 36689dfb54..0bc3519fd9 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.h\n+++ b/drivers/common/mlx5/mlx5_common_mr.h\n@@ -140,9 +140,7 @@ __rte_internal\n uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,\n \t\t\t       struct mlx5_mr_ctrl *mr_ctrl,\n \t\t\t       struct rte_mempool *mp, uintptr_t addr);\n-__rte_internal\n void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);\n-__rte_internal\n int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);\n __rte_internal\n void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);\n@@ -150,7 +148,6 @@ __rte_internal\n void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);\n __rte_internal\n void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);\n-__rte_internal\n void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,\n \t\t\t  const char *ibdev_name, const void *addr, size_t len);\n __rte_internal\n@@ -183,7 +180,6 @@ __rte_internal\n void\n mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);\n \n-__rte_internal\n void\n mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);\n \ndiff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map\nindex 292c5ede89..12128e4738 100644\n--- a/drivers/common/mlx5/version.map\n+++ b/drivers/common/mlx5/version.map\n@@ -109,7 +109,6 @@ INTERNAL {\n \tmlx5_mr_addr2mr_bh;\n \tmlx5_mr_btree_dump;\n \tmlx5_mr_btree_free;\n-\tmlx5_mr_create_cache;\n \tmlx5_mr_create_primary;\n     mlx5_mr_ctrl_init;\n \tmlx5_mr_dump_cache;\n@@ -119,9 +118,7 @@ INTERNAL {\n \tmlx5_mr_lookup_cache;\n \tmlx5_mr_lookup_list;\n \tmlx5_mr_mb2mr;\n-\tmlx5_free_mr_by_addr;\n \tmlx5_mr_rebuild_cache;\n-\tmlx5_mr_release_cache;\n \n \tmlx5_nl_allmulti; # 
WINDOWS_NO_EXPORT\n \tmlx5_nl_ifindex; # WINDOWS_NO_EXPORT\n@@ -139,7 +136,6 @@ INTERNAL {\n \n \tmlx5_os_umem_dereg;\n \tmlx5_os_umem_reg;\n-    mlx5_os_set_reg_mr_cb;\n \n \tmlx5_realloc;\n \ndiff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c\nindex a5cec27894..f68800ff5d 100644\n--- a/drivers/compress/mlx5/mlx5_compress.c\n+++ b/drivers/compress/mlx5/mlx5_compress.c\n@@ -43,7 +43,6 @@ struct mlx5_compress_priv {\n \tstruct rte_compressdev_config dev_config;\n \tLIST_HEAD(xform_list, mlx5_compress_xform) xform_list;\n \trte_spinlock_t xform_sl;\n-\tstruct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */\n \tvolatile uint64_t *uar_addr;\n \t/* HCA caps*/\n \tuint32_t mmo_decomp_sq:1;\n@@ -206,7 +205,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,\n \t\treturn -rte_errno;\n \t}\n \tdev->data->queue_pairs[qp_id] = qp;\n-\tif (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,\n+\tif (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,\n \t\t\t      priv->dev_config.socket_id)) {\n \t\tDRV_LOG(ERR, \"Cannot allocate MR Btree for qp %u.\",\n \t\t\t(uint32_t)qp_id);\n@@ -444,8 +443,7 @@ mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,\n \tuintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);\n \n \tdseg->bcount = rte_cpu_to_be_32(len);\n-\tdseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf,\n-\t\t\t\t   &qp->priv->mr_scache);\n+\tdseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf);\n \tdseg->pbuf = rte_cpu_to_be_64(addr);\n \treturn dseg->lkey;\n }\n@@ -679,41 +677,6 @@ mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv)\n \treturn 0;\n }\n \n-/**\n- * Callback for memory event.\n- *\n- * @param event_type\n- *   Memory event type.\n- * @param addr\n- *   Address of memory.\n- * @param len\n- *   Size of memory.\n- */\n-static void\n-mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,\n-\t\t\t      size_t len, void *arg __rte_unused)\n-{\n-\tstruct mlx5_compress_priv *priv;\n-\n-\t/* Must be called from the primary process. */\n-\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n-\tswitch (event_type) {\n-\tcase RTE_MEM_EVENT_FREE:\n-\t\tpthread_mutex_lock(&priv_list_lock);\n-\t\t/* Iterate all the existing mlx5 devices. */\n-\t\tTAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)\n-\t\t\tmlx5_free_mr_by_addr(&priv->mr_scache,\n-\t\t\t\t\t     mlx5_os_get_ctx_device_name\n-\t\t\t\t\t\t\t      (priv->cdev->ctx),\n-\t\t\t\t\t     addr, len);\n-\t\tpthread_mutex_unlock(&priv_list_lock);\n-\t\tbreak;\n-\tcase RTE_MEM_EVENT_ALLOC:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n static int\n mlx5_compress_dev_probe(struct mlx5_common_device *cdev)\n {\n@@ -765,18 +728,6 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)\n \t\trte_compressdev_pmd_destroy(priv->compressdev);\n \t\treturn -1;\n \t}\n-\tif (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate shared cache MR memory.\");\n-\t\tmlx5_compress_uar_release(priv);\n-\t\trte_compressdev_pmd_destroy(priv->compressdev);\n-\t\trte_errno = ENOMEM;\n-\t\treturn -rte_errno;\n-\t}\n-\t/* Register callback function for global shared MR cache management. 
*/\n-\tif (TAILQ_EMPTY(&mlx5_compress_priv_list))\n-\t\trte_mem_event_callback_register(\"MLX5_MEM_EVENT_CB\",\n-\t\t\t\t\t\tmlx5_compress_mr_mem_event_cb,\n-\t\t\t\t\t\tNULL);\n \tpthread_mutex_lock(&priv_list_lock);\n \tTAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);\n \tpthread_mutex_unlock(&priv_list_lock);\n@@ -796,10 +747,6 @@ mlx5_compress_dev_remove(struct mlx5_common_device *cdev)\n \t\tTAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);\n \tpthread_mutex_unlock(&priv_list_lock);\n \tif (priv) {\n-\t\tif (TAILQ_EMPTY(&mlx5_compress_priv_list))\n-\t\t\trte_mem_event_callback_unregister(\"MLX5_MEM_EVENT_CB\",\n-\t\t\t\t\t\t\t  NULL);\n-\t\tmlx5_mr_release_cache(&priv->mr_scache);\n \t\tmlx5_compress_uar_release(priv);\n \t\trte_compressdev_pmd_destroy(priv->compressdev);\n \t}\ndiff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c\nindex 1105d3fcd5..d857331225 100644\n--- a/drivers/crypto/mlx5/mlx5_crypto.c\n+++ b/drivers/crypto/mlx5/mlx5_crypto.c\n@@ -316,8 +316,7 @@ mlx5_crypto_klm_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp,\n \t*remain -= data_len;\n \tklm->bcount = rte_cpu_to_be_32(data_len);\n \tklm->pbuf = rte_cpu_to_be_64(addr);\n-\tklm->lkey = mlx5_mr_mb2mr(priv->cdev, 0, &qp->mr_ctrl, mbuf,\n-\t\t\t\t  &priv->mr_scache);\n+\tklm->lkey = mlx5_mr_mb2mr(priv->cdev, 0, &qp->mr_ctrl, mbuf);\n \treturn klm->lkey;\n \n }\n@@ -643,7 +642,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n \t\tDRV_LOG(ERR, \"Failed to create QP.\");\n \t\tgoto error;\n \t}\n-\tif (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,\n+\tif (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,\n \t\t\t      priv->dev_config.socket_id) != 0) {\n \t\tDRV_LOG(ERR, \"Cannot allocate MR Btree for qp %u.\",\n \t\t\t(uint32_t)qp_id);\n@@ -844,41 +843,6 @@ mlx5_crypto_parse_devargs(struct rte_devargs *devargs,\n \treturn 0;\n }\n \n-/**\n- * Callback for memory event.\n- *\n- * @param event_type\n- *   Memory event type.\n- * @param addr\n- *   Address of memory.\n- * @param len\n- *   Size of memory.\n- */\n-static void\n-mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,\n-\t\t\t    size_t len, void *arg __rte_unused)\n-{\n-\tstruct mlx5_crypto_priv *priv;\n-\n-\t/* Must be called from the primary process. */\n-\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n-\tswitch (event_type) {\n-\tcase RTE_MEM_EVENT_FREE:\n-\t\tpthread_mutex_lock(&priv_list_lock);\n-\t\t/* Iterate all the existing mlx5 devices. 
*/\n-\t\tTAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)\n-\t\t\tmlx5_free_mr_by_addr(&priv->mr_scache,\n-\t\t\t\t\t     mlx5_os_get_ctx_device_name\n-\t\t\t\t\t\t\t      (priv->cdev->ctx),\n-\t\t\t\t\t     addr, len);\n-\t\tpthread_mutex_unlock(&priv_list_lock);\n-\t\tbreak;\n-\tcase RTE_MEM_EVENT_ALLOC:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n static int\n mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)\n {\n@@ -940,13 +904,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)\n \t\trte_cryptodev_pmd_destroy(priv->crypto_dev);\n \t\treturn -1;\n \t}\n-\tif (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate shared cache MR memory.\");\n-\t\tmlx5_crypto_uar_release(priv);\n-\t\trte_cryptodev_pmd_destroy(priv->crypto_dev);\n-\t\trte_errno = ENOMEM;\n-\t\treturn -rte_errno;\n-\t}\n \tpriv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);\n \tpriv->max_segs_num = devarg_prms.max_segs_num;\n \tpriv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +\n@@ -960,11 +917,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)\n \tpriv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size;\n \tpriv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;\n \tpriv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);\n-\t/* Register callback function for global shared MR cache management. */\n-\tif (TAILQ_EMPTY(&mlx5_crypto_priv_list))\n-\t\trte_mem_event_callback_register(\"MLX5_MEM_EVENT_CB\",\n-\t\t\t\t\t\tmlx5_crypto_mr_mem_event_cb,\n-\t\t\t\t\t\tNULL);\n \tpthread_mutex_lock(&priv_list_lock);\n \tTAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next);\n \tpthread_mutex_unlock(&priv_list_lock);\n@@ -984,10 +936,6 @@ mlx5_crypto_dev_remove(struct mlx5_common_device *cdev)\n \t\tTAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next);\n \tpthread_mutex_unlock(&priv_list_lock);\n \tif (priv) {\n-\t\tif (TAILQ_EMPTY(&mlx5_crypto_priv_list))\n-\t\t\trte_mem_event_callback_unregister(\"MLX5_MEM_EVENT_CB\",\n-\t\t\t\t\t\t\t  NULL);\n-\t\tmlx5_mr_release_cache(&priv->mr_scache);\n \t\tmlx5_crypto_uar_release(priv);\n \t\trte_cryptodev_pmd_destroy(priv->crypto_dev);\n \t\tclaim_zero(mlx5_devx_cmd_destroy(priv->login_obj));\ndiff --git a/drivers/crypto/mlx5/mlx5_crypto.h b/drivers/crypto/mlx5/mlx5_crypto.h\nindex 030f369423..69cef81d77 100644\n--- a/drivers/crypto/mlx5/mlx5_crypto.h\n+++ b/drivers/crypto/mlx5/mlx5_crypto.h\n@@ -26,7 +26,6 @@ struct mlx5_crypto_priv {\n \tuint32_t max_segs_num; /* Maximum supported data segs. */\n \tstruct mlx5_hlist *dek_hlist; /* Dek hash list. */\n \tstruct rte_cryptodev_config dev_config;\n-\tstruct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. 
*/\n \tstruct mlx5_devx_obj *login_obj;\n \tuint64_t keytag;\n \tuint16_t wqe_set_size;\ndiff --git a/drivers/net/mlx5/linux/mlx5_mp_os.c b/drivers/net/mlx5/linux/mlx5_mp_os.c\nindex 286a7caf36..c3b6495d9e 100644\n--- a/drivers/net/mlx5/linux/mlx5_mp_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_mp_os.c\n@@ -91,7 +91,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)\n \tcase MLX5_MP_REQ_CREATE_MR:\n \t\tmp_init_msg(&priv->mp_id, &mp_res, param->type);\n \t\tlkey = mlx5_mr_create_primary(cdev->pd,\n-\t\t\t\t\t      &priv->sh->share_cache,\n+\t\t\t\t\t      &priv->sh->cdev->mr_scache,\n \t\t\t\t\t      &entry, param->args.addr,\n \t\t\t\t\t      cdev->config.mr_ext_memseg_en);\n \t\tif (lkey == UINT32_MAX)\ndiff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex 9e445f2f9b..61c4870d8c 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -44,7 +44,6 @@\n #include \"mlx5_rx.h\"\n #include \"mlx5_tx.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_mr.h\"\n #include \"mlx5_flow.h\"\n #include \"rte_pmd_mlx5.h\"\n #include \"mlx5_verbs.h\"\n@@ -623,10 +622,6 @@ mlx5_init_once(void)\n \tcase RTE_PROC_PRIMARY:\n \t\tif (sd->init_done)\n \t\t\tbreak;\n-\t\tLIST_INIT(&sd->mem_event_cb_list);\n-\t\trte_rwlock_init(&sd->mem_event_rwlock);\n-\t\trte_mem_event_callback_register(\"MLX5_MEM_EVENT_CB\",\n-\t\t\t\t\t\tmlx5_mr_mem_event_cb, NULL);\n \t\tret = mlx5_mp_init_primary(MLX5_MP_NAME,\n \t\t\t\t\t   mlx5_mp_os_primary_handle);\n \t\tif (ret)\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex a6c196b368..91aa5c0c75 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -36,7 +36,6 @@\n #include \"mlx5_rx.h\"\n #include \"mlx5_tx.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_mr.h\"\n #include \"mlx5_flow.h\"\n #include \"mlx5_flow_os.h\"\n #include \"rte_pmd_mlx5.h\"\n@@ -1112,7 +1111,7 @@ mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,\n \tstruct mlx5_mp_id mp_id;\n \n \tmlx5_mp_id_init(&mp_id, 0);\n-\tif (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)\n+\tif (mlx5_mr_mempool_unregister(&sh->cdev->mr_scache, mp, &mp_id) < 0)\n \t\tDRV_LOG(WARNING, \"Failed to unregister mempool %s for PD %p: %s\",\n \t\t\tmp->name, sh->cdev->pd, rte_strerror(rte_errno));\n }\n@@ -1134,7 +1133,7 @@ mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)\n \tint ret;\n \n \tmlx5_mp_id_init(&mp_id, 0);\n-\tret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,\n+\tret = mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, mp,\n \t\t\t\t       &mp_id);\n \tif (ret < 0 && rte_errno != EEXIST)\n \t\tDRV_LOG(ERR, \"Failed to register existing mempool %s for PD %p: %s\",\n@@ -1177,8 +1176,8 @@ mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,\n \tswitch (event) {\n \tcase RTE_MEMPOOL_EVENT_READY:\n \t\tmlx5_mp_id_init(&mp_id, 0);\n-\t\tif (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,\n-\t\t\t\t\t     &mp_id) < 0)\n+\t\tif (mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd,\n+\t\t\t\t\t     mp, &mp_id) < 0)\n \t\t\tDRV_LOG(ERR, \"Failed to register new mempool %s for PD %p: %s\",\n \t\t\t\tmp->name, sh->cdev->pd,\n \t\t\t\trte_strerror(rte_errno));\n@@ -1342,20 +1341,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \tfor (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)\n \t\trte_spinlock_init(&sh->uar_lock[i]);\n #endif\n-\t/*\n-\t * Once the device 
is added to the list of memory event\n-\t * callback, its global MR cache table cannot be expanded\n-\t * on the fly because of deadlock. If it overflows, lookup\n-\t * should be done by searching MR list linearly, which is slow.\n-\t *\n-\t * At this point the device is not added to the memory\n-\t * event list yet, context is just being created.\n-\t */\n-\terr = mlx5_mr_create_cache(&sh->share_cache, sh->numa_node);\n-\tif (err) {\n-\t\terr = rte_errno;\n-\t\tgoto error;\n-\t}\n \tmlx5_os_dev_shared_handler_install(sh);\n \tsh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);\n \tif (!sh->cnt_id_tbl) {\n@@ -1370,11 +1355,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \tmlx5_flow_aging_init(sh);\n \tmlx5_flow_counters_mng_init(sh);\n \tmlx5_flow_ipool_create(sh, config);\n-\t/* Add device to memory callback list. */\n-\trte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);\n-\tLIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,\n-\t\t\t sh, mem_event_cb);\n-\trte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);\n \t/* Add context to the global device list. */\n \tLIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);\n \trte_spinlock_init(&sh->geneve_tlv_opt_sl);\n@@ -1387,8 +1367,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \tMLX5_ASSERT(sh);\n \tif (sh->cnt_id_tbl)\n \t\tmlx5_l3t_destroy(sh->cnt_id_tbl);\n-\tif (sh->share_cache.cache.table)\n-\t\tmlx5_mr_btree_free(&sh->share_cache.cache);\n \tif (sh->tis)\n \t\tclaim_zero(mlx5_devx_cmd_destroy(sh->tis));\n \tif (sh->td)\n@@ -1444,12 +1422,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)\n \tif (ret == 0)\n \t\trte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,\n \t\t\t\t sh);\n-\t/* Remove from memory callback device list. */\n-\trte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);\n-\tLIST_REMOVE(sh, mem_event_cb);\n-\trte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);\n-\t/* Release created Memory Regions. */\n-\tmlx5_mr_release_cache(&sh->share_cache);\n \t/* Remove context from the global device list. */\n \tLIST_REMOVE(sh, next);\n \t/* Release flow workspaces objects on the last device. */\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 5c25b94f36..4f823baa6d 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1142,9 +1142,6 @@ struct mlx5_dev_ctx_shared {\n \tchar ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */\n \tstruct mlx5_dev_attr device_attr; /* Device properties. */\n \tint numa_node; /* Numa node of backing physical device. */\n-\tLIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;\n-\t/**< Called by memory event callback. */\n-\tstruct mlx5_mr_share_cache share_cache;\n \t/* Packet pacing related structure. */\n \tstruct mlx5_dev_txpp txpp;\n \t/* Shared DV/DR flow data section. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex 8f3d2ffc2c..1fc1000b01 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -60,17 +60,17 @@ mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,\n /**\n  * Free MR resources.\n  *\n- * @param[in] sh\n- *   Pointer to shared device context.\n+ * @param[in] cdev\n+ *   Pointer to the mlx5 common device.\n  * @param[in] mr\n  *   MR to free.\n  */\n static void\n-mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)\n+mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr)\n {\n \tvoid *addr = mr->addr;\n \n-\tsh->share_cache.dereg_mr_cb(mr);\n+\tcdev->mr_scache.dereg_mr_cb(mr);\n \tmlx5_free(addr);\n \tmemset(mr, 0, sizeof(*mr));\n }\n@@ -78,8 +78,8 @@ mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)\n /**\n  * Register Memory Region.\n  *\n- * @param[in] sh\n- *   Pointer to shared device context.\n+ * @param[in] cdev\n+ *   Pointer to the mlx5 common device.\n  * @param[in] length\n  *   Size of MR buffer.\n  * @param[in/out] mr\n@@ -91,7 +91,7 @@ mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,\n+mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,\n \t\tstruct mlx5_pmd_mr *mr, int socket)\n {\n \n@@ -103,7 +103,7 @@ mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,\n \t\tDRV_LOG(ERR, \"Failed to create ASO bits mem for MR.\");\n \t\treturn -1;\n \t}\n-\tret = sh->share_cache.reg_mr_cb(sh->cdev->pd, mr->addr, length, mr);\n+\tret = cdev->mr_scache.reg_mr_cb(cdev->pd, mr->addr, length, mr);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to create direct Mkey.\");\n \t\tmlx5_free(mr->addr);\n@@ -313,14 +313,14 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,\n \n \tswitch (aso_opc_mod) {\n \tcase ASO_OPC_MOD_FLOW_HIT:\n-\t\tif (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *\n+\t\tif (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *\n \t\t\t\t    sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))\n \t\t\treturn -1;\n \t\tif (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,\n \t\t\t\t       sh->tx_uar, cdev->pdn,\n \t\t\t\t       MLX5_ASO_QUEUE_LOG_DESC,\n \t\t\t\t       cdev->config.hca_attr.sq_ts_format)) {\n-\t\t\tmlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);\n+\t\t\tmlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr);\n \t\t\treturn -1;\n \t\t}\n \t\tmlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);\n@@ -335,14 +335,14 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,\n \t\tbreak;\n \tcase ASO_OPC_MOD_CONNECTION_TRACKING:\n \t\t/* 64B per object for query. 
*/\n-\t\tif (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,\n+\t\tif (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,\n \t\t\t\t    &sh->ct_mng->aso_sq.mr, 0))\n \t\t\treturn -1;\n \t\tif (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,\n \t\t\t\t       sh->tx_uar, cdev->pdn,\n \t\t\t\t       MLX5_ASO_QUEUE_LOG_DESC,\n \t\t\t\t       cdev->config.hca_attr.sq_ts_format)) {\n-\t\t\tmlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);\n+\t\t\tmlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);\n \t\t\treturn -1;\n \t\t}\n \t\tmlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);\n@@ -370,14 +370,14 @@ mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,\n \n \tswitch (aso_opc_mod) {\n \tcase ASO_OPC_MOD_FLOW_HIT:\n-\t\tmlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);\n+\t\tmlx5_aso_dereg_mr(sh->cdev, &sh->aso_age_mng->aso_sq.mr);\n \t\tsq = &sh->aso_age_mng->aso_sq;\n \t\tbreak;\n \tcase ASO_OPC_MOD_POLICER:\n \t\tsq = &sh->mtrmng->pools_mng.sq;\n \t\tbreak;\n \tcase ASO_OPC_MOD_CONNECTION_TRACKING:\n-\t\tmlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);\n+\t\tmlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr);\n \t\tsq = &sh->ct_mng->aso_sq;\n \t\tbreak;\n \tdefault:\ndiff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c\nindex 9ce973d95c..38780202dc 100644\n--- a/drivers/net/mlx5/mlx5_mr.c\n+++ b/drivers/net/mlx5/mlx5_mr.c\n@@ -12,46 +12,10 @@\n #include <mlx5_common_mr.h>\n \n #include \"mlx5.h\"\n-#include \"mlx5_mr.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_rx.h\"\n #include \"mlx5_tx.h\"\n \n-/**\n- * Callback for memory event. This can be called from both primary and secondary\n- * process.\n- *\n- * @param event_type\n- *   Memory event type.\n- * @param addr\n- *   Address of memory.\n- * @param len\n- *   Size of memory.\n- */\n-void\n-mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,\n-\t\t     size_t len, void *arg __rte_unused)\n-{\n-\tstruct mlx5_dev_ctx_shared *sh;\n-\tstruct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;\n-\n-\t/* Must be called from the primary process. */\n-\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n-\tswitch (event_type) {\n-\tcase RTE_MEM_EVENT_FREE:\n-\t\trte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);\n-\t\t/* Iterate all the existing mlx5 devices. */\n-\t\tLIST_FOREACH(sh, dev_list, mem_event_cb)\n-\t\t\tmlx5_free_mr_by_addr(&sh->share_cache,\n-\t\t\t\t\t     sh->ibdev_name, addr, len);\n-\t\trte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);\n-\t\tbreak;\n-\tcase RTE_MEM_EVENT_ALLOC:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n /**\n  * Bottom-half of LKey search on Tx.\n  *\n@@ -72,7 +36,7 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)\n \tstruct mlx5_priv *priv = txq_ctrl->priv;\n \n \treturn mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,\n-\t\t\t\t  &priv->sh->share_cache, mr_ctrl, addr,\n+\t\t\t\t  &priv->sh->cdev->mr_scache, mr_ctrl, addr,\n \t\t\t\t  priv->sh->cdev->config.mr_ext_memseg_en);\n }\n \n@@ -110,7 +74,7 @@ mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)\n \t\t\tmp = buf->mp;\n \t\t}\n \t\tif (mp != NULL) {\n-\t\t\tlkey = mlx5_mr_mempool2mr_bh(&priv->sh->share_cache,\n+\t\t\tlkey = mlx5_mr_mempool2mr_bh(&priv->sh->cdev->mr_scache,\n \t\t\t\t\t\t     mr_ctrl, mp, addr);\n \t\t\t/*\n \t\t\t * Lookup can only fail on invalid input, e.g. 
\"addr\"\n@@ -169,7 +133,7 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,\n \tstruct rte_eth_dev *dev;\n \tstruct mlx5_mr *mr;\n \tstruct mlx5_priv *priv;\n-\tstruct mlx5_dev_ctx_shared *sh;\n+\tstruct mlx5_common_device *cdev;\n \n \tdev = dev_to_eth_dev(rte_dev);\n \tif (!dev) {\n@@ -179,20 +143,20 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,\n \t\treturn -1;\n \t}\n \tpriv = dev->data->dev_private;\n-\tsh = priv->sh;\n-\tmr = mlx5_create_mr_ext(sh->cdev->pd, (uintptr_t)addr, len,\n-\t\t\t\tSOCKET_ID_ANY, sh->share_cache.reg_mr_cb);\n+\tcdev = priv->sh->cdev;\n+\tmr = mlx5_create_mr_ext(cdev->pd, (uintptr_t)addr, len,\n+\t\t\t\tSOCKET_ID_ANY, cdev->mr_scache.reg_mr_cb);\n \tif (!mr) {\n \t\tDRV_LOG(WARNING,\n \t\t\t\"port %u unable to dma map\", dev->data->port_id);\n \t\trte_errno = EINVAL;\n \t\treturn -1;\n \t}\n-\trte_rwlock_write_lock(&sh->share_cache.rwlock);\n-\tLIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);\n+\trte_rwlock_write_lock(&cdev->mr_scache.rwlock);\n+\tLIST_INSERT_HEAD(&cdev->mr_scache.mr_list, mr, mr);\n \t/* Insert to the global cache table. */\n-\tmlx5_mr_insert_cache(&sh->share_cache, mr);\n-\trte_rwlock_write_unlock(&sh->share_cache.rwlock);\n+\tmlx5_mr_insert_cache(&cdev->mr_scache, mr);\n+\trte_rwlock_write_unlock(&cdev->mr_scache.rwlock);\n \treturn 0;\n }\n \n@@ -217,7 +181,7 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,\n {\n \tstruct rte_eth_dev *dev;\n \tstruct mlx5_priv *priv;\n-\tstruct mlx5_dev_ctx_shared *sh;\n+\tstruct mlx5_common_device *cdev;\n \tstruct mlx5_mr *mr;\n \tstruct mr_cache_entry entry;\n \n@@ -229,11 +193,11 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,\n \t\treturn -1;\n \t}\n \tpriv = dev->data->dev_private;\n-\tsh = priv->sh;\n-\trte_rwlock_write_lock(&sh->share_cache.rwlock);\n-\tmr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);\n+\tcdev = priv->sh->cdev;\n+\trte_rwlock_write_lock(&cdev->mr_scache.rwlock);\n+\tmr = mlx5_mr_lookup_list(&cdev->mr_scache, &entry, (uintptr_t)addr);\n \tif (!mr) {\n-\t\trte_rwlock_write_unlock(&sh->share_cache.rwlock);\n+\t\trte_rwlock_write_unlock(&cdev->mr_scache.rwlock);\n \t\tDRV_LOG(WARNING, \"address 0x%\" PRIxPTR \" wasn't registered to device %s\",\n \t\t\t(uintptr_t)addr, rte_dev->name);\n \t\trte_errno = EINVAL;\n@@ -242,16 +206,16 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,\n \tLIST_REMOVE(mr, mr);\n \tDRV_LOG(DEBUG, \"port %u remove MR(%p) from list\", dev->data->port_id,\n \t      (void *)mr);\n-\tmlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);\n-\tmlx5_mr_rebuild_cache(&sh->share_cache);\n+\tmlx5_mr_free(mr, cdev->mr_scache.dereg_mr_cb);\n+\tmlx5_mr_rebuild_cache(&cdev->mr_scache);\n \t/*\n \t * No explicit wmb is needed after updating dev_gen due to\n \t * store-release ordering in unlock that provides the\n \t * implicit barrier at the software visible level.\n \t */\n-\t++sh->share_cache.dev_gen;\n+\t++cdev->mr_scache.dev_gen;\n \tDRV_LOG(DEBUG, \"broadcasting local cache flush, gen=%d\",\n-\t      sh->share_cache.dev_gen);\n-\trte_rwlock_write_unlock(&sh->share_cache.rwlock);\n+\t      cdev->mr_scache.dev_gen);\n+\trte_rwlock_write_unlock(&cdev->mr_scache.rwlock);\n \treturn 0;\n }\ndiff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h\ndeleted file mode 100644\nindex c984e777b5..0000000000\n--- a/drivers/net/mlx5/mlx5_mr.h\n+++ /dev/null\n@@ -1,26 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright 2018 6WIND S.A.\n- * Copyright 2018 Mellanox Technologies, 
Ltd\n- */\n-\n-#ifndef RTE_PMD_MLX5_MR_H_\n-#define RTE_PMD_MLX5_MR_H_\n-\n-#include <stddef.h>\n-#include <stdint.h>\n-#include <sys/queue.h>\n-\n-#include <rte_ethdev.h>\n-#include <rte_rwlock.h>\n-#include <rte_bitmap.h>\n-#include <rte_memory.h>\n-\n-#include <mlx5_common_mr.h>\n-\n-/* First entry must be NULL for comparison. */\n-#define mlx5_mr_btree_len(bt) ((bt)->len - 1)\n-\n-void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,\n-\t\t\t  size_t len, void *arg);\n-\n-#endif /* RTE_PMD_MLX5_MR_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c\nindex e3b1051ba4..c83c7f4a39 100644\n--- a/drivers/net/mlx5/mlx5_rx.c\n+++ b/drivers/net/mlx5/mlx5_rx.c\n@@ -22,7 +22,6 @@\n #include \"mlx5_autoconf.h\"\n #include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n-#include \"mlx5_mr.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_rx.h\"\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 1b00076fe7..11e4330935 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -18,11 +18,13 @@\n \n #include \"mlx5.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_mr.h\"\n \n /* Support tunnel matching. */\n #define MLX5_FLOW_TUNNEL 10\n \n+/* First entry must be NULL for comparison. */\n+#define mlx5_mr_btree_len(bt) ((bt)->len - 1)\n+\n struct mlx5_rxq_stats {\n #ifdef MLX5_PMD_SOFT_COUNTERS\n \tuint64_t ipackets; /**< Total of successfully received packets. */\n@@ -309,7 +311,7 @@ mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)\n \t */\n \trxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);\n \tmp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;\n-\treturn mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->share_cache,\n+\treturn mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->cdev->mr_scache,\n \t\t\t\t     mr_ctrl, mp, addr);\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 53c8c5439d..b866cbfa20 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -1242,7 +1242,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)\n \t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n-\tret = mlx5_mr_mempool_register(&priv->sh->share_cache,\n+\tret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,\n \t\t\t\t       priv->sh->cdev->pd, mp, &priv->mp_id);\n \tif (ret < 0 && rte_errno != EEXIST) {\n \t\tret = rte_errno;\n@@ -1450,7 +1450,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t}\n \ttmpl->type = MLX5_RXQ_TYPE_STANDARD;\n \tif (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,\n-\t\t\t      &priv->sh->share_cache.dev_gen, socket)) {\n+\t\t\t      &priv->sh->cdev->mr_scache.dev_gen, socket)) {\n \t\t/* rte_errno is already set. 
*/\n \t\tgoto error;\n \t}\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex 7b984eff35..ed1f2d2c8c 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -22,7 +22,6 @@\n #include \"mlx5_autoconf.h\"\n #include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n-#include \"mlx5_mr.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_rx.h\"\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex ad1144e218..b400295e7d 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -24,7 +24,6 @@\n #include \"mlx5_utils.h\"\n #include \"mlx5.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_mr.h\"\n \n struct mlx5_priv;\n \ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h\nindex 93b4f517bb..1aec72817e 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h\n@@ -12,7 +12,6 @@\n #include <mlx5_prm.h>\n \n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_mr.h\"\n \n /* HW checksum offload capabilities of vectorized Tx. */\n #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \\\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex cf4fbd3c9f..54c2893437 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -14,7 +14,6 @@\n #include <mlx5_malloc.h>\n \n #include \"mlx5.h\"\n-#include \"mlx5_mr.h\"\n #include \"mlx5_rx.h\"\n #include \"mlx5_tx.h\"\n #include \"mlx5_utils.h\"\n@@ -148,7 +147,7 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)\n \t}\n \tfor (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {\n \t\tmp = rxq_ctrl->rxq.rxseg[s].mp;\n-\t\tret = mlx5_mr_mempool_register(&priv->sh->share_cache,\n+\t\tret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,\n \t\t\t\t\t       priv->sh->cdev->pd, mp,\n \t\t\t\t\t       &priv->mp_id);\n \t\tif (ret < 0 && rte_errno != EEXIST)\ndiff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c\nindex df671379e4..2cc9ae6772 100644\n--- a/drivers/net/mlx5/mlx5_tx.c\n+++ b/drivers/net/mlx5/mlx5_tx.c\n@@ -22,7 +22,6 @@\n #include \"mlx5_autoconf.h\"\n #include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n-#include \"mlx5_mr.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_tx.h\"\ndiff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h\nindex cdbcf659df..bab9008d9b 100644\n--- a/drivers/net/mlx5/mlx5_tx.h\n+++ b/drivers/net/mlx5/mlx5_tx.h\n@@ -18,7 +18,6 @@\n \n #include \"mlx5.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_mr.h\"\n \n /* TX burst subroutines return codes. */\n enum mlx5_txcmp_code {\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex f12510712a..dee3e4a279 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -1118,7 +1118,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\treturn NULL;\n \t}\n \tif (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,\n-\t\t\t      &priv->sh->share_cache.dev_gen, socket)) {\n+\t\t\t      &priv->sh->cdev->mr_scache.dev_gen, socket)) {\n \t\t/* rte_errno is already set. 
*/\n \t\tgoto error;\n \t}\ndiff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c\nindex c3d4b90946..afdfff8b36 100644\n--- a/drivers/net/mlx5/windows/mlx5_os.c\n+++ b/drivers/net/mlx5/windows/mlx5_os.c\n@@ -26,7 +26,6 @@\n #include \"mlx5_rx.h\"\n #include \"mlx5_tx.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_mr.h\"\n #include \"mlx5_flow.h\"\n #include \"mlx5_devx.h\"\n \n@@ -122,21 +121,8 @@ mlx5_init_shared_data(void)\n static int\n mlx5_init_once(void)\n {\n-\tstruct mlx5_shared_data *sd;\n-\n \tif (mlx5_init_shared_data())\n \t\treturn -rte_errno;\n-\tsd = mlx5_shared_data;\n-\trte_spinlock_lock(&sd->lock);\n-\tMLX5_ASSERT(sd);\n-\tif (!sd->init_done) {\n-\t\tLIST_INIT(&sd->mem_event_cb_list);\n-\t\trte_rwlock_init(&sd->mem_event_rwlock);\n-\t\trte_mem_event_callback_register(\"MLX5_MEM_EVENT_CB\",\n-\t\t\t\t\t\tmlx5_mr_mem_event_cb, NULL);\n-\t\tsd->init_done = true;\n-\t}\n-\trte_spinlock_unlock(&sd->lock);\n \treturn 0;\n }\n \ndiff --git a/drivers/regex/mlx5/mlx5_regex.c b/drivers/regex/mlx5/mlx5_regex.c\nindex b39181ebb5..7f900b67ee 100644\n--- a/drivers/regex/mlx5/mlx5_regex.c\n+++ b/drivers/regex/mlx5/mlx5_regex.c\n@@ -25,10 +25,6 @@\n \n int mlx5_regex_logtype;\n \n-TAILQ_HEAD(regex_mem_event, mlx5_regex_priv) mlx5_mem_event_list =\n-\t\t\t\tTAILQ_HEAD_INITIALIZER(mlx5_mem_event_list);\n-static pthread_mutex_t mem_event_list_lock = PTHREAD_MUTEX_INITIALIZER;\n-\n const struct rte_regexdev_ops mlx5_regexdev_ops = {\n \t.dev_info_get = mlx5_regex_info_get,\n \t.dev_configure = mlx5_regex_configure,\n@@ -86,41 +82,6 @@ mlx5_regex_get_name(char *name, struct rte_device *dev)\n \tsprintf(name, \"mlx5_regex_%s\", dev->name);\n }\n \n-/**\n- * Callback for memory event.\n- *\n- * @param event_type\n- *   Memory event type.\n- * @param addr\n- *   Address of memory.\n- * @param len\n- *   Size of memory.\n- */\n-static void\n-mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,\n-\t\t\t   size_t len, void *arg __rte_unused)\n-{\n-\tstruct mlx5_regex_priv *priv;\n-\n-\t/* Must be called from the primary process. */\n-\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n-\tswitch (event_type) {\n-\tcase RTE_MEM_EVENT_FREE:\n-\t\tpthread_mutex_lock(&mem_event_list_lock);\n-\t\t/* Iterate all the existing mlx5 devices. */\n-\t\tTAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)\n-\t\t\tmlx5_free_mr_by_addr(&priv->mr_scache,\n-\t\t\t\t\t     mlx5_os_get_ctx_device_name\n-\t\t\t\t\t\t\t      (priv->cdev->ctx),\n-\t\t\t\t\t     addr, len);\n-\t\tpthread_mutex_unlock(&mem_event_list_lock);\n-\t\tbreak;\n-\tcase RTE_MEM_EVENT_ALLOC:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n static int\n mlx5_regex_dev_probe(struct mlx5_common_device *cdev)\n {\n@@ -194,21 +155,6 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)\n \tpriv->regexdev->device = cdev->dev;\n \tpriv->regexdev->data->dev_private = priv;\n \tpriv->regexdev->state = RTE_REGEXDEV_READY;\n-\tret = mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id());\n-\tif (ret) {\n-\t\tDRV_LOG(ERR, \"MR init tree failed.\");\n-\t    rte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n-\t/* Register callback function for global shared MR cache management. */\n-\tif (TAILQ_EMPTY(&mlx5_mem_event_list))\n-\t\trte_mem_event_callback_register(\"MLX5_MEM_EVENT_CB\",\n-\t\t\t\t\t\tmlx5_regex_mr_mem_event_cb,\n-\t\t\t\t\t\tNULL);\n-\t/* Add device to memory callback list. 
*/\n-\tpthread_mutex_lock(&mem_event_list_lock);\n-\tTAILQ_INSERT_TAIL(&mlx5_mem_event_list, priv, mem_event_cb);\n-\tpthread_mutex_unlock(&mem_event_list_lock);\n \tDRV_LOG(INFO, \"RegEx GGA is %s.\",\n \t\tpriv->has_umr ? \"supported\" : \"unsupported\");\n \treturn 0;\n@@ -237,15 +183,6 @@ mlx5_regex_dev_remove(struct mlx5_common_device *cdev)\n \t\treturn 0;\n \tpriv = dev->data->dev_private;\n \tif (priv) {\n-\t\t/* Remove from memory callback device list. */\n-\t\tpthread_mutex_lock(&mem_event_list_lock);\n-\t\tTAILQ_REMOVE(&mlx5_mem_event_list, priv, mem_event_cb);\n-\t\tpthread_mutex_unlock(&mem_event_list_lock);\n-\t\tif (TAILQ_EMPTY(&mlx5_mem_event_list))\n-\t\t\trte_mem_event_callback_unregister(\"MLX5_MEM_EVENT_CB\",\n-\t\t\t\t\t\t\t  NULL);\n-\t\tif (priv->mr_scache.cache.table)\n-\t\t\tmlx5_mr_release_cache(&priv->mr_scache);\n \t\tif (priv->uar)\n \t\t\tmlx5_glue->devx_free_uar(priv->uar);\n \t\tif (priv->regexdev)\ndiff --git a/drivers/regex/mlx5/mlx5_regex.h b/drivers/regex/mlx5/mlx5_regex.h\nindex be81931b3a..eb59cc38a6 100644\n--- a/drivers/regex/mlx5/mlx5_regex.h\n+++ b/drivers/regex/mlx5/mlx5_regex.h\n@@ -68,9 +68,6 @@ struct mlx5_regex_priv {\n \t\t\t\tMLX5_RXP_EM_COUNT];\n \tuint32_t nb_engines; /* Number of RegEx engines. */\n \tstruct mlx5dv_devx_uar *uar; /* UAR object. */\n-\tTAILQ_ENTRY(mlx5_regex_priv) mem_event_cb;\n-\t/**< Called by memory event callback. */\n-\tstruct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */\n \tuint8_t is_bf2; /* The device is BF2 device. */\n \tuint8_t has_umr; /* The device supports UMR. */\n \tuint32_t mmo_regex_qp_cap:1;\ndiff --git a/drivers/regex/mlx5/mlx5_regex_control.c b/drivers/regex/mlx5/mlx5_regex_control.c\nindex 6735e51976..50c966a022 100644\n--- a/drivers/regex/mlx5/mlx5_regex_control.c\n+++ b/drivers/regex/mlx5/mlx5_regex_control.c\n@@ -242,7 +242,7 @@ mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,\n \t\tnb_sq_config++;\n \t}\n \n-\tret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,\n+\tret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,\n \t\t\t\trte_socket_id());\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Error setting up mr btree\");\ndiff --git a/drivers/regex/mlx5/mlx5_regex_fastpath.c b/drivers/regex/mlx5/mlx5_regex_fastpath.c\nindex 8817e2e074..adb5343a46 100644\n--- a/drivers/regex/mlx5/mlx5_regex_fastpath.c\n+++ b/drivers/regex/mlx5/mlx5_regex_fastpath.c\n@@ -126,7 +126,7 @@ static inline uint32_t\n mlx5_regex_mb2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl,\n \t\t struct rte_mbuf *mbuf)\n {\n-\treturn mlx5_mr_mb2mr(priv->cdev, 0, mr_ctrl, mbuf, &priv->mr_scache);\n+\treturn mlx5_mr_mb2mr(priv->cdev, 0, mr_ctrl, mbuf);\n }\n \n static inline void\n",
    "prefixes": [
        "v3",
        "16/18"
    ]
}
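
For reference, a minimal sketch of driving these endpoints from Python with the requests library (an assumption; any HTTP client would do). Reading a patch needs no authentication; the commented-out update call assumes token authentication with sufficient permissions and uses a placeholder token.

# Minimal sketch: read this patch via the Patchwork REST API and fetch its mbox.
import requests

url = "http://patches.dpdk.org/api/patches/102322/"

# GET: show the patch (matches the JSON response above).
patch = requests.get(url).json()
print(patch["name"], patch["state"])   # "[v3,16/18] common/mlx5: share MR management" "accepted"

# The mbox link in the response can be downloaded directly.
mbox = requests.get(patch["mbox"]).text

# PATCH/PUT: update the patch. This assumes an API token (placeholder below)
# and an account permitted to change patch state for this project.
# requests.patch(url,
#                headers={"Authorization": "Token <your-token>"},
#                json={"state": "accepted"})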