get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.

GET /api/patches/94897/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94897,
    "url": "https://patches.dpdk.org/api/patches/94897/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210628150614.1769507-1-michaelba@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210628150614.1769507-1-michaelba@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210628150614.1769507-1-michaelba@nvidia.com",
    "date": "2021-06-28T15:06:14",
    "name": "common/mlx5: share memory free callback",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "87112a83ec996e313911d523679c3a3b2659642e",
    "submitter": {
        "id": 1949,
        "url": "https://patches.dpdk.org/api/people/1949/?format=api",
        "name": "Michael Baum",
        "email": "michaelba@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210628150614.1769507-1-michaelba@nvidia.com/mbox/",
    "series": [
        {
            "id": 17502,
            "url": "https://patches.dpdk.org/api/series/17502/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17502",
            "date": "2021-06-28T15:06:14",
            "name": "common/mlx5: share memory free callback",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/17502/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/94897/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/94897/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 0CBBDA0A0C;\n\tMon, 28 Jun 2021 17:06:55 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 76B0940692;\n\tMon, 28 Jun 2021 17:06:54 +0200 (CEST)",
            "from NAM11-CO1-obe.outbound.protection.outlook.com\n (mail-co1nam11on2045.outbound.protection.outlook.com [40.107.220.45])\n by mails.dpdk.org (Postfix) with ESMTP id 5DB664068A;\n Mon, 28 Jun 2021 17:06:52 +0200 (CEST)",
            "from DM5PR15CA0067.namprd15.prod.outlook.com (2603:10b6:3:ae::29) by\n CY4PR1201MB0166.namprd12.prod.outlook.com (2603:10b6:910:24::23) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4264.23; Mon, 28 Jun\n 2021 15:06:50 +0000",
            "from DM6NAM11FT066.eop-nam11.prod.protection.outlook.com\n (2603:10b6:3:ae:cafe::bc) by DM5PR15CA0067.outlook.office365.com\n (2603:10b6:3:ae::29) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4264.18 via Frontend\n Transport; Mon, 28 Jun 2021 15:06:49 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT066.mail.protection.outlook.com (10.13.173.179) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4264.18 via Frontend Transport; Mon, 28 Jun 2021 15:06:48 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Mon, 28 Jun\n 2021 15:06:44 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=XP0sEgK4Ys0jLhYUNPu5RA33ups/7/bKphw/Qj8VbnNPI1GHWp8Oz7rSk3+p4I2EgOA2syXmCHdEfZVE4CwwfCHXLkPlO/DDTgLO8Uhd60WglGzbltAm/sQra7WcXTwc7cgRjfkhYLbTtNBx8EljEX3PUeFj+n9WVQ1WYjVu88WyHYSspyjhLaDnp2XBKxrhNFmVE7JEiiqJlRetXOACXKbmXsZzfj6b0X+LZugDAHZwMVRT2GYZYbPAOtKWRP4mOJXh3XiwM4zreZnWuPhyqhdxx9dm3BxJPEJB3V4zneTi6tVqC+uNS3dqVAEON3W8hs7qYmx27H7sliG/vzU6yg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=q0yh8OlUIr7sqKOL/zuoFfj0CZMolJAdhyuiPGp75HQ=;\n b=nbiPXDUYPkbjmCKp5kVLvdy/1O/I4GYYQCrHWDjgtzj6jIOpKkgCO0fTdOLrkBthF3ChmygkAYGkcCNi38heLLdkCcJ84kiCGU5+cVpnM1DNL/19fuPOJ8oufLCeGvZjw5fpDtBjAYVWReyBKM+tBhQPHx8RpMUvf4LHsGTv7HumECAnGdQoKsq3068HkVHVFRH5QJVdkPmjXpOU6zqm9S/wSUl1CxgDyGSyDsXY5m/sWhAv99Xdrkifh3Wm9+Z3MZn+rfRuPL0NyTLyFpNj6+y4g7jZA3e+Vol6N8gP0PJDlubA92HEzwMU/xqVb9G/KkYlYVxGp4H55ThQTSLcOg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=q0yh8OlUIr7sqKOL/zuoFfj0CZMolJAdhyuiPGp75HQ=;\n b=VnFoyrhVLJ2ZgqvzctsmN5HbS7dPr+TN1awTyo2NGLiuCPK4LyO8phgTTvYxZUWUSDIsXEhpB1UU9N3JLjQ3qeOPWGoMI+t/AOSahOwFAhfbGvBni2owZkJEiZyvkkPz+82ywckd5zLKjUTtB7B9miUhpKyRysB1wiUthWiHHaWqHta4bSVebT/pzGi8K4wxcIB/UwXqAuZOY2K3PeteDqlMEfflOyvj1vLUYgWYHo5E7pqB0oxcTeNZ3XKhnbwzrc+pHrXgHGlYCaZw7U746SOSkYlSkoc6W6WZPO8NDPEpUhYUvLMsXZFV7y9hHRd6i6/oj1q5FJHP71vEtm2RVw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Michael Baum <michaelba@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Matan Azrad <matan@nvidia.com>, Raslan Darawsheh <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, <stable@dpdk.org>",
        "Date": "Mon, 28 Jun 2021 18:06:14 +0300",
        "Message-ID": "<20210628150614.1769507-1-michaelba@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "0c1dab82-cc3b-4090-2004-08d93a465a89",
        "X-MS-TrafficTypeDiagnostic": "CY4PR1201MB0166:",
        "X-Microsoft-Antispam-PRVS": "\n <CY4PR1201MB016629D570E5051B15AAC29DCC039@CY4PR1201MB0166.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:37;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n smFGjX9vBrswzLHpBGwuJnx1o3zDThQy5g5xvakErv67K1tfipSo9CkeVSXriFH7Fi9lsPqOXPGM8P9uI6bHmAW1MdEQPgIp4d+SyAVlbFy/hl0/Kw8iokh4ogyvlH6uPQe7CN8oLERVPSriwAMp+Hr4gb8Ns5/2cZ2NAwGH0Ved90W3l3SMSNSy02908vBn7ACxcZjbY0UBtntvc+HyVORzPWCX5+NA56oCIGeH3lnou4855Ra4/tmmQittwtfiXNLgHBI+syncPJn/tadU/e8acUA/29xDlp1hMXn5AnWtZ0AzPNMkF07LwkTWSnvJC2AUlOTZ5hSS9zmlYn/slJwIinohzEfy50S3GAjTJY+gaq46O2hgBOWAlbUVFvWtpS98MSWq/kd0J8ZIilxGUy2ow6l1cRJ0eIfByvAHaOPBZwbrx62fQfetj1KVzvUPWqwgej074hp9azzFy/ejb+Dm8++LxVuDY9S8H4ihE1uTuBKa7ilNd0eQdLnOGXnOELebCPvc7+9LvMsAzjQNYLgU0FOpdgBShOy6IwR5AfUSu/OC3YcU9WCw2EuNEPXHvdlPLKr1qmqY/Ca73lwZco9KwLcrt0ZOKPoGRX+QMUX7YZpsN0k/ArUqbdNqNeNIlic72cK1twStTrypelyR8JqnYxU73+q8+oSN1hdWaTit6U3A6D4Es9XWQp/5LDNuPX6QRgPqopHD8TteLsf8Zw==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(136003)(376002)(39860400002)(396003)(346002)(36840700001)(46966006)(336012)(4326008)(426003)(86362001)(8936002)(8676002)(54906003)(450100002)(6916009)(2906002)(2616005)(83380400001)(45080400002)(1076003)(478600001)(316002)(5660300002)(186003)(70586007)(7636003)(47076005)(82310400003)(6666004)(55016002)(356005)(6286002)(16526019)(26005)(7696005)(82740400003)(36756003)(36860700001)(70206006);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Jun 2021 15:06:48.9564 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 0c1dab82-cc3b-4090-2004-08d93a465a89",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT066.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CY4PR1201MB0166",
        "Subject": "[dpdk-dev] [PATCH] common/mlx5: share memory free callback",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "All the mlx5 drivers using MRs for data-path must unregister the mapped\nmemory when it is freed by the dpdk process.\n\nCurrently, only the net/eth driver unregisters MRs in free event.\n\nMove the net callback handler from net driver to common.\n\nCc: stable@dpdk.org\n\nSigned-off-by: Michael Baum <michaelba@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/common/mlx5/mlx5_common_mr.c | 89 +++++++++++++++++++++++++++\n drivers/common/mlx5/mlx5_common_mr.h |  3 +\n drivers/common/mlx5/version.map      |  1 +\n drivers/net/mlx5/mlx5_mr.c           | 90 +---------------------------\n 4 files changed, 95 insertions(+), 88 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c\nindex afb5b3d0a7..98fe8698e2 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.c\n+++ b/drivers/common/mlx5/mlx5_common_mr.c\n@@ -1062,6 +1062,95 @@ mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,\n \treturn mr;\n }\n \n+/**\n+ * Callback for memory free event. Iterate freed memsegs and check whether it\n+ * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a\n+ * result, the MR would be fragmented. If it becomes empty, the MR will be freed\n+ * later by mlx5_mr_garbage_collect(). Even if this callback is called from a\n+ * secondary process, the garbage collector will be called in primary process\n+ * as the secondary process can't call mlx5_mr_create().\n+ *\n+ * The global cache must be rebuilt if there's any change and this event has to\n+ * be propagated to dataplane threads to flush the local caches.\n+ *\n+ * @param share_cache\n+ *   Pointer to a global shared MR cache.\n+ * @param ibdev_name\n+ *   Name of ibv device.\n+ * @param addr\n+ *   Address of freed memory.\n+ * @param len\n+ *   Size of freed memory.\n+ */\n+void\n+mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,\n+\t\t     const char *ibdev_name, const void *addr, size_t len)\n+{\n+\tconst struct rte_memseg_list *msl;\n+\tstruct mlx5_mr *mr;\n+\tint ms_n;\n+\tint i;\n+\tint rebuild = 0;\n+\n+\tDRV_LOG(DEBUG, \"device %s free callback: addr=%p, len=%zu\",\n+\t\tibdev_name, addr, len);\n+\tmsl = rte_mem_virt2memseg_list(addr);\n+\t/* addr and len must be page-aligned. */\n+\tMLX5_ASSERT((uintptr_t)addr ==\n+\t\t    RTE_ALIGN((uintptr_t)addr, msl->page_sz));\n+\tMLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));\n+\tms_n = len / msl->page_sz;\n+\trte_rwlock_write_lock(&share_cache->rwlock);\n+\t/* Clear bits of freed memsegs from MR. */\n+\tfor (i = 0; i < ms_n; ++i) {\n+\t\tconst struct rte_memseg *ms;\n+\t\tstruct mr_cache_entry entry;\n+\t\tuintptr_t start;\n+\t\tint ms_idx;\n+\t\tuint32_t pos;\n+\n+\t\t/* Find MR having this memseg. */\n+\t\tstart = (uintptr_t)addr + i * msl->page_sz;\n+\t\tmr = mlx5_mr_lookup_list(share_cache, &entry, start);\n+\t\tif (mr == NULL)\n+\t\t\tcontinue;\n+\t\tMLX5_ASSERT(mr->msl); /* Can't be external memory. */\n+\t\tms = rte_mem_virt2memseg((void *)start, msl);\n+\t\tMLX5_ASSERT(ms != NULL);\n+\t\tMLX5_ASSERT(msl->page_sz == ms->hugepage_sz);\n+\t\tms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);\n+\t\tpos = ms_idx - mr->ms_base_idx;\n+\t\tMLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));\n+\t\tMLX5_ASSERT(pos < mr->ms_bmp_n);\n+\t\tDRV_LOG(DEBUG, \"device %s MR(%p): clear bitmap[%u] for addr %p\",\n+\t\t\tibdev_name, (void *)mr, pos, (void *)start);\n+\t\trte_bitmap_clear(mr->ms_bmp, pos);\n+\t\tif (--mr->ms_n == 0) {\n+\t\t\tLIST_REMOVE(mr, mr);\n+\t\t\tLIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);\n+\t\t\tDRV_LOG(DEBUG, \"device %s remove MR(%p) from list\",\n+\t\t\t\tibdev_name, (void *)mr);\n+\t\t}\n+\t\t/*\n+\t\t * MR is fragmented or will be freed. the global cache must be\n+\t\t * rebuilt.\n+\t\t */\n+\t\trebuild = 1;\n+\t}\n+\tif (rebuild) {\n+\t\tmlx5_mr_rebuild_cache(share_cache);\n+\t\t/*\n+\t\t * No explicit wmb is needed after updating dev_gen due to\n+\t\t * store-release ordering in unlock that provides the\n+\t\t * implicit barrier at the software visible level.\n+\t\t */\n+\t\t++share_cache->dev_gen;\n+\t\tDRV_LOG(DEBUG, \"broadcasting local cache flush, gen=%d\",\n+\t\t\tshare_cache->dev_gen);\n+\t}\n+\trte_rwlock_write_unlock(&share_cache->rwlock);\n+}\n+\n /**\n  * Dump all the created MRs and the global cache entries.\n  *\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h\nindex 5cc3f097c2..6e465a05e9 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.h\n+++ b/drivers/common/mlx5/mlx5_common_mr.h\n@@ -144,6 +144,9 @@ void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);\n __rte_internal\n void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);\n __rte_internal\n+void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t  const char *ibdev_name, const void *addr, size_t len);\n+__rte_internal\n int\n mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,\n \t\t     struct mlx5_mr *mr);\ndiff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map\nindex db4f13f1f7..b8be73a77b 100644\n--- a/drivers/common/mlx5/version.map\n+++ b/drivers/common/mlx5/version.map\n@@ -103,6 +103,7 @@ INTERNAL {\n \tmlx5_mr_insert_cache;\n \tmlx5_mr_lookup_cache;\n \tmlx5_mr_lookup_list;\n+\tmlx5_free_mr_by_addr;\n \tmlx5_mr_rebuild_cache;\n \tmlx5_mr_release_cache;\n \ndiff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c\nindex 0c5403e493..0b6cfc8cb9 100644\n--- a/drivers/net/mlx5/mlx5_mr.c\n+++ b/drivers/net/mlx5/mlx5_mr.c\n@@ -31,93 +31,6 @@ struct mr_update_mp_data {\n \tint ret;\n };\n \n-/**\n- * Callback for memory free event. Iterate freed memsegs and check whether it\n- * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a\n- * result, the MR would be fragmented. If it becomes empty, the MR will be freed\n- * later by mlx5_mr_garbage_collect(). Even if this callback is called from a\n- * secondary process, the garbage collector will be called in primary process\n- * as the secondary process can't call mlx5_mr_create().\n- *\n- * The global cache must be rebuilt if there's any change and this event has to\n- * be propagated to dataplane threads to flush the local caches.\n- *\n- * @param sh\n- *   Pointer to the Ethernet device shared context.\n- * @param addr\n- *   Address of freed memory.\n- * @param len\n- *   Size of freed memory.\n- */\n-static void\n-mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,\n-\t\t\t  const void *addr, size_t len)\n-{\n-\tconst struct rte_memseg_list *msl;\n-\tstruct mlx5_mr *mr;\n-\tint ms_n;\n-\tint i;\n-\tint rebuild = 0;\n-\n-\tDRV_LOG(DEBUG, \"device %s free callback: addr=%p, len=%zu\",\n-\t      sh->ibdev_name, addr, len);\n-\tmsl = rte_mem_virt2memseg_list(addr);\n-\t/* addr and len must be page-aligned. */\n-\tMLX5_ASSERT((uintptr_t)addr ==\n-\t\t    RTE_ALIGN((uintptr_t)addr, msl->page_sz));\n-\tMLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));\n-\tms_n = len / msl->page_sz;\n-\trte_rwlock_write_lock(&sh->share_cache.rwlock);\n-\t/* Clear bits of freed memsegs from MR. */\n-\tfor (i = 0; i < ms_n; ++i) {\n-\t\tconst struct rte_memseg *ms;\n-\t\tstruct mr_cache_entry entry;\n-\t\tuintptr_t start;\n-\t\tint ms_idx;\n-\t\tuint32_t pos;\n-\n-\t\t/* Find MR having this memseg. */\n-\t\tstart = (uintptr_t)addr + i * msl->page_sz;\n-\t\tmr = mlx5_mr_lookup_list(&sh->share_cache, &entry, start);\n-\t\tif (mr == NULL)\n-\t\t\tcontinue;\n-\t\tMLX5_ASSERT(mr->msl); /* Can't be external memory. */\n-\t\tms = rte_mem_virt2memseg((void *)start, msl);\n-\t\tMLX5_ASSERT(ms != NULL);\n-\t\tMLX5_ASSERT(msl->page_sz == ms->hugepage_sz);\n-\t\tms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);\n-\t\tpos = ms_idx - mr->ms_base_idx;\n-\t\tMLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));\n-\t\tMLX5_ASSERT(pos < mr->ms_bmp_n);\n-\t\tDRV_LOG(DEBUG, \"device %s MR(%p): clear bitmap[%u] for addr %p\",\n-\t\t      sh->ibdev_name, (void *)mr, pos, (void *)start);\n-\t\trte_bitmap_clear(mr->ms_bmp, pos);\n-\t\tif (--mr->ms_n == 0) {\n-\t\t\tLIST_REMOVE(mr, mr);\n-\t\t\tLIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);\n-\t\t\tDRV_LOG(DEBUG, \"device %s remove MR(%p) from list\",\n-\t\t\t      sh->ibdev_name, (void *)mr);\n-\t\t}\n-\t\t/*\n-\t\t * MR is fragmented or will be freed. the global cache must be\n-\t\t * rebuilt.\n-\t\t */\n-\t\trebuild = 1;\n-\t}\n-\tif (rebuild) {\n-\t\tmlx5_mr_rebuild_cache(&sh->share_cache);\n-\t\t/*\n-\t\t * No explicit wmb is needed after updating dev_gen due to\n-\t\t * store-release ordering in unlock that provides the\n-\t\t * implicit barrier at the software visible level.\n-\t\t */\n-\t\t++sh->share_cache.dev_gen;\n-\t\tDRV_LOG(DEBUG, \"broadcasting local cache flush, gen=%d\",\n-\t\t      sh->share_cache.dev_gen);\n-\t}\n-\trte_rwlock_write_unlock(&sh->share_cache.rwlock);\n-}\n-\n /**\n  * Callback for memory event. This can be called from both primary and secondary\n  * process.\n@@ -143,7 +56,8 @@ mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,\n \t\trte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);\n \t\t/* Iterate all the existing mlx5 devices. */\n \t\tLIST_FOREACH(sh, dev_list, mem_event_cb)\n-\t\t\tmlx5_mr_mem_event_free_cb(sh, addr, len);\n+\t\t\tmlx5_free_mr_by_addr(&sh->share_cache,\n+\t\t\t\t\t     sh->ibdev_name, addr, len);\n \t\trte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);\n \t\tbreak;\n \tcase RTE_MEM_EVENT_ALLOC:\n",
    "prefixes": []
}