get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (full replacement).

GET /api/patches/95740/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95740,
    "url": "https://patches.dpdk.org/api/patches/95740/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210713084500.19964-10-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210713084500.19964-10-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210713084500.19964-10-suanmingm@nvidia.com",
    "date": "2021-07-13T08:44:43",
    "name": "[v6,09/26] net/mlx5: minimize list critical sections",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "f846539bd24a3b9b602f0dc6c0407bffca03ca8d",
    "submitter": {
        "id": 1887,
        "url": "https://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210713084500.19964-10-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 17790,
            "url": "https://patches.dpdk.org/api/series/17790/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17790",
            "date": "2021-07-13T08:44:34",
            "name": "net/mlx5: insertion rate optimization",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/17790/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/95740/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/95740/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B808BA0C48;\n\tTue, 13 Jul 2021 10:46:39 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C587B4122E;\n\tTue, 13 Jul 2021 10:45:39 +0200 (CEST)",
            "from NAM02-BN1-obe.outbound.protection.outlook.com\n (mail-bn1nam07on2044.outbound.protection.outlook.com [40.107.212.44])\n by mails.dpdk.org (Postfix) with ESMTP id 01533411D0\n for <dev@dpdk.org>; Tue, 13 Jul 2021 10:45:35 +0200 (CEST)",
            "from DM5PR1401CA0010.namprd14.prod.outlook.com (2603:10b6:4:4a::20)\n by DM5PR1201MB2472.namprd12.prod.outlook.com (2603:10b6:3:e1::23)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4308.23; Tue, 13 Jul\n 2021 08:45:34 +0000",
            "from DM6NAM11FT009.eop-nam11.prod.protection.outlook.com\n (2603:10b6:4:4a:cafe::6e) by DM5PR1401CA0010.outlook.office365.com\n (2603:10b6:4:4a::20) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4308.20 via Frontend\n Transport; Tue, 13 Jul 2021 08:45:34 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT009.mail.protection.outlook.com (10.13.173.20) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4308.20 via Frontend Transport; Tue, 13 Jul 2021 08:45:34 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 13 Jul\n 2021 08:45:32 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=AzAgBj0qOQPNwFdb8LRtdL6NOcsDwwVSfbbnmeKB22Zf1l4OR7Eh0Q5Aum5MhQEQNb0pzeJOpUln6U+7bHd32HpZwcVSZ8A6Cicnu8RhhnZtuJxVx6g4Aa/MPft+ZkNWHCC6SMvc0ALOG/xsBBgjg7EBzr/Y1REX09562NE+NPg0f6ciWeCkQuUuCFuikEwkdfTsz+YOQrjt4l2cZv0QbyrxlOvzfLZkMijuaE5ay2qhwhb+nZdx+eZU9d6I/ONIR7gHyuKqhrAkZ2Ltu6aCDT5LzB8haqNcSy99a/lazTEcEdjo4X5K7Exxc+4vljQtOeocLvL54PWBQ1lNU3quhg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=uV93wypaIqcIjR6KAkI2lwhTr9fA7fZQQi0UUl2XMbY=;\n b=UZxwm1AfNRmN0+M+CiuxfsnRgvlxj2ZT/7KZ1bib03B75OGHMiHfnHbi6cvPajwqVSlHiraePx/1zgy5ONdvRgFVx4rLIe9+X2foVbErSjJkLnV3stIwphKETg9cjup2R7RbXFmaF7wF/TpnCpF38lL1v4TNR+8jLP6m/EPIehcqXauz2fMsn0AmB7Ut8HYtG4f/QWKMNAFgHdRq3UpxoyIFnFM8AyZTbeFKtcmUvFt1E8SopDkf3JfIJfWfn7aqonhAs1bUmWQKRQa35fKv+9c/nK2vLKMGI8mor8TmP3mz4cIzZdnlRA5t/dDq/vttUDhzw7dxfYJAT57ekLpp1g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=uV93wypaIqcIjR6KAkI2lwhTr9fA7fZQQi0UUl2XMbY=;\n b=tKnRK4d1v4Fx/feuv+wg9CkCkitJmiPOEQLAgtv9AQCnHZv7fJl4kdh4TsDfwsSp+PpLZHbnRSKP1Pr9HhSKCoU7QnumJAwNxSVIM5Mb7rtJIKkBr1WIW0GlAIunmIW+KJ1H2t5PMrJFqm7SofYMrGfAUBScgK+CQIP5wTV2AhNqusWnBWgwsRBOdP/1sFQZMDszviNDpWJiJSmGKbXMcUkMJ9ku/YfZc9mGMv8xsfroAbSKjBurFyTCGuEfaBrGdvXh0TTTJTFHxb8+ajlVwssbwlMacNOL5HiJI8Uj4bp0MvmZYjlNsLcECkux8FPKPGpXymivnFA3dW8keeRzsA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Date": "Tue, 13 Jul 2021 11:44:43 +0300",
        "Message-ID": "<20210713084500.19964-10-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20210713084500.19964-1-suanmingm@nvidia.com>",
        "References": "<20210527093403.1153127-1-suanmingm@nvidia.com>\n <20210713084500.19964-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "21237d09-21db-4e1a-199a-08d945da9486",
        "X-MS-TrafficTypeDiagnostic": "DM5PR1201MB2472:",
        "X-Microsoft-Antispam-PRVS": "\n <DM5PR1201MB24720B6E96C774B92B3481B9C1149@DM5PR1201MB2472.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:883;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n Whuahf1X0mEBYBFYVYl1ihGpn492PhfnjQUmJBx1IFTKghmpP8ob3xsxvPoYkLMd4OXYPRXFt8uXvja1ubPl91qwQZGGw4NlUxAXL04B40JvKBUvfvr69TzjnFDcSxzQZI66Ao731zYxr/LpWwZauSN5S+YVdE996gD5s8xx9eF60sX7pF05EYXlgNzzdhsjSEKFk0gWSAzXBMAn/MXfZpMCK6R/60/dYuIlu0bzkkZ7NkrBRXuZrIjENFtlj0h95AIkIPgFiSha/yGhoZQzO/HV8db8aGhEVomhKzfPM0Ut4fsbEA4BSMf1dTi5jFbQ4jVpy1loyE/0pM44IVNlJgB4GqV1nVfukgp18Y9mIuQmm5cFtcMlOu9vYryD0oTRFo14SGpJgJNjMfVJfdk+ZEGr+DcfvEFtiWs7AEo3Dt0gUnhkx9p/wJvhx23mLtRWKrAZkPR8AePEl5Aq1LYRysrMCQ+zKdZJU5UKLihq6hR0u6GPpJ/Uat132bOe0BzWK9v+nvzD1d7qRVxM7opPyI/yxg5bE0p3wgmwAkxiNzAUjicdq9Uoi2pY5tSAV6og35OQuEc4LXda3AmZBDw46Z6XJ+CiGYcqOeS4V5tMKEfOFRdxZjFMMFEWOUbuCi81UDnPS50BQOpNmlF3Cw4vIH1kqiZdAJjR5HMHf9aHAwqq0syFyu73+FKRv7sSi1j6g639NwQCUyMlgQtkg2VvYQCAdhH3KUYOXm2lgZtgydk=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(346002)(39860400002)(136003)(376002)(396003)(46966006)(36840700001)(336012)(16526019)(36906005)(6636002)(54906003)(5660300002)(2906002)(82740400003)(186003)(1076003)(36756003)(47076005)(70586007)(86362001)(316002)(70206006)(426003)(36860700001)(26005)(110136005)(55016002)(6286002)(7696005)(7636003)(6666004)(83380400001)(356005)(8936002)(82310400003)(8676002)(4326008)(478600001)(2616005)(34020700004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "13 Jul 2021 08:45:34.5649 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 21237d09-21db-4e1a-199a-08d945da9486",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT009.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM5PR1201MB2472",
        "Subject": "[dpdk-dev] [PATCH v6 09/26] net/mlx5: minimize list critical\n sections",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Matan Azrad <matan@nvidia.com>\n\nThe mlx5 internal list utility is thread safe.\n\nIn order to synchronize list access between the threads, a RW lock is\ntaken for the critical sections.\n\nThe create\\remove\\clone\\clone_free operations are in the critical\nsections.\n\nThese operations are heavy and make the critical sections heavy because\nthey are used for memory and other resources allocations\\deallocations.\n\nMoved out the operations from the critical sections and use generation\ncounter in order to detect parallel allocations.\n\nSigned-off-by: Matan Azrad <matan@nvidia.com>\nAcked-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5_utils.c | 86 ++++++++++++++++++-----------------\n drivers/net/mlx5/mlx5_utils.h |  5 +-\n 2 files changed, 48 insertions(+), 43 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c\nindex f505caed4e..c4c9adb039 100644\n--- a/drivers/net/mlx5/mlx5_utils.c\n+++ b/drivers/net/mlx5/mlx5_utils.c\n@@ -101,7 +101,7 @@ mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,\n {\n \tstruct mlx5_list_entry *lentry = list->cb_clone(list, gentry, ctx);\n \n-\tif (!lentry)\n+\tif (unlikely(!lentry))\n \t\treturn NULL;\n \tlentry->ref_cnt = 1u;\n \tlentry->gentry = gentry;\n@@ -112,8 +112,8 @@ mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,\n struct mlx5_list_entry *\n mlx5_list_register(struct mlx5_list *list, void *ctx)\n {\n-\tstruct mlx5_list_entry *entry, *lentry;\n-\tuint32_t prev_gen_cnt = 0;\n+\tstruct mlx5_list_entry *entry, *local_entry;\n+\tvolatile uint32_t prev_gen_cnt = 0;\n \tint lcore_index = rte_lcore_index(rte_lcore_id());\n \n \tMLX5_ASSERT(list);\n@@ -122,51 +122,56 @@ mlx5_list_register(struct mlx5_list *list, void *ctx)\n \t\trte_errno = ENOTSUP;\n \t\treturn NULL;\n \t}\n-\t/* Lookup in local cache. */\n-\tlentry = __list_lookup(list, lcore_index, ctx, true);\n-\tif (lentry)\n-\t\treturn lentry;\n-\t/* Lookup with read lock, reuse if found. */\n+\t/* 1. Lookup in local cache. */\n+\tlocal_entry = __list_lookup(list, lcore_index, ctx, true);\n+\tif (local_entry)\n+\t\treturn local_entry;\n+\t/* 2. Lookup with read lock on global list, reuse if found. */\n \trte_rwlock_read_lock(&list->lock);\n \tentry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);\n-\tif (entry == NULL) {\n-\t\tprev_gen_cnt = __atomic_load_n(&list->gen_cnt,\n-\t\t\t\t\t       __ATOMIC_ACQUIRE);\n-\t\trte_rwlock_read_unlock(&list->lock);\n-\t} else {\n+\tif (likely(entry)) {\n \t\trte_rwlock_read_unlock(&list->lock);\n \t\treturn mlx5_list_cache_insert(list, lcore_index, entry, ctx);\n \t}\n-\t/* Not found, append with write lock - block read from other threads. */\n+\tprev_gen_cnt = list->gen_cnt;\n+\trte_rwlock_read_unlock(&list->lock);\n+\t/* 3. 
Prepare new entry for global list and for cache. */\n+\tentry = list->cb_create(list, entry, ctx);\n+\tif (unlikely(!entry))\n+\t\treturn NULL;\n+\tlocal_entry = list->cb_clone(list, entry, ctx);\n+\tif (unlikely(!local_entry)) {\n+\t\tlist->cb_remove(list, entry);\n+\t\treturn NULL;\n+\t}\n+\tentry->ref_cnt = 1u;\n+\tlocal_entry->ref_cnt = 1u;\n+\tlocal_entry->gentry = entry;\n \trte_rwlock_write_lock(&list->lock);\n-\t/* If list changed by other threads before lock, search again. */\n-\tif (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {\n-\t\t/* Lookup and reuse w/o read lock. */\n-\t\tentry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);\n-\t\tif (entry) {\n+\t/* 4. Make sure the same entry was not created before the write lock. */\n+\tif (unlikely(prev_gen_cnt != list->gen_cnt)) {\n+\t\tstruct mlx5_list_entry *oentry = __list_lookup(list,\n+\t\t\t\t\t\t\t       RTE_MAX_LCORE,\n+\t\t\t\t\t\t\t       ctx, true);\n+\n+\t\tif (unlikely(oentry)) {\n+\t\t\t/* 4.5. Found real race!!, reuse the old entry. */\n \t\t\trte_rwlock_write_unlock(&list->lock);\n-\t\t\treturn mlx5_list_cache_insert(list, lcore_index, entry,\n-\t\t\t\t\t\t      ctx);\n-\t\t}\n-\t}\n-\tentry = list->cb_create(list, entry, ctx);\n-\tif (entry) {\n-\t\tlentry = mlx5_list_cache_insert(list, lcore_index, entry, ctx);\n-\t\tif (!lentry) {\n \t\t\tlist->cb_remove(list, entry);\n-\t\t} else {\n-\t\t\tentry->ref_cnt = 1u;\n-\t\t\tLIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry,\n-\t\t\t\t\t next);\n-\t\t\t__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);\n-\t\t\t__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n-\t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\",\n-\t\t\t\tlist->name, (void *)entry, entry->ref_cnt);\n+\t\t\tlist->cb_clone_free(list, local_entry);\n+\t\t\treturn mlx5_list_cache_insert(list, lcore_index, oentry,\n+\t\t\t\t\t\t      ctx);\n \t\t}\n-\n \t}\n+\t/* 5. Update lists. 
*/\n+\tLIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);\n+\tlist->gen_cnt++;\n \trte_rwlock_write_unlock(&list->lock);\n-\treturn lentry;\n+\tLIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);\n+\t__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n+\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\",\n+\t\tlist->name, (void *)entry, entry->ref_cnt);\n+\treturn local_entry;\n }\n \n int\n@@ -180,12 +185,11 @@ mlx5_list_unregister(struct mlx5_list *list,\n \tif (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_ACQUIRE) != 0)\n \t\treturn 1;\n \trte_rwlock_write_lock(&list->lock);\n-\tif (__atomic_load_n(&gentry->ref_cnt, __ATOMIC_ACQUIRE) == 0) {\n-\t\t__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);\n-\t\t__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n+\tif (likely(gentry->ref_cnt == 0)) {\n \t\tLIST_REMOVE(gentry, next);\n-\t\tlist->cb_remove(list, gentry);\n \t\trte_rwlock_write_unlock(&list->lock);\n+\t\tlist->cb_remove(list, gentry);\n+\t\t__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n \t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p removed.\",\n \t\t\tlist->name, (void *)gentry);\n \t\treturn 0;\ndiff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h\nindex 9e3fe0cb85..6dade8238d 100644\n--- a/drivers/net/mlx5/mlx5_utils.h\n+++ b/drivers/net/mlx5/mlx5_utils.h\n@@ -388,8 +388,9 @@ typedef struct mlx5_list_entry *(*mlx5_list_create_cb)\n  */\n struct mlx5_list {\n \tchar name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */\n-\tuint32_t gen_cnt; /* List modification will update generation count. */\n-\tuint32_t count; /* number of entries in list. */\n+\tvolatile uint32_t gen_cnt;\n+\t/* List modification will update generation count. */\n+\tvolatile uint32_t count; /* number of entries in list. */\n \tvoid *ctx; /* user objects target to callback. */\n \trte_rwlock_t lock; /* read/write lock. */\n \tmlx5_list_create_cb cb_create; /**< entry create callback. */\n",
    "prefixes": [
        "v6",
        "09/26"
    ]
}