get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/95405/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95405,
    "url": "https://patches.dpdk.org/api/patches/95405/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210706133257.3353-19-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210706133257.3353-19-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210706133257.3353-19-suanmingm@nvidia.com",
    "date": "2021-07-06T13:32:49",
    "name": "[v4,18/26] common/mlx5: optimize cache list object memory",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3344c354e1d33c809545aa670f313bbb59c5e7ad",
    "submitter": {
        "id": 1887,
        "url": "https://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210706133257.3353-19-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 17668,
            "url": "https://patches.dpdk.org/api/series/17668/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17668",
            "date": "2021-07-06T13:32:31",
            "name": "net/mlx5: insertion rate optimization",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/17668/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/95405/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/95405/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 006FCA0C47;\n\tTue,  6 Jul 2021 15:35:56 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CAB1941465;\n\tTue,  6 Jul 2021 15:33:55 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2068.outbound.protection.outlook.com [40.107.243.68])\n by mails.dpdk.org (Postfix) with ESMTP id DBD3541457\n for <dev@dpdk.org>; Tue,  6 Jul 2021 15:33:52 +0200 (CEST)",
            "from BN9PR03CA0557.namprd03.prod.outlook.com (2603:10b6:408:138::22)\n by BYAPR12MB4613.namprd12.prod.outlook.com (2603:10b6:a03:a4::23)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.31; Tue, 6 Jul\n 2021 13:33:50 +0000",
            "from BN8NAM11FT062.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:138:cafe::65) by BN9PR03CA0557.outlook.office365.com\n (2603:10b6:408:138::22) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4308.19 via Frontend\n Transport; Tue, 6 Jul 2021 13:33:50 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT062.mail.protection.outlook.com (10.13.177.34) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4287.22 via Frontend Transport; Tue, 6 Jul 2021 13:33:50 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 6 Jul\n 2021 13:33:48 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=JrJGXCR4mIinSu9TbtB74F0694zOcXS10vqWQn++Sxdh74ieYNP9T0ESI1w3lK9LNJN33yLNOZ2OdNJsDhjzXQXaYkoF8W7TW2clQen4vF5gKdjaKEeFUsGR7ATIiy5v7AJ2oj5P7UUEW+QYDcoWbPW9mzHOpZKQ6sSTKeTEOSZxbx94EUnsXuAIwVFzZIJA6KLpElgkpXvxDn8aCSRTZm+4OTHyBXWXP9YoW/WvpCdwsjmZt6ZjSHNgvmygKPBfhYyohq0AsiNJJvLB6b/yLWhvHmqDXFUt/+C6/gT4LXvT+FEucv/eSb3mlPTHIxv69rHubOIV4N+XJdxHpeZseQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=+prwpKwZEFeTCRISoJBC+4OIHuK9yrgFZRE9UtgB00Q=;\n b=g5B5PzebhxlpZouwUzCue4Yl+dW72sXTRm87EOoi/jAdxNeJKGDdpvszTVJ4WTVhplzrOFcqLaWIgm6yVVARiR9gvynEGR7llVkdHXBeQ7vDCPkvRXvC490yiWYDFuSRjDNomoLHw1Haw/QifTxg4x4R3VEY341b2D1CG1VG1JvV0AMauNv0CnBANhMImLDN5vhUZo+5QbVfYQwJQNPM+ACUzGvK8syQahSRBIfMW2gsG9PG85nyQC55Q2F0caaqW9pyzbFI0Yyln4XraNRyQ5S5OzqJDK+ca3X1sqARNJNrL1/qS4ISGY3jZDdZN+V1pphu8glsggmlKWLIRyq24A==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=+prwpKwZEFeTCRISoJBC+4OIHuK9yrgFZRE9UtgB00Q=;\n b=sIbkSpmoXrHnpcx82kocZjsSGQIm8+kseC5EgY6wA/x9eFVCs+sNLJXF+XsjJP1aRDyAR2f7a6mTb03aEBKqPoO+cgco4QZqQilrwJBxNi+IzQ93twp8DUXXhV2zZhLfWewpCMcH6bIqQ2/QR9g1SfyTODBcTGT50oDkJ6ytKT2EUQ11UQfmkbMjiiHoVGw9XMze+Zz9EVirzdp7GWnqxs5A/n9DFjA36QoC6lQRGcTCeDQ3AD+qe8R/26yEoViYebEiiTTaOPBJt91i9zltLfhtOqXmdzBnqFxyxzNH7EqTz0b35rlOsHyKDJL0ZPpW+Ttze/m48T3q95Qdr1yISw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Date": "Tue, 6 Jul 2021 16:32:49 +0300",
        "Message-ID": "<20210706133257.3353-19-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20210706133257.3353-1-suanmingm@nvidia.com>",
        "References": "<20210527093403.1153127-1-suanmingm@nvidia.com>\n <20210706133257.3353-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "e775241a-aeaf-4d92-dbf8-08d94082b0d9",
        "X-MS-TrafficTypeDiagnostic": "BYAPR12MB4613:",
        "X-Microsoft-Antispam-PRVS": "\n <BYAPR12MB461352950E418F8C6A4E6745C11B9@BYAPR12MB4613.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:46;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n C3bs+kC5/tICtJP+9rboFS6RQY5rmsyGtHxdToqlVHfYUNlATmPXT4Wxo4QRtq/ABB9Bft1KicNdKcuGgq4H06KCdIWSXu1LmfZQfhd1QKpeRpnrH6dGldR/EYDQb39LtXu4xIRrH+NDwTeZOovqBGyJ21gVeAJgORxENq2/QWtnj/vzwsJECWPACAM95u2yk/StjKcI/iQLLzfTmLpzYO0MaAKACfLjx56fGvX2eYTLRA4Bzz5CvXsrwTGKjRfgC2ekWXjMvBwJ+wtwOuCUjOpyiPcmq59TUYALZbGz2sXo1cs3lperxGieGQdINkN+D5tb0QBY345zQF1VUXKYfDu/uNtRqVYD2tf1B8iFzOwaSjJt+Fp6BlXCD0EoXI/xiy61rWPIuAay7CuZA0LEa+aMGzhsZeHEwCK0oKe/VwLguty8xUBfuWIa0AYJmpTg4okLWuw824k/9xzbuvAhduEkGcCQlleHbdtIxgRe/pEc+Ll6TYWiNzMXsFOFYftyUEK7NiDVAz3909DC2KyyrZxPzB7H+A4zHsM7v6eNWVNI6qwyWei+4ZAngrKQPFPS8JA2h0eW0RF5j6xOxqT5N6yABAJ9N4IwNJX6/faJlIrgk6PxUHoMEARgjj/2gU9fhfj1ym7vytQRHDxQDdyKELJQ2sDSdYNc3WsAITw1MK7Bjftg10MPa2tTAvI4saRNhZvKNiQS3SsaSVMg+Q1GYQ==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(39860400002)(396003)(346002)(136003)(376002)(46966006)(36840700001)(7636003)(2906002)(6286002)(36860700001)(478600001)(16526019)(30864003)(36756003)(86362001)(83380400001)(47076005)(6636002)(186003)(4326008)(8936002)(356005)(5660300002)(8676002)(316002)(55016002)(336012)(82740400003)(82310400003)(110136005)(2616005)(36906005)(7696005)(70206006)(426003)(70586007)(54906003)(26005)(1076003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jul 2021 13:33:50.4860 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n e775241a-aeaf-4d92-dbf8-08d94082b0d9",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT062.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BYAPR12MB4613",
        "Subject": "[dpdk-dev] [PATCH v4 18/26] common/mlx5: optimize cache list object\n memory",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Currently, hash list uses the cache list as bucket list. The list\nin the buckets have the same name, ctx and callbacks. This wastes\nthe memory.\n\nThis commit abstracts all the name, ctx and callback members in the\nlist to a constant struct and others to the inconstant struct, uses\nthe wrapper functions to satisfy both hash list and cache list can\nset the list constant and inconstant struct individually.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/common/mlx5/mlx5_common_utils.c | 295 ++++++++++++++----------\n drivers/common/mlx5/mlx5_common_utils.h |  45 ++--\n 2 files changed, 201 insertions(+), 139 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c\nindex f75b1cb0da..858c8d8164 100644\n--- a/drivers/common/mlx5/mlx5_common_utils.c\n+++ b/drivers/common/mlx5/mlx5_common_utils.c\n@@ -14,34 +14,16 @@\n /********************* mlx5 list ************************/\n \n static int\n-mlx5_list_init(struct mlx5_list *list, const char *name, void *ctx,\n-\t       bool lcores_share, struct mlx5_list_cache *gc,\n-\t       mlx5_list_create_cb cb_create,\n-\t       mlx5_list_match_cb cb_match,\n-\t       mlx5_list_remove_cb cb_remove,\n-\t       mlx5_list_clone_cb cb_clone,\n-\t       mlx5_list_clone_free_cb cb_clone_free)\n+mlx5_list_init(struct mlx5_list_inconst *l_inconst,\n+\t       struct mlx5_list_const *l_const,\n+\t       struct mlx5_list_cache *gc)\n {\n-\tif (!cb_match || !cb_create || !cb_remove || !cb_clone ||\n-\t    !cb_clone_free) {\n-\t\trte_errno = EINVAL;\n-\t\treturn -EINVAL;\n+\trte_rwlock_init(&l_inconst->lock);\n+\tif (l_const->lcores_share) {\n+\t\tl_inconst->cache[RTE_MAX_LCORE] = gc;\n+\t\tLIST_INIT(&l_inconst->cache[RTE_MAX_LCORE]->h);\n \t}\n-\tif (name)\n-\t\tsnprintf(list->name, sizeof(list->name), \"%s\", name);\n-\tlist->ctx = ctx;\n-\tlist->lcores_share = lcores_share;\n-\tlist->cb_create = cb_create;\n-\tlist->cb_match = cb_match;\n-\tlist->cb_remove = cb_remove;\n-\tlist->cb_clone = cb_clone;\n-\tlist->cb_clone_free = cb_clone_free;\n-\trte_rwlock_init(&list->lock);\n-\tif (lcores_share) {\n-\t\tlist->cache[RTE_MAX_LCORE] = gc;\n-\t\tLIST_INIT(&list->cache[RTE_MAX_LCORE]->h);\n-\t}\n-\tDRV_LOG(DEBUG, \"mlx5 list %s initialized.\", list->name);\n+\tDRV_LOG(DEBUG, \"mlx5 list %s initialized.\", l_const->name);\n \treturn 0;\n }\n \n@@ -56,16 +38,30 @@ mlx5_list_create(const char *name, void *ctx, bool lcores_share,\n \tstruct mlx5_list *list;\n \tstruct mlx5_list_cache *gc = NULL;\n \n+\tif (!cb_match || !cb_create || !cb_remove || !cb_clone ||\n+\t    !cb_clone_free) {\n+\t\trte_errno 
= EINVAL;\n+\t\treturn NULL;\n+\t}\n \tlist = mlx5_malloc(MLX5_MEM_ZERO,\n \t\t\t   sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),\n \t\t\t   0, SOCKET_ID_ANY);\n+\n \tif (!list)\n \t\treturn NULL;\n+\tif (name)\n+\t\tsnprintf(list->l_const.name,\n+\t\t\t sizeof(list->l_const.name), \"%s\", name);\n+\tlist->l_const.ctx = ctx;\n+\tlist->l_const.lcores_share = lcores_share;\n+\tlist->l_const.cb_create = cb_create;\n+\tlist->l_const.cb_match = cb_match;\n+\tlist->l_const.cb_remove = cb_remove;\n+\tlist->l_const.cb_clone = cb_clone;\n+\tlist->l_const.cb_clone_free = cb_clone_free;\n \tif (lcores_share)\n \t\tgc = (struct mlx5_list_cache *)(list + 1);\n-\tif (mlx5_list_init(list, name, ctx, lcores_share, gc,\n-\t\t\t   cb_create, cb_match, cb_remove, cb_clone,\n-\t\t\t   cb_clone_free) != 0) {\n+\tif (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) {\n \t\tmlx5_free(list);\n \t\treturn NULL;\n \t}\n@@ -73,19 +69,21 @@ mlx5_list_create(const char *name, void *ctx, bool lcores_share,\n }\n \n static struct mlx5_list_entry *\n-__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)\n+__list_lookup(struct mlx5_list_inconst *l_inconst,\n+\t      struct mlx5_list_const *l_const,\n+\t      int lcore_index, void *ctx, bool reuse)\n {\n \tstruct mlx5_list_entry *entry =\n-\t\t\t\tLIST_FIRST(&list->cache[lcore_index]->h);\n+\t\t\t\tLIST_FIRST(&l_inconst->cache[lcore_index]->h);\n \tuint32_t ret;\n \n \twhile (entry != NULL) {\n-\t\tif (list->cb_match(list->ctx, entry, ctx) == 0) {\n+\t\tif (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {\n \t\t\tif (reuse) {\n \t\t\t\tret = __atomic_add_fetch(&entry->ref_cnt, 1,\n \t\t\t\t\t\t\t __ATOMIC_RELAXED) - 1;\n \t\t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p ref: %u.\",\n-\t\t\t\t\tlist->name, (void *)entry,\n+\t\t\t\t\tl_const->name, (void *)entry,\n \t\t\t\t\tentry->ref_cnt);\n \t\t\t} else if (lcore_index < RTE_MAX_LCORE) {\n \t\t\t\tret = __atomic_load_n(&entry->ref_cnt,\n@@ -101,41 +99,55 @@ 
__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)\n \treturn NULL;\n }\n \n-struct mlx5_list_entry *\n-mlx5_list_lookup(struct mlx5_list *list, void *ctx)\n+static inline struct mlx5_list_entry *\n+_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst,\n+\t\t  struct mlx5_list_const *l_const, void *ctx)\n {\n \tstruct mlx5_list_entry *entry = NULL;\n \tint i;\n \n-\trte_rwlock_read_lock(&list->lock);\n+\trte_rwlock_read_lock(&l_inconst->lock);\n \tfor (i = 0; i < RTE_MAX_LCORE; i++) {\n-\t\tentry = __list_lookup(list, i, ctx, false);\n+\t\tif (!l_inconst->cache[i])\n+\t\t\tcontinue;\n+\t\tentry = __list_lookup(l_inconst, l_const, i, ctx, false);\n \t\tif (entry)\n \t\t\tbreak;\n \t}\n-\trte_rwlock_read_unlock(&list->lock);\n+\trte_rwlock_read_unlock(&l_inconst->lock);\n \treturn entry;\n }\n \n+struct mlx5_list_entry *\n+mlx5_list_lookup(struct mlx5_list *list, void *ctx)\n+{\n+\treturn _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx);\n+}\n+\n+\n static struct mlx5_list_entry *\n-mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,\n+mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst,\n+\t\t       struct mlx5_list_const *l_const, int lcore_index,\n \t\t       struct mlx5_list_entry *gentry, void *ctx)\n {\n-\tstruct mlx5_list_entry *lentry = list->cb_clone(list->ctx, gentry, ctx);\n+\tstruct mlx5_list_entry *lentry =\n+\t\t\tl_const->cb_clone(l_const->ctx, gentry, ctx);\n \n \tif (unlikely(!lentry))\n \t\treturn NULL;\n \tlentry->ref_cnt = 1u;\n \tlentry->gentry = gentry;\n \tlentry->lcore_idx = (uint32_t)lcore_index;\n-\tLIST_INSERT_HEAD(&list->cache[lcore_index]->h, lentry, next);\n+\tLIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next);\n \treturn lentry;\n }\n \n static void\n-__list_cache_clean(struct mlx5_list *list, int lcore_index)\n+__list_cache_clean(struct mlx5_list_inconst *l_inconst,\n+\t\t   struct mlx5_list_const *l_const,\n+\t\t   int lcore_index)\n {\n-\tstruct mlx5_list_cache *c 
= list->cache[lcore_index];\n+\tstruct mlx5_list_cache *c = l_inconst->cache[lcore_index];\n \tstruct mlx5_list_entry *entry = LIST_FIRST(&c->h);\n \tuint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,\n \t\t\t\t\t       __ATOMIC_RELAXED);\n@@ -145,108 +157,123 @@ __list_cache_clean(struct mlx5_list *list, int lcore_index)\n \n \t\tif (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {\n \t\t\tLIST_REMOVE(entry, next);\n-\t\t\tif (list->lcores_share)\n-\t\t\t\tlist->cb_clone_free(list->ctx, entry);\n+\t\t\tif (l_const->lcores_share)\n+\t\t\t\tl_const->cb_clone_free(l_const->ctx, entry);\n \t\t\telse\n-\t\t\t\tlist->cb_remove(list->ctx, entry);\n+\t\t\t\tl_const->cb_remove(l_const->ctx, entry);\n \t\t\tinv_cnt--;\n \t\t}\n \t\tentry = nentry;\n \t}\n }\n \n-struct mlx5_list_entry *\n-mlx5_list_register(struct mlx5_list *list, void *ctx)\n+static inline struct mlx5_list_entry *\n+_mlx5_list_register(struct mlx5_list_inconst *l_inconst,\n+\t\t    struct mlx5_list_const *l_const,\n+\t\t    void *ctx)\n {\n \tstruct mlx5_list_entry *entry, *local_entry;\n \tvolatile uint32_t prev_gen_cnt = 0;\n \tint lcore_index = rte_lcore_index(rte_lcore_id());\n \n-\tMLX5_ASSERT(list);\n+\tMLX5_ASSERT(l_inconst);\n \tMLX5_ASSERT(lcore_index < RTE_MAX_LCORE);\n \tif (unlikely(lcore_index == -1)) {\n \t\trte_errno = ENOTSUP;\n \t\treturn NULL;\n \t}\n-\tif (unlikely(!list->cache[lcore_index])) {\n-\t\tlist->cache[lcore_index] = mlx5_malloc(0,\n+\tif (unlikely(!l_inconst->cache[lcore_index])) {\n+\t\tl_inconst->cache[lcore_index] = mlx5_malloc(0,\n \t\t\t\t\tsizeof(struct mlx5_list_cache),\n \t\t\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n-\t\tif (!list->cache[lcore_index]) {\n+\t\tif (!l_inconst->cache[lcore_index]) {\n \t\t\trte_errno = ENOMEM;\n \t\t\treturn NULL;\n \t\t}\n-\t\tlist->cache[lcore_index]->inv_cnt = 0;\n-\t\tLIST_INIT(&list->cache[lcore_index]->h);\n+\t\tl_inconst->cache[lcore_index]->inv_cnt = 0;\n+\t\tLIST_INIT(&l_inconst->cache[lcore_index]->h);\n \t}\n 
\t/* 0. Free entries that was invalidated by other lcores. */\n-\t__list_cache_clean(list, lcore_index);\n+\t__list_cache_clean(l_inconst, l_const, lcore_index);\n \t/* 1. Lookup in local cache. */\n-\tlocal_entry = __list_lookup(list, lcore_index, ctx, true);\n+\tlocal_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);\n \tif (local_entry)\n \t\treturn local_entry;\n-\tif (list->lcores_share) {\n+\tif (l_const->lcores_share) {\n \t\t/* 2. Lookup with read lock on global list, reuse if found. */\n-\t\trte_rwlock_read_lock(&list->lock);\n-\t\tentry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);\n+\t\trte_rwlock_read_lock(&l_inconst->lock);\n+\t\tentry = __list_lookup(l_inconst, l_const, RTE_MAX_LCORE,\n+\t\t\t\t      ctx, true);\n \t\tif (likely(entry)) {\n-\t\t\trte_rwlock_read_unlock(&list->lock);\n-\t\t\treturn mlx5_list_cache_insert(list, lcore_index, entry,\n-\t\t\t\t\t\t      ctx);\n+\t\t\trte_rwlock_read_unlock(&l_inconst->lock);\n+\t\t\treturn mlx5_list_cache_insert(l_inconst, l_const,\n+\t\t\t\t\t\t      lcore_index,\n+\t\t\t\t\t\t      entry, ctx);\n \t\t}\n-\t\tprev_gen_cnt = list->gen_cnt;\n-\t\trte_rwlock_read_unlock(&list->lock);\n+\t\tprev_gen_cnt = l_inconst->gen_cnt;\n+\t\trte_rwlock_read_unlock(&l_inconst->lock);\n \t}\n \t/* 3. Prepare new entry for global list and for cache. 
*/\n-\tentry = list->cb_create(list->ctx, ctx);\n+\tentry = l_const->cb_create(l_const->ctx, ctx);\n \tif (unlikely(!entry))\n \t\treturn NULL;\n \tentry->ref_cnt = 1u;\n-\tif (!list->lcores_share) {\n+\tif (!l_const->lcores_share) {\n \t\tentry->lcore_idx = (uint32_t)lcore_index;\n-\t\tLIST_INSERT_HEAD(&list->cache[lcore_index]->h, entry, next);\n-\t\t__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);\n+\t\tLIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,\n+\t\t\t\t entry, next);\n+\t\t__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);\n \t\tDRV_LOG(DEBUG, \"MLX5 list %s c%d entry %p new: %u.\",\n-\t\t\tlist->name, lcore_index, (void *)entry, entry->ref_cnt);\n+\t\t\tl_const->name, lcore_index,\n+\t\t\t(void *)entry, entry->ref_cnt);\n \t\treturn entry;\n \t}\n-\tlocal_entry = list->cb_clone(list->ctx, entry, ctx);\n+\tlocal_entry = l_const->cb_clone(l_const->ctx, entry, ctx);\n \tif (unlikely(!local_entry)) {\n-\t\tlist->cb_remove(list->ctx, entry);\n+\t\tl_const->cb_remove(l_const->ctx, entry);\n \t\treturn NULL;\n \t}\n \tlocal_entry->ref_cnt = 1u;\n \tlocal_entry->gentry = entry;\n \tlocal_entry->lcore_idx = (uint32_t)lcore_index;\n-\trte_rwlock_write_lock(&list->lock);\n+\trte_rwlock_write_lock(&l_inconst->lock);\n \t/* 4. Make sure the same entry was not created before the write lock. */\n-\tif (unlikely(prev_gen_cnt != list->gen_cnt)) {\n-\t\tstruct mlx5_list_entry *oentry = __list_lookup(list,\n+\tif (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {\n+\t\tstruct mlx5_list_entry *oentry = __list_lookup(l_inconst,\n+\t\t\t\t\t\t\t       l_const,\n \t\t\t\t\t\t\t       RTE_MAX_LCORE,\n \t\t\t\t\t\t\t       ctx, true);\n \n \t\tif (unlikely(oentry)) {\n \t\t\t/* 4.5. Found real race!!, reuse the old entry. 
*/\n-\t\t\trte_rwlock_write_unlock(&list->lock);\n-\t\t\tlist->cb_remove(list->ctx, entry);\n-\t\t\tlist->cb_clone_free(list->ctx, local_entry);\n-\t\t\treturn mlx5_list_cache_insert(list, lcore_index, oentry,\n-\t\t\t\t\t\t      ctx);\n+\t\t\trte_rwlock_write_unlock(&l_inconst->lock);\n+\t\t\tl_const->cb_remove(l_const->ctx, entry);\n+\t\t\tl_const->cb_clone_free(l_const->ctx, local_entry);\n+\t\t\treturn mlx5_list_cache_insert(l_inconst, l_const,\n+\t\t\t\t\t\t      lcore_index,\n+\t\t\t\t\t\t      oentry, ctx);\n \t\t}\n \t}\n \t/* 5. Update lists. */\n-\tLIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE]->h, entry, next);\n-\tlist->gen_cnt++;\n-\trte_rwlock_write_unlock(&list->lock);\n-\tLIST_INSERT_HEAD(&list->cache[lcore_index]->h, local_entry, next);\n-\t__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);\n-\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\", list->name,\n+\tLIST_INSERT_HEAD(&l_inconst->cache[RTE_MAX_LCORE]->h, entry, next);\n+\tl_inconst->gen_cnt++;\n+\trte_rwlock_write_unlock(&l_inconst->lock);\n+\tLIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);\n+\t__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);\n+\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\", l_const->name,\n \t\t(void *)entry, entry->ref_cnt);\n \treturn local_entry;\n }\n \n-int\n-mlx5_list_unregister(struct mlx5_list *list,\n+struct mlx5_list_entry *\n+mlx5_list_register(struct mlx5_list *list, void *ctx)\n+{\n+\treturn _mlx5_list_register(&list->l_inconst, &list->l_const, ctx);\n+}\n+\n+static inline int\n+_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,\n+\t\t      struct mlx5_list_const *l_const,\n \t\t      struct mlx5_list_entry *entry)\n {\n \tstruct mlx5_list_entry *gentry = entry->gentry;\n@@ -258,69 +285,77 @@ mlx5_list_unregister(struct mlx5_list *list,\n \tMLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);\n \tif (entry->lcore_idx == (uint32_t)lcore_idx) {\n \t\tLIST_REMOVE(entry, next);\n-\t\tif 
(list->lcores_share)\n-\t\t\tlist->cb_clone_free(list->ctx, entry);\n+\t\tif (l_const->lcores_share)\n+\t\t\tl_const->cb_clone_free(l_const->ctx, entry);\n \t\telse\n-\t\t\tlist->cb_remove(list->ctx, entry);\n+\t\t\tl_const->cb_remove(l_const->ctx, entry);\n \t} else if (likely(lcore_idx != -1)) {\n-\t\t__atomic_add_fetch(&list->cache[entry->lcore_idx]->inv_cnt, 1,\n-\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t__atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,\n+\t\t\t\t   1, __ATOMIC_RELAXED);\n \t} else {\n \t\treturn 0;\n \t}\n-\tif (!list->lcores_share) {\n-\t\t__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);\n+\tif (!l_const->lcores_share) {\n+\t\t__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);\n \t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p removed.\",\n-\t\t\tlist->name, (void *)entry);\n+\t\t\tl_const->name, (void *)entry);\n \t\treturn 0;\n \t}\n \tif (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)\n \t\treturn 1;\n-\trte_rwlock_write_lock(&list->lock);\n+\trte_rwlock_write_lock(&l_inconst->lock);\n \tif (likely(gentry->ref_cnt == 0)) {\n \t\tLIST_REMOVE(gentry, next);\n-\t\trte_rwlock_write_unlock(&list->lock);\n-\t\tlist->cb_remove(list->ctx, gentry);\n-\t\t__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);\n+\t\trte_rwlock_write_unlock(&l_inconst->lock);\n+\t\tl_const->cb_remove(l_const->ctx, gentry);\n+\t\t__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);\n \t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p removed.\",\n-\t\t\tlist->name, (void *)gentry);\n+\t\t\tl_const->name, (void *)gentry);\n \t\treturn 0;\n \t}\n-\trte_rwlock_write_unlock(&list->lock);\n+\trte_rwlock_write_unlock(&l_inconst->lock);\n \treturn 1;\n }\n \n+int\n+mlx5_list_unregister(struct mlx5_list *list,\n+\t\t      struct mlx5_list_entry *entry)\n+{\n+\treturn _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry);\n+}\n+\n static void\n-mlx5_list_uninit(struct mlx5_list *list)\n+mlx5_list_uninit(struct mlx5_list_inconst 
*l_inconst,\n+\t\t struct mlx5_list_const *l_const)\n {\n \tstruct mlx5_list_entry *entry;\n \tint i;\n \n-\tMLX5_ASSERT(list);\n+\tMLX5_ASSERT(l_inconst);\n \tfor (i = 0; i <= RTE_MAX_LCORE; i++) {\n-\t\tif (!list->cache[i])\n+\t\tif (!l_inconst->cache[i])\n \t\t\tcontinue;\n-\t\twhile (!LIST_EMPTY(&list->cache[i]->h)) {\n-\t\t\tentry = LIST_FIRST(&list->cache[i]->h);\n+\t\twhile (!LIST_EMPTY(&l_inconst->cache[i]->h)) {\n+\t\t\tentry = LIST_FIRST(&l_inconst->cache[i]->h);\n \t\t\tLIST_REMOVE(entry, next);\n \t\t\tif (i == RTE_MAX_LCORE) {\n-\t\t\t\tlist->cb_remove(list->ctx, entry);\n+\t\t\t\tl_const->cb_remove(l_const->ctx, entry);\n \t\t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p \"\n-\t\t\t\t\t\"destroyed.\", list->name,\n+\t\t\t\t\t\"destroyed.\", l_const->name,\n \t\t\t\t\t(void *)entry);\n \t\t\t} else {\n-\t\t\t\tlist->cb_clone_free(list->ctx, entry);\n+\t\t\t\tl_const->cb_clone_free(l_const->ctx, entry);\n \t\t\t}\n \t\t}\n \t\tif (i != RTE_MAX_LCORE)\n-\t\t\tmlx5_free(list->cache[i]);\n+\t\t\tmlx5_free(l_inconst->cache[i]);\n \t}\n }\n \n void\n mlx5_list_destroy(struct mlx5_list *list)\n {\n-\tmlx5_list_uninit(list);\n+\tmlx5_list_uninit(&list->l_inconst, &list->l_const);\n \tmlx5_free(list);\n }\n \n@@ -328,7 +363,7 @@ uint32_t\n mlx5_list_get_entry_num(struct mlx5_list *list)\n {\n \tMLX5_ASSERT(list);\n-\treturn __atomic_load_n(&list->count, __ATOMIC_RELAXED);\n+\treturn __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);\n }\n \n /********************* Hash List **********************/\n@@ -347,6 +382,11 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,\n \tuint32_t alloc_size;\n \tuint32_t i;\n \n+\tif (!cb_match || !cb_create || !cb_remove || !cb_clone ||\n+\t    !cb_clone_free) {\n+\t\trte_errno = EINVAL;\n+\t\treturn NULL;\n+\t}\n \t/* Align to the next power of 2, 32bits integer is enough now. 
*/\n \tif (!rte_is_power_of_2(size)) {\n \t\tact_size = rte_align32pow2(size);\n@@ -356,7 +396,7 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,\n \t\tact_size = size;\n \t}\n \talloc_size = sizeof(struct mlx5_hlist) +\n-\t\t     sizeof(struct mlx5_hlist_bucket)  * act_size;\n+\t\t     sizeof(struct mlx5_hlist_bucket) * act_size;\n \tif (lcores_share)\n \t\talloc_size += sizeof(struct mlx5_list_cache)  * act_size;\n \t/* Using zmalloc, then no need to initialize the heads. */\n@@ -367,15 +407,21 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,\n \t\t\tname ? name : \"None\");\n \t\treturn NULL;\n \t}\n+\tif (name)\n+\t\tsnprintf(h->l_const.name, sizeof(h->l_const.name), \"%s\", name);\n+\th->l_const.ctx = ctx;\n+\th->l_const.lcores_share = lcores_share;\n+\th->l_const.cb_create = cb_create;\n+\th->l_const.cb_match = cb_match;\n+\th->l_const.cb_remove = cb_remove;\n+\th->l_const.cb_clone = cb_clone;\n+\th->l_const.cb_clone_free = cb_clone_free;\n \th->mask = act_size - 1;\n-\th->lcores_share = lcores_share;\n \th->direct_key = direct_key;\n \tgc = (struct mlx5_list_cache *)&h->buckets[act_size];\n \tfor (i = 0; i < act_size; i++) {\n-\t\tif (mlx5_list_init(&h->buckets[i].l, name, ctx, lcores_share,\n-\t\t\t\t   lcores_share ? &gc[i] : NULL,\n-\t\t\t\t   cb_create, cb_match, cb_remove, cb_clone,\n-\t\t\t\t   cb_clone_free) != 0) {\n+\t\tif (mlx5_list_init(&h->buckets[i].l, &h->l_const,\n+\t\t    lcores_share ? 
&gc[i] : NULL) != 0) {\n \t\t\tmlx5_free(h);\n \t\t\treturn NULL;\n \t\t}\n@@ -385,6 +431,7 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,\n \treturn h;\n }\n \n+\n struct mlx5_list_entry *\n mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)\n {\n@@ -394,7 +441,7 @@ mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)\n \t\tidx = (uint32_t)(key & h->mask);\n \telse\n \t\tidx = rte_hash_crc_8byte(key, 0) & h->mask;\n-\treturn mlx5_list_lookup(&h->buckets[idx].l, ctx);\n+\treturn _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx);\n }\n \n struct mlx5_list_entry*\n@@ -407,9 +454,9 @@ mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)\n \t\tidx = (uint32_t)(key & h->mask);\n \telse\n \t\tidx = rte_hash_crc_8byte(key, 0) & h->mask;\n-\tentry = mlx5_list_register(&h->buckets[idx].l, ctx);\n+\tentry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx);\n \tif (likely(entry)) {\n-\t\tif (h->lcores_share)\n+\t\tif (h->l_const.lcores_share)\n \t\t\tentry->gentry->bucket_idx = idx;\n \t\telse\n \t\t\tentry->bucket_idx = idx;\n@@ -420,10 +467,10 @@ mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)\n int\n mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)\n {\n-\tuint32_t idx = h->lcores_share ? entry->gentry->bucket_idx :\n+\tuint32_t idx = h->l_const.lcores_share ? 
entry->gentry->bucket_idx :\n \t\t\t\t\t\t\t      entry->bucket_idx;\n \n-\treturn mlx5_list_unregister(&h->buckets[idx].l, entry);\n+\treturn _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry);\n }\n \n void\n@@ -432,6 +479,6 @@ mlx5_hlist_destroy(struct mlx5_hlist *h)\n \tuint32_t i;\n \n \tfor (i = 0; i <= h->mask; i++)\n-\t\tmlx5_list_uninit(&h->buckets[i].l);\n+\t\tmlx5_list_uninit(&h->buckets[i].l, &h->l_const);\n \tmlx5_free(h);\n }\ndiff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h\nindex d49fb64457..5718a21be0 100644\n--- a/drivers/common/mlx5/mlx5_common_utils.h\n+++ b/drivers/common/mlx5/mlx5_common_utils.h\n@@ -80,6 +80,32 @@ typedef void (*mlx5_list_clone_free_cb)(void *tool_ctx,\n typedef struct mlx5_list_entry *(*mlx5_list_create_cb)(void *tool_ctx,\n \t\t\t\t\t\t       void *ctx);\n \n+/**\n+ * Linked mlx5 list constant object.\n+ */\n+struct mlx5_list_const {\n+\tchar name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */\n+\tvoid *ctx; /* user objects target to callback. */\n+\tbool lcores_share; /* Whether to share objects between the lcores. */\n+\tmlx5_list_create_cb cb_create; /**< entry create callback. */\n+\tmlx5_list_match_cb cb_match; /**< entry match callback. */\n+\tmlx5_list_remove_cb cb_remove; /**< entry remove callback. */\n+\tmlx5_list_clone_cb cb_clone; /**< entry clone callback. */\n+\tmlx5_list_clone_free_cb cb_clone_free;\n+\t/**< entry clone free callback. */\n+};\n+\n+/**\n+ * Linked mlx5 list inconstant data.\n+ */\n+struct mlx5_list_inconst {\n+\trte_rwlock_t lock; /* read/write lock. */\n+\tvolatile uint32_t gen_cnt; /* List modification may update it. */\n+\tvolatile uint32_t count; /* number of entries in list. */\n+\tstruct mlx5_list_cache *cache[RTE_MAX_LCORE + 1];\n+\t/* Lcore cache, last index is the global cache. 
*/\n+};\n+\n /**\n  * Linked mlx5 list structure.\n  *\n@@ -96,19 +122,8 @@ typedef struct mlx5_list_entry *(*mlx5_list_create_cb)(void *tool_ctx,\n  *\n  */\n struct mlx5_list {\n-\tchar name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */\n-\tvoid *ctx; /* user objects target to callback. */\n-\tbool lcores_share; /* Whether to share objects between the lcores. */\n-\tmlx5_list_create_cb cb_create; /**< entry create callback. */\n-\tmlx5_list_match_cb cb_match; /**< entry match callback. */\n-\tmlx5_list_remove_cb cb_remove; /**< entry remove callback. */\n-\tmlx5_list_clone_cb cb_clone; /**< entry clone callback. */\n-\tmlx5_list_clone_free_cb cb_clone_free;\n-\tstruct mlx5_list_cache *cache[RTE_MAX_LCORE + 1];\n-\t/* Lcore cache, last index is the global cache. */\n-\tvolatile uint32_t gen_cnt; /* List modification may update it. */\n-\tvolatile uint32_t count; /* number of entries in list. */\n-\trte_rwlock_t lock; /* read/write lock. */\n+\tstruct mlx5_list_const l_const;\n+\tstruct mlx5_list_inconst l_inconst;\n };\n \n /**\n@@ -219,7 +234,7 @@ mlx5_list_get_entry_num(struct mlx5_list *list);\n \n /* Hash list bucket. */\n struct mlx5_hlist_bucket {\n-\tstruct mlx5_list l;\n+\tstruct mlx5_list_inconst l;\n } __rte_cache_aligned;\n \n /**\n@@ -231,7 +246,7 @@ struct mlx5_hlist {\n \tuint32_t mask; /* A mask for the bucket index range. */\n \tuint8_t flags;\n \tbool direct_key; /* Whether to use the key directly as hash index. */\n-\tbool lcores_share; /* Whether to share objects between the lcores. */\n+\tstruct mlx5_list_const l_const; /* List constant data. */\n \tstruct mlx5_hlist_bucket buckets[] __rte_cache_aligned;\n };\n \n",
    "prefixes": [
        "v4",
        "18/26"
    ]
}