get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
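For illustration, a minimal sketch of driving these methods from Python with
the requests package (an assumed dependency). Patchwork serves plain JSON
with ?format=json; the ?format=api view captured below is the browsable
rendering of the same resource:

import requests

URL = "https://patches.dpdk.org/api/patches/95168/"

# GET: show the patch as plain JSON.
resp = requests.get(URL, params={"format": "json"})
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH/PUT: update the patch. Write access needs a maintainer API token
# (the token below is a placeholder); unauthenticated requests are refused.
# requests.patch(URL, headers={"Authorization": "Token <your-token>"},
#                data={"state": "accepted"})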

GET /api/patches/95168/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95168,
    "url": "https://patches.dpdk.org/api/patches/95168/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210702061816.10454-16-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210702061816.10454-16-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210702061816.10454-16-suanmingm@nvidia.com",
    "date": "2021-07-02T06:18:09",
    "name": "[v3,15/22] common/mlx5: allocate cache list memory individually",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "31479715421466a1e224310653853b941c820b1d",
    "submitter": {
        "id": 1887,
        "url": "https://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210702061816.10454-16-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 17585,
            "url": "https://patches.dpdk.org/api/series/17585/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17585",
            "date": "2021-07-02T06:17:54",
            "name": "net/mlx5: insertion rate optimization",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/17585/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/95168/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/95168/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 74EDAA0A0C;\n\tFri,  2 Jul 2021 08:20:32 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1D9C641392;\n\tFri,  2 Jul 2021 08:19:02 +0200 (CEST)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2047.outbound.protection.outlook.com [40.107.93.47])\n by mails.dpdk.org (Postfix) with ESMTP id 71B934137F\n for <dev@dpdk.org>; Fri,  2 Jul 2021 08:18:59 +0200 (CEST)",
            "from BN6PR1201CA0009.namprd12.prod.outlook.com\n (2603:10b6:405:4c::19) by CH2PR12MB3797.namprd12.prod.outlook.com\n (2603:10b6:610:27::27) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4264.19; Fri, 2 Jul\n 2021 06:18:58 +0000",
            "from BN8NAM11FT026.eop-nam11.prod.protection.outlook.com\n (2603:10b6:405:4c:cafe::d8) by BN6PR1201CA0009.outlook.office365.com\n (2603:10b6:405:4c::19) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.22 via Frontend\n Transport; Fri, 2 Jul 2021 06:18:58 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT026.mail.protection.outlook.com (10.13.177.51) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4287.22 via Frontend Transport; Fri, 2 Jul 2021 06:18:57 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Fri, 2 Jul\n 2021 06:18:55 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=WbMl6I2fYwiiGj9dPN5dygzsDxznH2KyivjLS096BKEJZ//Jsjv4FaW2rIJwnJRfPN0a7XFfCKiNZLoqkErcYIbWpbWyMvtDPCLn/hkk5Vn+6IlG9RLmriVt1V7/xwVwTNW9N6TDyDL2MaNLRrY+PJKHbYTwIDjFUXDfFVBc75J208k4KHPl5hSfLg2JSEdyJUWoZ3ogJDYLmuFkpxERh0SNRWAVQHXJdF15woyDAmTgU/Z9g40GVeu4GL1htiD18Pat+E1JcaaIQTOfpgPRYqsXDVXdjb1/+v4CuXCLuXTocUgatnkW2r6qxJ1Zuz8J7f61sk+VcbhNJpoqifVakQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=EkgzUpz5t/xcsSlUPDMfJQbwf+SjZKFxgQuaQST2xPs=;\n b=MKsd8171A8Y3/YRMK78wbTTOiiGvo/TfmZzkC+Kc3RO0kTVKtawh7gBElADu6PCc4pYZU3GplmwWre4hZ+25u+IJbtaWzXfaz+QO7ckpkdd0+ovtVXWsmVInGaKjzo/8zR/9Qj7cMryaMm6+KhY8ARhXfU+VE1Ej3r9kc8iJbI2rqt6qlVLXYYmnCPaIaiuDjC7KzmJtkOlX4tbPAqT+1OKG1aam/3eQ2FRghN2T2lgIcs3XfXLWGKrOkj67eaXGXv2JSS3uT9nhL1fnvaFlA2fO61jRmnlX7UkfIZGpPnlv0KhanNZRlh5om1Vqf/euuGj6lp5fXqB9gJa6KG4OqQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=EkgzUpz5t/xcsSlUPDMfJQbwf+SjZKFxgQuaQST2xPs=;\n b=JGNOB3FtZgve1l0D0dN/pM2m1l2IxpgbN0MZsiFG50/b+1FgO9FUJk1epRdzFbshpmno1BUVjjIfOVkg7mN9wddPhej4+LWAb4yTq1lN7ZZl1Hdc1W1os/fHS1twKJnKuL9GyBvmaecY8SIt2zoofxs6u7peY4rzhXy/01VSfDkSN4oJg0r/a9XutKRI8xgi0QCemlLgvS35Y1ecVsiPVNmZAKKc7AHqMKRhNwYwq9By2M5ROASIZo7SGIsPoLY3f5B4LA78SPPlJG4Vn2TPbDZtPM2bg6Uuxzvi1ELf6Np49K06eaNDNPI4dxjoUsASqXxQWZI7/4mdi3QdXc4yDA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Date": "Fri, 2 Jul 2021 09:18:09 +0300",
        "Message-ID": "<20210702061816.10454-16-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20210702061816.10454-1-suanmingm@nvidia.com>",
        "References": "<20210527093403.1153127-1-suanmingm@nvidia.com>\n <20210702061816.10454-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "1fd84974-efe3-45c1-4aa9-08d93d2146d4",
        "X-MS-TrafficTypeDiagnostic": "CH2PR12MB3797:",
        "X-Microsoft-Antispam-PRVS": "\n <CH2PR12MB379790C823D2C4917817716EC11F9@CH2PR12MB3797.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:71;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n bTxq99QLYgbvRv7HHPh4GkrrcxgAMaLnwiOClzmBmCjpgRJBPQv2Ly4289RPC3wIqdvRo9tgReddL9rqffdaxdWub9BUe+y0qy4oUXfRZFewtVAFLVes7AS+zN4LP3K7vXMc69JqyV8Psf2ZxhTgBBbgk9/dKRADrgjDlc0oCwGd2miTXBEK48Q210/f0AhlROrexP9wld9I/hw31ItborVNkXAoHk7VlVfYR5XGhBtCbSvy5oO6Bn3qGdmAeJOcfsEHoJZiRl1yoyPkeWi0MadITC9N1AE5mSQ61a/GcrGKOeDJnEHTOuKN16FxKs2z7TTGzRe+KMcXfd4hBH/ao45Nv2ALf/tIcyvWr2oqGa9swYgc8p+34stdM0YEfrpHL764dKQXfhCJ6XZyC82fEeIw/8DBl/bURfKSpy+LpmA5DeT7rIQ6dQol90XuAE7Vtdr3gctbE20KiaMh4lxxJh/RN8l4OYv3hR2FruUucmTMlJDXsCHTEtHShGEBw6PIcTTb5YUwUUu9hiUlgGpQA1d5jgU2CKmlnNcbhWuCSOlTQDaATjft0Oo4nDMtd0JdUCqXh4v/zoDTwfO4RWFfLEMGDYX8S6Ga4d6E73Hw/ctht6JaEF1/v/tuDSgyuDxLyKFA03v14MzkWOpJJhygFVul6+6ch6lbEae9jMFsdsNAXI+xzzPmhYKkyqGWWdP6dP8NiGkMve2tb1p9v7NVbg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(376002)(396003)(136003)(39860400002)(346002)(46966006)(36840700001)(5660300002)(16526019)(186003)(36860700001)(47076005)(8936002)(356005)(7636003)(6636002)(2906002)(55016002)(83380400001)(6286002)(82740400003)(1076003)(86362001)(36756003)(70586007)(7696005)(426003)(478600001)(4326008)(336012)(2616005)(26005)(36906005)(70206006)(82310400003)(54906003)(110136005)(316002)(6666004)(8676002);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "02 Jul 2021 06:18:57.9289 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 1fd84974-efe3-45c1-4aa9-08d93d2146d4",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT026.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CH2PR12MB3797",
        "Subject": "[dpdk-dev] [PATCH v3 15/22] common/mlx5: allocate cache list memory\n individually",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Currently, the list's local cache instance memory is allocated with\nthe list. As the local cache instance array size is RTE_MAX_LCORE,\nmost of the cases the system will only have very limited cores.\nallocate the instance memory individually per core will be more\neconomic to the memory.\n\nThis commit changes the instance array to pointer array, allocate\nthe local cache memory only when the core is to be used.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/common/mlx5/mlx5_common_utils.c | 62 ++++++++++++++++++-------\n drivers/common/mlx5/mlx5_common_utils.h |  2 +-\n 2 files changed, 45 insertions(+), 19 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c\nindex 4e385c616a..f75b1cb0da 100644\n--- a/drivers/common/mlx5/mlx5_common_utils.c\n+++ b/drivers/common/mlx5/mlx5_common_utils.c\n@@ -15,14 +15,13 @@\n \n static int\n mlx5_list_init(struct mlx5_list *list, const char *name, void *ctx,\n-\t       bool lcores_share, mlx5_list_create_cb cb_create,\n+\t       bool lcores_share, struct mlx5_list_cache *gc,\n+\t       mlx5_list_create_cb cb_create,\n \t       mlx5_list_match_cb cb_match,\n \t       mlx5_list_remove_cb cb_remove,\n \t       mlx5_list_clone_cb cb_clone,\n \t       mlx5_list_clone_free_cb cb_clone_free)\n {\n-\tint i;\n-\n \tif (!cb_match || !cb_create || !cb_remove || !cb_clone ||\n \t    !cb_clone_free) {\n \t\trte_errno = EINVAL;\n@@ -38,9 +37,11 @@ mlx5_list_init(struct mlx5_list *list, const char *name, void *ctx,\n \tlist->cb_clone = cb_clone;\n \tlist->cb_clone_free = cb_clone_free;\n \trte_rwlock_init(&list->lock);\n+\tif (lcores_share) {\n+\t\tlist->cache[RTE_MAX_LCORE] = gc;\n+\t\tLIST_INIT(&list->cache[RTE_MAX_LCORE]->h);\n+\t}\n \tDRV_LOG(DEBUG, \"mlx5 list %s initialized.\", list->name);\n-\tfor (i = 0; i <= RTE_MAX_LCORE; i++)\n-\t\tLIST_INIT(&list->cache[i].h);\n \treturn 0;\n }\n \n@@ -53,11 +54,16 @@ mlx5_list_create(const char *name, void *ctx, bool lcores_share,\n \t\t mlx5_list_clone_free_cb cb_clone_free)\n {\n \tstruct mlx5_list *list;\n+\tstruct mlx5_list_cache *gc = NULL;\n \n-\tlist = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY);\n+\tlist = mlx5_malloc(MLX5_MEM_ZERO,\n+\t\t\t   sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),\n+\t\t\t   0, SOCKET_ID_ANY);\n \tif (!list)\n \t\treturn NULL;\n-\tif (mlx5_list_init(list, name, ctx, lcores_share,\n+\tif (lcores_share)\n+\t\tgc = (struct mlx5_list_cache *)(list + 1);\n+\tif (mlx5_list_init(list, name, ctx, lcores_share, gc,\n \t\t\t   cb_create, cb_match, cb_remove, cb_clone,\n \t\t\t   cb_clone_free) != 0) {\n \t\tmlx5_free(list);\n@@ -69,7 +75,8 @@ mlx5_list_create(const char *name, void *ctx, bool lcores_share,\n static struct mlx5_list_entry *\n __list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)\n {\n-\tstruct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);\n+\tstruct mlx5_list_entry *entry =\n+\t\t\t\tLIST_FIRST(&list->cache[lcore_index]->h);\n \tuint32_t ret;\n \n \twhile (entry != NULL) {\n@@ -121,14 +128,14 @@ mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,\n \tlentry->ref_cnt = 1u;\n \tlentry->gentry = gentry;\n \tlentry->lcore_idx = (uint32_t)lcore_index;\n-\tLIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);\n+\tLIST_INSERT_HEAD(&list->cache[lcore_index]->h, lentry, next);\n \treturn lentry;\n }\n \n static void\n __list_cache_clean(struct mlx5_list *list, int lcore_index)\n {\n-\tstruct mlx5_list_cache *c = &list->cache[lcore_index];\n+\tstruct mlx5_list_cache *c = list->cache[lcore_index];\n \tstruct mlx5_list_entry *entry = LIST_FIRST(&c->h);\n \tuint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,\n \t\t\t\t\t       __ATOMIC_RELAXED);\n@@ -161,6 +168,17 @@ mlx5_list_register(struct mlx5_list *list, void *ctx)\n \t\trte_errno = ENOTSUP;\n \t\treturn NULL;\n \t}\n+\tif (unlikely(!list->cache[lcore_index])) {\n+\t\tlist->cache[lcore_index] = mlx5_malloc(0,\n+\t\t\t\t\tsizeof(struct mlx5_list_cache),\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\t\tif (!list->cache[lcore_index]) {\n+\t\t\trte_errno = ENOMEM;\n+\t\t\treturn 
NULL;\n+\t\t}\n+\t\tlist->cache[lcore_index]->inv_cnt = 0;\n+\t\tLIST_INIT(&list->cache[lcore_index]->h);\n+\t}\n \t/* 0. Free entries that was invalidated by other lcores. */\n \t__list_cache_clean(list, lcore_index);\n \t/* 1. Lookup in local cache. */\n@@ -186,7 +204,7 @@ mlx5_list_register(struct mlx5_list *list, void *ctx)\n \tentry->ref_cnt = 1u;\n \tif (!list->lcores_share) {\n \t\tentry->lcore_idx = (uint32_t)lcore_index;\n-\t\tLIST_INSERT_HEAD(&list->cache[lcore_index].h, entry, next);\n+\t\tLIST_INSERT_HEAD(&list->cache[lcore_index]->h, entry, next);\n \t\t__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);\n \t\tDRV_LOG(DEBUG, \"MLX5 list %s c%d entry %p new: %u.\",\n \t\t\tlist->name, lcore_index, (void *)entry, entry->ref_cnt);\n@@ -217,10 +235,10 @@ mlx5_list_register(struct mlx5_list *list, void *ctx)\n \t\t}\n \t}\n \t/* 5. Update lists. */\n-\tLIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);\n+\tLIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE]->h, entry, next);\n \tlist->gen_cnt++;\n \trte_rwlock_write_unlock(&list->lock);\n-\tLIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);\n+\tLIST_INSERT_HEAD(&list->cache[lcore_index]->h, local_entry, next);\n \t__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);\n \tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\", list->name,\n \t\t(void *)entry, entry->ref_cnt);\n@@ -245,7 +263,7 @@ mlx5_list_unregister(struct mlx5_list *list,\n \t\telse\n \t\t\tlist->cb_remove(list->ctx, entry);\n \t} else if (likely(lcore_idx != -1)) {\n-\t\t__atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,\n+\t\t__atomic_add_fetch(&list->cache[entry->lcore_idx]->inv_cnt, 1,\n \t\t\t\t   __ATOMIC_RELAXED);\n \t} else {\n \t\treturn 0;\n@@ -280,8 +298,10 @@ mlx5_list_uninit(struct mlx5_list *list)\n \n \tMLX5_ASSERT(list);\n \tfor (i = 0; i <= RTE_MAX_LCORE; i++) {\n-\t\twhile (!LIST_EMPTY(&list->cache[i].h)) {\n-\t\t\tentry = LIST_FIRST(&list->cache[i].h);\n+\t\tif (!list->cache[i])\n+\t\t\tcontinue;\n+\t\twhile (!LIST_EMPTY(&list->cache[i]->h)) {\n+\t\t\tentry = LIST_FIRST(&list->cache[i]->h);\n \t\t\tLIST_REMOVE(entry, next);\n \t\t\tif (i == RTE_MAX_LCORE) {\n \t\t\t\tlist->cb_remove(list->ctx, entry);\n@@ -292,6 +312,8 @@ mlx5_list_uninit(struct mlx5_list *list)\n \t\t\t\tlist->cb_clone_free(list->ctx, entry);\n \t\t\t}\n \t\t}\n+\t\tif (i != RTE_MAX_LCORE)\n+\t\t\tmlx5_free(list->cache[i]);\n \t}\n }\n \n@@ -320,6 +342,7 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,\n \t\t  mlx5_list_clone_free_cb cb_clone_free)\n {\n \tstruct mlx5_hlist *h;\n+\tstruct mlx5_list_cache *gc;\n \tuint32_t act_size;\n \tuint32_t alloc_size;\n \tuint32_t i;\n@@ -333,7 +356,9 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,\n \t\tact_size = size;\n \t}\n \talloc_size = sizeof(struct mlx5_hlist) +\n-\t\t     sizeof(struct mlx5_hlist_bucket) * act_size;\n+\t\t     sizeof(struct mlx5_hlist_bucket)  * act_size;\n+\tif (lcores_share)\n+\t\talloc_size += sizeof(struct mlx5_list_cache)  * act_size;\n \t/* Using zmalloc, then no need to initialize the heads. 
*/\n \th = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,\n \t\t\tSOCKET_ID_ANY);\n@@ -345,8 +370,10 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,\n \th->mask = act_size - 1;\n \th->lcores_share = lcores_share;\n \th->direct_key = direct_key;\n+\tgc = (struct mlx5_list_cache *)&h->buckets[act_size];\n \tfor (i = 0; i < act_size; i++) {\n \t\tif (mlx5_list_init(&h->buckets[i].l, name, ctx, lcores_share,\n+\t\t\t\t   lcores_share ? &gc[i] : NULL,\n \t\t\t\t   cb_create, cb_match, cb_remove, cb_clone,\n \t\t\t\t   cb_clone_free) != 0) {\n \t\t\tmlx5_free(h);\n@@ -358,7 +385,6 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,\n \treturn h;\n }\n \n-\n struct mlx5_list_entry *\n mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)\n {\ndiff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h\nindex 61b30a45ca..979dfafad4 100644\n--- a/drivers/common/mlx5/mlx5_common_utils.h\n+++ b/drivers/common/mlx5/mlx5_common_utils.h\n@@ -104,7 +104,7 @@ struct mlx5_list {\n \tmlx5_list_remove_cb cb_remove; /**< entry remove callback. */\n \tmlx5_list_clone_cb cb_clone; /**< entry clone callback. */\n \tmlx5_list_clone_free_cb cb_clone_free;\n-\tstruct mlx5_list_cache cache[RTE_MAX_LCORE + 1];\n+\tstruct mlx5_list_cache *cache[RTE_MAX_LCORE + 1];\n \t/* Lcore cache, last index is the global cache. */\n \tvolatile uint32_t gen_cnt; /* List modification may update it. */\n \tvolatile uint32_t count; /* number of entries in list. */\n",
    "prefixes": [
        "v3",
        "15/22"
    ]
}
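The response above links onward to related resources through its mbox,
comments, and checks fields. A follow-up sketch in the same vein, again
assuming the requests package; the context and state fields read from each
check object are assumptions based on the Patchwork checks schema:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/95168/",
                     params={"format": "json"}).json()

# Fetch the raw patch in mbox form, suitable for git am.
mbox = requests.get(patch["mbox"]).text

# Enumerate the per-CI results behind the aggregate "check": "success".
for check in requests.get(patch["checks"],
                          params={"format": "json"}).json():
    print(check.get("context"), check.get("state"))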