get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all writable fields.
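
A minimal sketch of how a client might exercise these endpoints, assuming the Python `requests` library; the API token and the state value used in the PATCH call are placeholders, not values taken from this page, and write access requires maintainer rights on the project:

    import requests

    BASE = "https://patches.dpdk.org/api"
    PATCH_ID = 95061

    # GET: read the patch. The endpoint is public, no authentication needed.
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PATCH: partially update the patch (here, its state). Authentication
    # uses a Patchwork API token; "<your-api-token>" is a placeholder.
    resp = requests.patch(
        f"{BASE}/patches/{PATCH_ID}/",
        headers={"Authorization": "Token <your-api-token>"},
        json={"state": "superseded"},
    )
    resp.raise_for_status()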

GET /api/patches/95061/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95061,
    "url": "https://patches.dpdk.org/api/patches/95061/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210630124609.8711-8-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210630124609.8711-8-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210630124609.8711-8-suanmingm@nvidia.com",
    "date": "2021-06-30T12:45:54",
    "name": "[v2,07/22] net/mlx5: add per lcore cache to the list utility",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d8bf98aecba5214976101ddc97e8455d80b5dbf8",
    "submitter": {
        "id": 1887,
        "url": "https://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210630124609.8711-8-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 17549,
            "url": "https://patches.dpdk.org/api/series/17549/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17549",
            "date": "2021-06-30T12:45:47",
            "name": "net/mlx5: insertion rate optimization",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/17549/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/95061/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/95061/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BE685A0A0F;\n\tWed, 30 Jun 2021 14:47:27 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 83EE640040;\n\tWed, 30 Jun 2021 14:46:53 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2086.outbound.protection.outlook.com [40.107.236.86])\n by mails.dpdk.org (Postfix) with ESMTP id 6679D4127F\n for <dev@dpdk.org>; Wed, 30 Jun 2021 14:46:50 +0200 (CEST)",
            "from BN6PR20CA0064.namprd20.prod.outlook.com (2603:10b6:404:151::26)\n by SJ0PR12MB5422.namprd12.prod.outlook.com (2603:10b6:a03:3ac::15)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4264.20; Wed, 30 Jun\n 2021 12:46:48 +0000",
            "from BN8NAM11FT042.eop-nam11.prod.protection.outlook.com\n (2603:10b6:404:151:cafe::7d) by BN6PR20CA0064.outlook.office365.com\n (2603:10b6:404:151::26) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.22 via Frontend\n Transport; Wed, 30 Jun 2021 12:46:48 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT042.mail.protection.outlook.com (10.13.177.85) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4287.22 via Frontend Transport; Wed, 30 Jun 2021 12:46:48 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 30 Jun\n 2021 12:46:38 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=OzWdIgAQcGyOFUeJYNSxMxYF/JjVR1XJo5H4hVq6nSeWJz3CXcJ20OTC9uksXwUXIdtIBkSbtQzNYYwh//1qhUja0UDTm32t0kBhSB7OrfTtlkXWMOO/DjaHn3A6B4mid/Le4b7n7N12B2vxe0SMVaSfzgLwZTiEJO7HDbYDvzLsp6DMZpvjBS8Ps2kcrUIyGgTwzlMoW6E77AaiV1wbFIKmAPryfwqJxfZs4YfctfxvQXtzCKlp1/cWut/XJLpsKZZAog3g/4e7j7Vl9fYgQ7GuiODQEr2mEBelHqJU00cyn7VXv4+JqGSKISIIcjzgm/Omyt7D4H0a6ZheUbLxPg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=aC6Lf3Q+104ONxdBgqyT9tEC0THd0FUBjvtHzBkGytg=;\n b=IsZGxVbknUnOU9IjRCqNvGWEoLA3BYkUcUNw7VOguCJnr3xpz2NxALm6PD+/QqBKXywBVOlYCsCAUZ3YxfDDbzeyy02yNQY7a4cklGPYiN3R3edf63YnuC+5/t9TCZ69TJLm6FNtLET6kYCIIxA7vMEB8eBKsGhNTIRdnlgkwER/qE43Q/Tb4pECndRsHtzsV+h4n+QmkAOah8wtdOfxqvSv9JN0fE1uk0y+LftqGS5/FRK/f6LwX/zYgxMvNYZYGd8AAGsCRhyTaRg8x6POrnrpdTkIWWKVxVgrYebC7sFP1XbgQCs2Tzc1NbM0bgOJg/DSkE+dVWwoKLv00ZBNmA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=aC6Lf3Q+104ONxdBgqyT9tEC0THd0FUBjvtHzBkGytg=;\n b=EnTckv9FHpqeUE5R6z3i+AZ2B872YoGYlzbw584S7ENrCSbM16K82NBMzWr5kfcssMWbpMOXMz1Frjas6evWzM7LOZKyDBI20iqg1Zy6540JrDKMx/XoRL6ijZWmKVuces2lFiwnnmLLfcBJEBM/is/b3V85FfOzAXJ80JG22VXc6Gq+la3z+M1AI/jltBHcqLldh1D7HKJFBZ0P7JjTOYuOMgjGVwyPaiaQZ/2BlsoMFEOBQogrMOpVqtsDDXJ+bcy6I1oa6drBeegpSRCAivwmWo1+AlCkUVgoveZnEA209+dP3SAMKkutxpnlWlpHubK3OW3N/xqE3wgCDaEPDQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Date": "Wed, 30 Jun 2021 15:45:54 +0300",
        "Message-ID": "<20210630124609.8711-8-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20210630124609.8711-1-suanmingm@nvidia.com>",
        "References": "<20210527093403.1153127-1-suanmingm@nvidia.com>\n <20210630124609.8711-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "ae4fa923-77c2-4904-b54b-08d93bc52015",
        "X-MS-TrafficTypeDiagnostic": "SJ0PR12MB5422:",
        "X-Microsoft-Antispam-PRVS": "\n <SJ0PR12MB5422B2D029D2CD9CC934B4D4C1019@SJ0PR12MB5422.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:86;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n e/37GYwot1Pkd1q4BR5anMt5f10QAr8TmRGGajz6Ygmd9z75QX3lkf5A5d6MxVAhTtY0i4egOFWOEFg4aqyn+yL1u1YQMo/dvSyRLWdbFIXuC0SNSH4THmY5ei6+rbvt9m7BstwvBH634VYMHnDNoZsW7cobCx0UghxbyBwDp4UlQHL5781dTjU7+VmUqo/Do+8yCu/kr7iPaAoDpnK4ImtzAddqdk5RbpyUxHundrXeIarRyl67CvkVJYyikByzyK1hTYoob2ZmgD3rbMLrcmNw0hioXxrHFnKoyMAB5JJchEi78pgE6o4FGdLzMlx3Bt9TxZJt+nW6h/ssrVXHCLi3cc13stzzsznkok3TzBC/j2JAJLkE4QeklgRHLCfVr6Mn+Xc8MeriGJvfHAxtVIJVwQkDeXjLYGmGCZvoq3Io0HSiw/fOB4W7lpX++WNGJQXDHN4jh6tiUKLFnXfhKyUA1hyneci1BBm7dddgIZEG5j8yAMeK/xvFhL4611gU4yZfy5GWJCYU8IxZkTv4LiywOofC6UB8R4Dnee7vnglrANb9PfLVSzKOGga28NtZiSA35GfaC3Z+848a09APioDH2pyTok7755R8f3IgAP2ezYbsrxXUVcXe3+oiATm3uXzystI8z4+1YoDJYX0N64c9O6Qh0l6BqALYBGirRPr5VeltvLB8M0Anwzfvqm/HULeiKyQhb25vE8+9VNUzGw==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(396003)(136003)(39860400002)(346002)(376002)(36840700001)(46966006)(36756003)(82740400003)(86362001)(82310400003)(30864003)(8676002)(36860700001)(47076005)(336012)(4326008)(1076003)(55016002)(7696005)(8936002)(6286002)(54906003)(7636003)(186003)(6666004)(426003)(16526019)(26005)(5660300002)(70586007)(316002)(83380400001)(70206006)(110136005)(6636002)(2616005)(478600001)(356005)(2906002);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Jun 2021 12:46:48.0736 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n ae4fa923-77c2-4904-b54b-08d93bc52015",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT042.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SJ0PR12MB5422",
        "Subject": "[dpdk-dev] [PATCH v2 07/22] net/mlx5: add per lcore cache to the\n list utility",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Matan Azrad <matan@nvidia.com>\n\nWhen mlx5 list object is accessed by multiple cores, the list lock\ncounter is all the time written by all the cores what increases cache\nmisses in the memory caches.\n\nIn addition, when one thread accesses the list for add\\remove\\lookup\noperation, all the other threads coming to do an operation in the list\nare stuck in the lock.\n\nAdd per lcore cache to allow thread manipulations to be lockless when\nthe list objects are mostly reused.\n\nSynchronization with atomic operations should be done in order to\nallow threads to unregister an entry from other thread cache.\n\nSigned-off-by: Matan Azrad <matan@nvidia.com>\nAcked-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c   |  58 ++++----\n drivers/net/mlx5/mlx5.h            |   1 +\n drivers/net/mlx5/mlx5_flow.h       |  21 ++-\n drivers/net/mlx5/mlx5_flow_dv.c    | 181 +++++++++++++++++++++++-\n drivers/net/mlx5/mlx5_rx.h         |   5 +\n drivers/net/mlx5/mlx5_rxq.c        |  71 +++++++---\n drivers/net/mlx5/mlx5_utils.c      | 214 ++++++++++++++++++-----------\n drivers/net/mlx5/mlx5_utils.h      |  30 ++--\n drivers/net/mlx5/windows/mlx5_os.c |   5 +-\n 9 files changed, 451 insertions(+), 135 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex 9aa57e38b7..8a043526da 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -272,30 +272,38 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \t\tgoto error;\n \t/* The resources below are only valid with DV support. */\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\t/* Init port id action mlx5 list. */\n+\t/* Init port id action list. */\n \tsnprintf(s, sizeof(s), \"%s_port_id_action_list\", sh->ibdev_name);\n-\tmlx5_list_create(&sh->port_id_action_list, s, 0, sh,\n-\t\t\t     flow_dv_port_id_create_cb,\n-\t\t\t     flow_dv_port_id_match_cb,\n-\t\t\t     flow_dv_port_id_remove_cb);\n-\t/* Init push vlan action mlx5 list. */\n+\tmlx5_list_create(&sh->port_id_action_list, s, sh,\n+\t\t\t flow_dv_port_id_create_cb,\n+\t\t\t flow_dv_port_id_match_cb,\n+\t\t\t flow_dv_port_id_remove_cb,\n+\t\t\t flow_dv_port_id_clone_cb,\n+\t\t\t flow_dv_port_id_clone_free_cb);\n+\t/* Init push vlan action list. */\n \tsnprintf(s, sizeof(s), \"%s_push_vlan_action_list\", sh->ibdev_name);\n-\tmlx5_list_create(&sh->push_vlan_action_list, s, 0, sh,\n-\t\t\t     flow_dv_push_vlan_create_cb,\n-\t\t\t     flow_dv_push_vlan_match_cb,\n-\t\t\t     flow_dv_push_vlan_remove_cb);\n-\t/* Init sample action mlx5 list. */\n+\tmlx5_list_create(&sh->push_vlan_action_list, s, sh,\n+\t\t\t flow_dv_push_vlan_create_cb,\n+\t\t\t flow_dv_push_vlan_match_cb,\n+\t\t\t flow_dv_push_vlan_remove_cb,\n+\t\t\t flow_dv_push_vlan_clone_cb,\n+\t\t\t flow_dv_push_vlan_clone_free_cb);\n+\t/* Init sample action list. */\n \tsnprintf(s, sizeof(s), \"%s_sample_action_list\", sh->ibdev_name);\n-\tmlx5_list_create(&sh->sample_action_list, s, 0, sh,\n-\t\t\t     flow_dv_sample_create_cb,\n-\t\t\t     flow_dv_sample_match_cb,\n-\t\t\t     flow_dv_sample_remove_cb);\n-\t/* Init dest array action mlx5 list. */\n+\tmlx5_list_create(&sh->sample_action_list, s, sh,\n+\t\t\t flow_dv_sample_create_cb,\n+\t\t\t flow_dv_sample_match_cb,\n+\t\t\t flow_dv_sample_remove_cb,\n+\t\t\t flow_dv_sample_clone_cb,\n+\t\t\t flow_dv_sample_clone_free_cb);\n+\t/* Init dest array action list. */\n \tsnprintf(s, sizeof(s), \"%s_dest_array_list\", sh->ibdev_name);\n-\tmlx5_list_create(&sh->dest_array_list, s, 0, sh,\n-\t\t\t     flow_dv_dest_array_create_cb,\n-\t\t\t     flow_dv_dest_array_match_cb,\n-\t\t\t     flow_dv_dest_array_remove_cb);\n+\tmlx5_list_create(&sh->dest_array_list, s, sh,\n+\t\t\t flow_dv_dest_array_create_cb,\n+\t\t\t flow_dv_dest_array_match_cb,\n+\t\t\t flow_dv_dest_array_remove_cb,\n+\t\t\t flow_dv_dest_array_clone_cb,\n+\t\t\t flow_dv_dest_array_clone_free_cb);\n \t/* Create tags hash list table. */\n \tsnprintf(s, sizeof(s), \"%s_tags\", sh->ibdev_name);\n \tsh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,\n@@ -1702,10 +1710,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\terr = ENOTSUP;\n \t\t\tgoto error;\n \t}\n-\tmlx5_list_create(&priv->hrxqs, \"hrxq\", 0, eth_dev,\n-\t\t\t     mlx5_hrxq_create_cb,\n-\t\t\t     mlx5_hrxq_match_cb,\n-\t\t\t     mlx5_hrxq_remove_cb);\n+\tmlx5_list_create(&priv->hrxqs, \"hrxq\", eth_dev, mlx5_hrxq_create_cb,\n+\t\t\t mlx5_hrxq_match_cb,\n+\t\t\t mlx5_hrxq_remove_cb,\n+\t\t\t mlx5_hrxq_clone_cb,\n+\t\t\t mlx5_hrxq_clone_free_cb);\n+\trte_rwlock_init(&priv->ind_tbls_lock);\n \t/* Query availability of metadata reg_c's. 
*/\n \terr = mlx5_flow_discover_mreg_c(eth_dev);\n \tif (err < 0) {\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 803387b98e..b01ce5cbfd 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1365,6 +1365,7 @@ struct mlx5_priv {\n \t/* Indirection tables. */\n \tLIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;\n \t/* Pointer to next element. */\n+\trte_rwlock_t ind_tbls_lock;\n \tuint32_t refcnt; /**< Reference counter. */\n \t/**< Verbs modify header action object. */\n \tuint8_t ft_type; /**< Flow table type, Rx or Tx. */\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 4dec703366..ce363355c1 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1634,7 +1634,11 @@ struct mlx5_list_entry *flow_dv_port_id_create_cb(struct mlx5_list *list,\n \t\t\t\t\t\t  void *cb_ctx);\n void flow_dv_port_id_remove_cb(struct mlx5_list *list,\n \t\t\t       struct mlx5_list_entry *entry);\n-\n+struct mlx5_list_entry *flow_dv_port_id_clone_cb(struct mlx5_list *list,\n+\t\t\t\tstruct mlx5_list_entry *entry __rte_unused,\n+\t\t\t\t void *cb_ctx);\n+void flow_dv_port_id_clone_free_cb(struct mlx5_list *list,\n+\t\t\t\tstruct mlx5_list_entry *entry __rte_unused);\n int flow_dv_push_vlan_match_cb(struct mlx5_list *list,\n \t\t\t       struct mlx5_list_entry *entry, void *cb_ctx);\n struct mlx5_list_entry *flow_dv_push_vlan_create_cb(struct mlx5_list *list,\n@@ -1642,6 +1646,11 @@ struct mlx5_list_entry *flow_dv_push_vlan_create_cb(struct mlx5_list *list,\n \t\t\t\t\t\t  void *cb_ctx);\n void flow_dv_push_vlan_remove_cb(struct mlx5_list *list,\n \t\t\t\t struct mlx5_list_entry *entry);\n+struct mlx5_list_entry *flow_dv_push_vlan_clone_cb\n+\t\t\t\t(struct mlx5_list *list,\n+\t\t\t\t struct mlx5_list_entry *entry, void *cb_ctx);\n+void flow_dv_push_vlan_clone_free_cb(struct mlx5_list *list,\n+\t\t\t\t struct mlx5_list_entry *entry);\n \n int flow_dv_sample_match_cb(struct mlx5_list *list,\n \t\t\t    struct mlx5_list_entry *entry, void *cb_ctx);\n@@ -1650,6 +1659,11 @@ struct mlx5_list_entry *flow_dv_sample_create_cb(struct mlx5_list *list,\n \t\t\t\t\t\t void *cb_ctx);\n void flow_dv_sample_remove_cb(struct mlx5_list *list,\n \t\t\t      struct mlx5_list_entry *entry);\n+struct mlx5_list_entry *flow_dv_sample_clone_cb\n+\t\t\t\t(struct mlx5_list *list,\n+\t\t\t\t struct mlx5_list_entry *entry, void *cb_ctx);\n+void flow_dv_sample_clone_free_cb(struct mlx5_list *list,\n+\t\t\t      struct mlx5_list_entry *entry);\n \n int flow_dv_dest_array_match_cb(struct mlx5_list *list,\n \t\t\t\tstruct mlx5_list_entry *entry, void *cb_ctx);\n@@ -1658,6 +1672,11 @@ struct mlx5_list_entry *flow_dv_dest_array_create_cb(struct mlx5_list *list,\n \t\t\t\t\t\t  void *cb_ctx);\n void flow_dv_dest_array_remove_cb(struct mlx5_list *list,\n \t\t\t\t  struct mlx5_list_entry *entry);\n+struct mlx5_list_entry *flow_dv_dest_array_clone_cb\n+\t\t\t\t(struct mlx5_list *list,\n+\t\t\t\t struct mlx5_list_entry *entry, void *cb_ctx);\n+void flow_dv_dest_array_clone_free_cb(struct mlx5_list *list,\n+\t\t\t\t  struct mlx5_list_entry *entry);\n struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,\n \t\t\t\t\t\t    uint32_t age_idx);\n int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex ed0381329f..4a86d153f0 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -3820,6 +3820,39 
@@ flow_dv_port_id_create_cb(struct mlx5_list *list,\n \treturn &resource->entry;\n }\n \n+struct mlx5_list_entry *\n+flow_dv_port_id_clone_cb(struct mlx5_list *list,\n+\t\t\t  struct mlx5_list_entry *entry __rte_unused,\n+\t\t\t  void *cb_ctx)\n+{\n+\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n+\tstruct mlx5_flow_cb_ctx *ctx = cb_ctx;\n+\tstruct mlx5_flow_dv_port_id_action_resource *resource;\n+\tuint32_t idx;\n+\n+\tresource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);\n+\tif (!resource) {\n+\t\trte_flow_error_set(ctx->error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"cannot allocate port_id action memory\");\n+\t\treturn NULL;\n+\t}\n+\tmemcpy(resource, entry, sizeof(*resource));\n+\tresource->idx = idx;\n+\treturn &resource->entry;\n+}\n+\n+void\n+flow_dv_port_id_clone_free_cb(struct mlx5_list *list,\n+\t\t\t  struct mlx5_list_entry *entry)\n+{\n+\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n+\tstruct mlx5_flow_dv_port_id_action_resource *resource =\n+\t\t\tcontainer_of(entry, typeof(*resource), entry);\n+\n+\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);\n+}\n+\n /**\n  * Find existing table port ID resource or create and register a new one.\n  *\n@@ -3912,6 +3945,39 @@ flow_dv_push_vlan_create_cb(struct mlx5_list *list,\n \treturn &resource->entry;\n }\n \n+struct mlx5_list_entry *\n+flow_dv_push_vlan_clone_cb(struct mlx5_list *list,\n+\t\t\t  struct mlx5_list_entry *entry __rte_unused,\n+\t\t\t  void *cb_ctx)\n+{\n+\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n+\tstruct mlx5_flow_cb_ctx *ctx = cb_ctx;\n+\tstruct mlx5_flow_dv_push_vlan_action_resource *resource;\n+\tuint32_t idx;\n+\n+\tresource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);\n+\tif (!resource) {\n+\t\trte_flow_error_set(ctx->error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"cannot allocate push_vlan action memory\");\n+\t\treturn NULL;\n+\t}\n+\tmemcpy(resource, entry, sizeof(*resource));\n+\tresource->idx = idx;\n+\treturn &resource->entry;\n+}\n+\n+void\n+flow_dv_push_vlan_clone_free_cb(struct mlx5_list *list,\n+\t\t\t    struct mlx5_list_entry *entry)\n+{\n+\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n+\tstruct mlx5_flow_dv_push_vlan_action_resource *resource =\n+\t\t\tcontainer_of(entry, typeof(*resource), entry);\n+\n+\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);\n+}\n+\n /**\n  * Find existing push vlan resource or create and register a new one.\n  *\n@@ -9848,6 +9914,36 @@ flow_dv_matcher_enable(uint32_t *match_criteria)\n \treturn match_criteria_enable;\n }\n \n+static struct mlx5_list_entry *\n+flow_dv_matcher_clone_cb(struct mlx5_list *list __rte_unused,\n+\t\t\t struct mlx5_list_entry *entry, void *cb_ctx)\n+{\n+\tstruct mlx5_flow_cb_ctx *ctx = cb_ctx;\n+\tstruct mlx5_flow_dv_matcher *ref = ctx->data;\n+\tstruct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,\n+\t\t\t\t\t\t\t    typeof(*tbl), tbl);\n+\tstruct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,\n+\t\t\t\t\t\t\t    sizeof(*resource),\n+\t\t\t\t\t\t\t    0, SOCKET_ID_ANY);\n+\n+\tif (!resource) {\n+\t\trte_flow_error_set(ctx->error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"cannot create matcher\");\n+\t\treturn NULL;\n+\t}\n+\tmemcpy(resource, entry, sizeof(*resource));\n+\tresource->tbl = &tbl->tbl;\n+\treturn &resource->entry;\n+}\n+\n+static void\n+flow_dv_matcher_clone_free_cb(struct mlx5_list *list __rte_unused,\n+\t\t\t     struct mlx5_list_entry 
*entry)\n+{\n+\tmlx5_free(entry);\n+}\n+\n struct mlx5_hlist_entry *\n flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)\n {\n@@ -9914,10 +10010,12 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)\n \tMKSTR(matcher_name, \"%s_%s_%u_%u_matcher_list\",\n \t      key.is_fdb ? \"FDB\" : \"NIC\", key.is_egress ? \"egress\" : \"ingress\",\n \t      key.level, key.id);\n-\tmlx5_list_create(&tbl_data->matchers, matcher_name, 0, sh,\n+\tmlx5_list_create(&tbl_data->matchers, matcher_name, sh,\n \t\t\t flow_dv_matcher_create_cb,\n \t\t\t flow_dv_matcher_match_cb,\n-\t\t\t flow_dv_matcher_remove_cb);\n+\t\t\t flow_dv_matcher_remove_cb,\n+\t\t\t flow_dv_matcher_clone_cb,\n+\t\t\t flow_dv_matcher_clone_free_cb);\n \treturn &tbl_data->entry;\n }\n \n@@ -10705,6 +10803,45 @@ flow_dv_sample_create_cb(struct mlx5_list *list __rte_unused,\n \n }\n \n+struct mlx5_list_entry *\n+flow_dv_sample_clone_cb(struct mlx5_list *list __rte_unused,\n+\t\t\t struct mlx5_list_entry *entry __rte_unused,\n+\t\t\t void *cb_ctx)\n+{\n+\tstruct mlx5_flow_cb_ctx *ctx = cb_ctx;\n+\tstruct rte_eth_dev *dev = ctx->dev;\n+\tstruct mlx5_flow_dv_sample_resource *resource;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n+\tuint32_t idx = 0;\n+\n+\tresource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);\n+\tif (!resource) {\n+\t\trte_flow_error_set(ctx->error, ENOMEM,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"cannot allocate resource memory\");\n+\t\treturn NULL;\n+\t}\n+\tmemcpy(resource, entry, sizeof(*resource));\n+\tresource->idx = idx;\n+\tresource->dev = dev;\n+\treturn &resource->entry;\n+}\n+\n+void\n+flow_dv_sample_clone_free_cb(struct mlx5_list *list __rte_unused,\n+\t\t\t struct mlx5_list_entry *entry)\n+{\n+\tstruct mlx5_flow_dv_sample_resource *resource =\n+\t\t\tcontainer_of(entry, typeof(*resource), entry);\n+\tstruct rte_eth_dev *dev = resource->dev;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],\n+\t\t\tresource->idx);\n+}\n+\n /**\n  * Find existing sample resource or create and register a new one.\n  *\n@@ -10880,6 +11017,46 @@ flow_dv_dest_array_create_cb(struct mlx5_list *list __rte_unused,\n \treturn NULL;\n }\n \n+struct mlx5_list_entry *\n+flow_dv_dest_array_clone_cb(struct mlx5_list *list __rte_unused,\n+\t\t\t struct mlx5_list_entry *entry __rte_unused,\n+\t\t\t void *cb_ctx)\n+{\n+\tstruct mlx5_flow_cb_ctx *ctx = cb_ctx;\n+\tstruct rte_eth_dev *dev = ctx->dev;\n+\tstruct mlx5_flow_dv_dest_array_resource *resource;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n+\tuint32_t res_idx = 0;\n+\tstruct rte_flow_error *error = ctx->error;\n+\n+\tresource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],\n+\t\t\t\t      &res_idx);\n+\tif (!resource) {\n+\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"cannot allocate dest-array memory\");\n+\t\treturn NULL;\n+\t}\n+\tmemcpy(resource, entry, sizeof(*resource));\n+\tresource->idx = res_idx;\n+\tresource->dev = dev;\n+\treturn &resource->entry;\n+}\n+\n+void\n+flow_dv_dest_array_clone_free_cb(struct mlx5_list *list __rte_unused,\n+\t\t\t     struct mlx5_list_entry *entry)\n+{\n+\tstruct mlx5_flow_dv_dest_array_resource *resource =\n+\t\t\tcontainer_of(entry, typeof(*resource), entry);\n+\tstruct rte_eth_dev *dev = 
resource->dev;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);\n+}\n+\n /**\n  * Find existing destination array resource or create and register a new one.\n  *\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 3dcc71d51d..5450ddd388 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -229,6 +229,11 @@ int mlx5_hrxq_match_cb(struct mlx5_list *list,\n \t\t       void *cb_ctx);\n void mlx5_hrxq_remove_cb(struct mlx5_list *list,\n \t\t\t struct mlx5_list_entry *entry);\n+struct mlx5_list_entry *mlx5_hrxq_clone_cb(struct mlx5_list *list,\n+\t\t\t\t\t   struct mlx5_list_entry *entry,\n+\t\t\t\t\t   void *cb_ctx __rte_unused);\n+void mlx5_hrxq_clone_free_cb(struct mlx5_list *list,\n+\t\t\t     struct mlx5_list_entry *entry);\n uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n \t\t       struct mlx5_flow_rss_desc *rss_desc);\n int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 8395332507..f8769da8dc 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -1857,20 +1857,18 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_ind_table_obj *ind_tbl;\n \n+\trte_rwlock_read_lock(&priv->ind_tbls_lock);\n \tLIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {\n \t\tif ((ind_tbl->queues_n == queues_n) &&\n \t\t    (memcmp(ind_tbl->queues, queues,\n \t\t\t    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))\n-\t\t     == 0))\n+\t\t     == 0)) {\n+\t\t\t__atomic_fetch_add(&ind_tbl->refcnt, 1,\n+\t\t\t\t\t   __ATOMIC_RELAXED);\n \t\t\tbreak;\n+\t\t}\n \t}\n-\tif (ind_tbl) {\n-\t\tunsigned int i;\n-\n-\t\t__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);\n-\t\tfor (i = 0; i != ind_tbl->queues_n; ++i)\n-\t\t\tmlx5_rxq_get(dev, ind_tbl->queues[i]);\n-\t}\n+\trte_rwlock_read_unlock(&priv->ind_tbls_lock);\n \treturn ind_tbl;\n }\n \n@@ -1893,19 +1891,20 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,\n \t\t\t   bool standalone)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tunsigned int i;\n+\tunsigned int i, ret;\n \n-\tif (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)\n-\t\tpriv->obj_ops.ind_table_destroy(ind_tbl);\n+\trte_rwlock_write_lock(&priv->ind_tbls_lock);\n+\tret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);\n+\tif (!ret && !standalone)\n+\t\tLIST_REMOVE(ind_tbl, next);\n+\trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n+\tif (ret)\n+\t\treturn 1;\n+\tpriv->obj_ops.ind_table_destroy(ind_tbl);\n \tfor (i = 0; i != ind_tbl->queues_n; ++i)\n \t\tclaim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));\n-\tif (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {\n-\t\tif (!standalone)\n-\t\t\tLIST_REMOVE(ind_tbl, next);\n-\t\tmlx5_free(ind_tbl);\n-\t\treturn 0;\n-\t}\n-\treturn 1;\n+\tmlx5_free(ind_tbl);\n+\treturn 0;\n }\n \n /**\n@@ -1924,12 +1923,14 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)\n \tstruct mlx5_ind_table_obj *ind_tbl;\n \tint ret = 0;\n \n+\trte_rwlock_read_lock(&priv->ind_tbls_lock);\n \tLIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"port %u indirection table obj %p still referenced\",\n \t\t\tdev->data->port_id, (void *)ind_tbl);\n \t\t++ret;\n \t}\n+\trte_rwlock_read_unlock(&priv->ind_tbls_lock);\n \treturn ret;\n }\n \n@@ -2015,8 +2016,11 
@@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,\n \t\tmlx5_free(ind_tbl);\n \t\treturn NULL;\n \t}\n-\tif (!standalone)\n+\tif (!standalone) {\n+\t\trte_rwlock_write_lock(&priv->ind_tbls_lock);\n \t\tLIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);\n+\t\trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n+\t}\n \treturn ind_tbl;\n }\n \n@@ -2328,6 +2332,35 @@ mlx5_hrxq_create_cb(struct mlx5_list *list,\n \treturn hrxq ? &hrxq->entry : NULL;\n }\n \n+struct mlx5_list_entry *\n+mlx5_hrxq_clone_cb(struct mlx5_list *list,\n+\t\t    struct mlx5_list_entry *entry,\n+\t\t    void *cb_ctx __rte_unused)\n+{\n+\tstruct rte_eth_dev *dev = list->ctx;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hrxq *hrxq;\n+\tuint32_t hrxq_idx = 0;\n+\n+\thrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);\n+\tif (!hrxq)\n+\t\treturn NULL;\n+\tmemcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);\n+\thrxq->idx = hrxq_idx;\n+\treturn &hrxq->entry;\n+}\n+\n+void\n+mlx5_hrxq_clone_free_cb(struct mlx5_list *list,\n+\t\t    struct mlx5_list_entry *entry)\n+{\n+\tstruct rte_eth_dev *dev = list->ctx;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);\n+\n+\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);\n+}\n+\n /**\n  * Get an Rx Hash queue.\n  *\ndiff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c\nindex a2b5accb84..51cca68ea9 100644\n--- a/drivers/net/mlx5/mlx5_utils.c\n+++ b/drivers/net/mlx5/mlx5_utils.c\n@@ -9,57 +9,68 @@\n #include \"mlx5_utils.h\"\n \n \n-/********************* MLX5 list ************************/\n-\n-static struct mlx5_list_entry *\n-mlx5_list_default_create_cb(struct mlx5_list *list,\n-\t\t\t     struct mlx5_list_entry *entry __rte_unused,\n-\t\t\t     void *ctx __rte_unused)\n-{\n-\treturn mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);\n-}\n-\n-static void\n-mlx5_list_default_remove_cb(struct mlx5_list *list __rte_unused,\n-\t\t\t     struct mlx5_list_entry *entry)\n-{\n-\tmlx5_free(entry);\n-}\n+/********************* mlx5 list ************************/\n \n int\n-mlx5_list_create(struct mlx5_list *list, const char *name,\n-\t\t     uint32_t entry_size, void *ctx,\n-\t\t     mlx5_list_create_cb cb_create,\n-\t\t     mlx5_list_match_cb cb_match,\n-\t\t     mlx5_list_remove_cb cb_remove)\n+mlx5_list_create(struct mlx5_list *list, const char *name, void *ctx,\n+\t\t mlx5_list_create_cb cb_create,\n+\t\t mlx5_list_match_cb cb_match,\n+\t\t mlx5_list_remove_cb cb_remove,\n+\t\t mlx5_list_clone_cb cb_clone,\n+\t\t mlx5_list_clone_free_cb cb_clone_free)\n {\n+\tint i;\n+\n \tMLX5_ASSERT(list);\n-\tif (!cb_match || (!cb_create ^ !cb_remove))\n+\tif (!cb_match || !cb_create || !cb_remove || !cb_clone ||\n+\t    !cb_clone_free)\n \t\treturn -1;\n \tif (name)\n \t\tsnprintf(list->name, sizeof(list->name), \"%s\", name);\n-\tlist->entry_sz = entry_size;\n \tlist->ctx = ctx;\n-\tlist->cb_create = cb_create ? cb_create : mlx5_list_default_create_cb;\n+\tlist->cb_create = cb_create;\n \tlist->cb_match = cb_match;\n-\tlist->cb_remove = cb_remove ? 
cb_remove : mlx5_list_default_remove_cb;\n+\tlist->cb_remove = cb_remove;\n+\tlist->cb_clone = cb_clone;\n+\tlist->cb_clone_free = cb_clone_free;\n \trte_rwlock_init(&list->lock);\n \tDRV_LOG(DEBUG, \"mlx5 list %s initialized.\", list->name);\n-\tLIST_INIT(&list->head);\n+\tfor (i = 0; i <= RTE_MAX_LCORE; i++)\n+\t\tLIST_INIT(&list->cache[i].h);\n \treturn 0;\n }\n \n static struct mlx5_list_entry *\n-__list_lookup(struct mlx5_list *list, void *ctx, bool reuse)\n+__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)\n {\n-\tstruct mlx5_list_entry *entry;\n-\n-\tLIST_FOREACH(entry, &list->head, next) {\n-\t\tif (list->cb_match(list, entry, ctx))\n+\tstruct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);\n+\tuint32_t ret;\n+\n+\twhile (entry != NULL) {\n+\t\tstruct mlx5_list_entry *nentry = LIST_NEXT(entry, next);\n+\n+\t\tif (list->cb_match(list, entry, ctx)) {\n+\t\t\tif (lcore_index < RTE_MAX_LCORE) {\n+\t\t\t\tret = __atomic_load_n(&entry->ref_cnt,\n+\t\t\t\t\t\t      __ATOMIC_ACQUIRE);\n+\t\t\t\tif (ret == 0) {\n+\t\t\t\t\tLIST_REMOVE(entry, next);\n+\t\t\t\t\tlist->cb_clone_free(list, entry);\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tentry = nentry;\n \t\t\tcontinue;\n+\t\t}\n \t\tif (reuse) {\n-\t\t\t__atomic_add_fetch(&entry->ref_cnt, 1,\n-\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\tret = __atomic_add_fetch(&entry->ref_cnt, 1,\n+\t\t\t\t\t\t __ATOMIC_ACQUIRE);\n+\t\t\tif (ret == 1u) {\n+\t\t\t\t/* Entry was invalid before, free it. */\n+\t\t\t\tLIST_REMOVE(entry, next);\n+\t\t\t\tlist->cb_clone_free(list, entry);\n+\t\t\t\tentry = nentry;\n+\t\t\t\tcontinue;\n+\t\t\t}\n \t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p ref++: %u.\",\n \t\t\t\tlist->name, (void *)entry, entry->ref_cnt);\n \t\t}\n@@ -68,96 +79,141 @@ __list_lookup(struct mlx5_list *list, void *ctx, bool reuse)\n \treturn entry;\n }\n \n-static struct mlx5_list_entry *\n-list_lookup(struct mlx5_list *list, void *ctx, bool reuse)\n+struct mlx5_list_entry *\n+mlx5_list_lookup(struct mlx5_list *list, void *ctx)\n {\n-\tstruct mlx5_list_entry *entry;\n+\tstruct mlx5_list_entry *entry = NULL;\n+\tint i;\n \n \trte_rwlock_read_lock(&list->lock);\n-\tentry = __list_lookup(list, ctx, reuse);\n+\tfor (i = 0; i < RTE_MAX_LCORE; i++) {\n+\t\tentry = __list_lookup(list, i, ctx, false);\n+\t\tif (entry)\n+\t\t\tbreak;\n+\t}\n \trte_rwlock_read_unlock(&list->lock);\n \treturn entry;\n }\n \n-struct mlx5_list_entry *\n-mlx5_list_lookup(struct mlx5_list *list, void *ctx)\n+static struct mlx5_list_entry *\n+mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,\n+\t\t       struct mlx5_list_entry *gentry, void *ctx)\n {\n-\treturn list_lookup(list, ctx, false);\n+\tstruct mlx5_list_entry *lentry = list->cb_clone(list, gentry, ctx);\n+\n+\tif (!lentry)\n+\t\treturn NULL;\n+\tlentry->ref_cnt = 1u;\n+\tlentry->gentry = gentry;\n+\tLIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);\n+\treturn lentry;\n }\n \n struct mlx5_list_entry *\n mlx5_list_register(struct mlx5_list *list, void *ctx)\n {\n-\tstruct mlx5_list_entry *entry;\n+\tstruct mlx5_list_entry *entry, *lentry;\n \tuint32_t prev_gen_cnt = 0;\n+\tint lcore_index = rte_lcore_index(rte_lcore_id());\n \n \tMLX5_ASSERT(list);\n-\tprev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);\n+\tMLX5_ASSERT(lcore_index < RTE_MAX_LCORE);\n+\tif (unlikely(lcore_index == -1)) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\t/* Lookup in local cache. 
*/\n+\tlentry = __list_lookup(list, lcore_index, ctx, true);\n+\tif (lentry)\n+\t\treturn lentry;\n \t/* Lookup with read lock, reuse if found. */\n-\tentry = list_lookup(list, ctx, true);\n-\tif (entry)\n-\t\treturn entry;\n+\trte_rwlock_read_lock(&list->lock);\n+\tentry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);\n+\tif (entry == NULL) {\n+\t\tprev_gen_cnt = __atomic_load_n(&list->gen_cnt,\n+\t\t\t\t\t       __ATOMIC_ACQUIRE);\n+\t\trte_rwlock_read_unlock(&list->lock);\n+\t} else {\n+\t\trte_rwlock_read_unlock(&list->lock);\n+\t\treturn mlx5_list_cache_insert(list, lcore_index, entry, ctx);\n+\t}\n \t/* Not found, append with write lock - block read from other threads. */\n \trte_rwlock_write_lock(&list->lock);\n \t/* If list changed by other threads before lock, search again. */\n \tif (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {\n \t\t/* Lookup and reuse w/o read lock. */\n-\t\tentry = __list_lookup(list, ctx, true);\n-\t\tif (entry)\n-\t\t\tgoto done;\n+\t\tentry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);\n+\t\tif (entry) {\n+\t\t\trte_rwlock_write_unlock(&list->lock);\n+\t\t\treturn mlx5_list_cache_insert(list, lcore_index, entry,\n+\t\t\t\t\t\t      ctx);\n+\t\t}\n \t}\n \tentry = list->cb_create(list, entry, ctx);\n-\tif (!entry) {\n-\t\tDRV_LOG(ERR, \"Failed to init mlx5 list %s entry %p.\",\n-\t\t\tlist->name, (void *)entry);\n-\t\tgoto done;\n+\tif (entry) {\n+\t\tlentry = mlx5_list_cache_insert(list, lcore_index, entry, ctx);\n+\t\tif (!lentry) {\n+\t\t\tlist->cb_remove(list, entry);\n+\t\t} else {\n+\t\t\tentry->ref_cnt = 1u;\n+\t\t\tLIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry,\n+\t\t\t\t\t next);\n+\t\t\t__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);\n+\t\t\t__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n+\t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\",\n+\t\t\t\tlist->name, (void *)entry, entry->ref_cnt);\n+\t\t}\n+\n \t}\n-\tentry->ref_cnt = 1;\n-\tLIST_INSERT_HEAD(&list->head, entry, next);\n-\t__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);\n-\t__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n-\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\",\n-\t\tlist->name, (void *)entry, entry->ref_cnt);\n-done:\n \trte_rwlock_write_unlock(&list->lock);\n-\treturn entry;\n+\treturn lentry;\n }\n \n int\n mlx5_list_unregister(struct mlx5_list *list,\n \t\t      struct mlx5_list_entry *entry)\n {\n+\tstruct mlx5_list_entry *gentry = entry->gentry;\n+\n+\tif (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_ACQUIRE) != 0)\n+\t\treturn 1;\n+\tif (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_ACQUIRE) != 0)\n+\t\treturn 1;\n \trte_rwlock_write_lock(&list->lock);\n-\tMLX5_ASSERT(entry && entry->next.le_prev);\n-\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p ref--: %u.\",\n-\t\tlist->name, (void *)entry, entry->ref_cnt);\n-\tif (--entry->ref_cnt) {\n+\tif (__atomic_load_n(&gentry->ref_cnt, __ATOMIC_ACQUIRE) == 0) {\n+\t\t__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);\n+\t\t__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n+\t\tLIST_REMOVE(gentry, next);\n+\t\tlist->cb_remove(list, gentry);\n \t\trte_rwlock_write_unlock(&list->lock);\n-\t\treturn 1;\n+\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p removed.\",\n+\t\t\tlist->name, (void *)gentry);\n+\t\treturn 0;\n \t}\n-\t__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);\n-\t__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n-\tLIST_REMOVE(entry, next);\n-\tlist->cb_remove(list, entry);\n 
\trte_rwlock_write_unlock(&list->lock);\n-\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p removed.\",\n-\t\tlist->name, (void *)entry);\n-\treturn 0;\n+\treturn 1;\n }\n \n void\n mlx5_list_destroy(struct mlx5_list *list)\n {\n \tstruct mlx5_list_entry *entry;\n+\tint i;\n \n \tMLX5_ASSERT(list);\n-\t/* no LIST_FOREACH_SAFE, using while instead */\n-\twhile (!LIST_EMPTY(&list->head)) {\n-\t\tentry = LIST_FIRST(&list->head);\n-\t\tLIST_REMOVE(entry, next);\n-\t\tlist->cb_remove(list, entry);\n-\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p destroyed.\",\n-\t\t\tlist->name, (void *)entry);\n+\tfor (i = 0; i <= RTE_MAX_LCORE; i++) {\n+\t\twhile (!LIST_EMPTY(&list->cache[i].h)) {\n+\t\t\tentry = LIST_FIRST(&list->cache[i].h);\n+\t\t\tLIST_REMOVE(entry, next);\n+\t\t\tif (i == RTE_MAX_LCORE) {\n+\t\t\t\tlist->cb_remove(list, entry);\n+\t\t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p \"\n+\t\t\t\t\t\"destroyed.\", list->name,\n+\t\t\t\t\t(void *)entry);\n+\t\t\t} else {\n+\t\t\t\tlist->cb_clone_free(list, entry);\n+\t\t\t}\n+\t\t}\n \t}\n \tmemset(list, 0, sizeof(*list));\n }\ndiff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h\nindex 593793345d..24ae2b2ccb 100644\n--- a/drivers/net/mlx5/mlx5_utils.h\n+++ b/drivers/net/mlx5/mlx5_utils.h\n@@ -309,9 +309,14 @@ struct mlx5_list;\n  */\n struct mlx5_list_entry {\n \tLIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */\n-\tuint32_t ref_cnt; /* Reference count. */\n+\tuint32_t ref_cnt; /* 0 means, entry is invalid. */\n+\tstruct mlx5_list_entry *gentry;\n };\n \n+struct mlx5_list_cache {\n+\tLIST_HEAD(mlx5_list_head, mlx5_list_entry) h;\n+} __rte_cache_aligned;\n+\n /**\n  * Type of callback function for entry removal.\n  *\n@@ -339,6 +344,13 @@ typedef void (*mlx5_list_remove_cb)(struct mlx5_list *list,\n typedef int (*mlx5_list_match_cb)(struct mlx5_list *list,\n \t\t\t\t   struct mlx5_list_entry *entry, void *ctx);\n \n+typedef struct mlx5_list_entry *(*mlx5_list_clone_cb)\n+\t\t\t\t (struct mlx5_list *list,\n+\t\t\t\t  struct mlx5_list_entry *entry, void *ctx);\n+\n+typedef void (*mlx5_list_clone_free_cb)(struct mlx5_list *list,\n+\t\t\t\t\t struct mlx5_list_entry *entry);\n+\n /**\n  * Type of function for user defined mlx5 list entry creation.\n  *\n@@ -375,15 +387,17 @@ typedef struct mlx5_list_entry *(*mlx5_list_create_cb)\n  */\n struct mlx5_list {\n \tchar name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */\n-\tuint32_t entry_sz; /**< Entry size, 0: use create callback. */\n-\trte_rwlock_t lock; /* read/write lock. */\n \tuint32_t gen_cnt; /* List modification will update generation count. */\n \tuint32_t count; /* number of entries in list. */\n \tvoid *ctx; /* user objects target to callback. */\n+\trte_rwlock_t lock; /* read/write lock. */\n \tmlx5_list_create_cb cb_create; /**< entry create callback. */\n \tmlx5_list_match_cb cb_match; /**< entry match callback. */\n \tmlx5_list_remove_cb cb_remove; /**< entry remove callback. */\n-\tLIST_HEAD(mlx5_list_head, mlx5_list_entry) head;\n+\tmlx5_list_clone_cb cb_clone; /**< entry clone callback. */\n+\tmlx5_list_clone_free_cb cb_clone_free;\n+\tstruct mlx5_list_cache cache[RTE_MAX_LCORE + 1];\n+\t/* Lcore cache, last index is the global cache. 
*/\n };\n \n /**\n@@ -393,8 +407,6 @@ struct mlx5_list {\n  *   Pointer to the hast list table.\n  * @param name\n  *   Name of the mlx5 list.\n- * @param entry_size\n- *   Entry size to allocate, 0 to allocate by creation callback.\n  * @param ctx\n  *   Pointer to the list context data.\n  * @param cb_create\n@@ -407,10 +419,12 @@ struct mlx5_list {\n  *   0 on success, otherwise failure.\n  */\n int mlx5_list_create(struct mlx5_list *list,\n-\t\t\t const char *name, uint32_t entry_size, void *ctx,\n+\t\t\t const char *name, void *ctx,\n \t\t\t mlx5_list_create_cb cb_create,\n \t\t\t mlx5_list_match_cb cb_match,\n-\t\t\t mlx5_list_remove_cb cb_remove);\n+\t\t\t mlx5_list_remove_cb cb_remove,\n+\t\t\t mlx5_list_clone_cb cb_clone,\n+\t\t\t mlx5_list_clone_free_cb cb_clone_free);\n \n /**\n  * Search an entry matching the key.\ndiff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c\nindex b10c47fee3..f6cf1928b2 100644\n--- a/drivers/net/mlx5/windows/mlx5_os.c\n+++ b/drivers/net/mlx5/windows/mlx5_os.c\n@@ -608,9 +608,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\terr = ENOTSUP;\n \t\t\tgoto error;\n \t}\n-\tmlx5_list_create(&priv->hrxqs, \"hrxq\", 0, eth_dev,\n+\tmlx5_list_create(&priv->hrxqs, \"hrxq\", eth_dev,\n \t\tmlx5_hrxq_create_cb, mlx5_hrxq_match_cb,\n-\t\tmlx5_hrxq_remove_cb);\n+\t\tmlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,\n+\t\tmlx5_hrxq_clone_free_cb);\n \t/* Query availability of metadata reg_c's. */\n \terr = mlx5_flow_discover_mreg_c(eth_dev);\n \tif (err < 0) {\n",
    "prefixes": [
        "v2",
        "07/22"
    ]
}
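
As a follow-up, a hedged sketch of how the fields in the response above might be consumed, for example to read the aggregated CI result and download the per-patch mbox for `git am`; the output filename is illustrative:

    import requests

    patch = requests.get("https://patches.dpdk.org/api/patches/95061/").json()

    # "check" is the aggregated result of the checks listed under "checks".
    print("check:", patch["check"])                # e.g. "success"
    print("series:", patch["series"][0]["name"])   # parent series name

    # Download the mbox so the patch can be applied locally with `git am`.
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    with open("95061.mbox", "wb") as f:
        f.write(mbox.content)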