get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
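
Example (read-only): the GET request shown below can also be issued programmatically. A minimal sketch using Python and the requests library; the URL and field names are taken directly from the example response that follows, nothing else is assumed.

import requests

# Fetch a single patch from the Patchwork REST API
# (no authentication is needed for read access).
resp = requests.get("https://patches.dpdk.org/api/patches/95059/", timeout=30)
resp.raise_for_status()
patch = resp.json()

# A few of the fields visible in the example response below.
print(patch["name"])                # patch subject
print(patch["state"])               # e.g. "superseded"
print(patch["submitter"]["email"])  # submitter address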

GET /api/patches/95059/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95059,
    "url": "https://patches.dpdk.org/api/patches/95059/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210630124609.8711-5-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210630124609.8711-5-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210630124609.8711-5-suanmingm@nvidia.com",
    "date": "2021-06-30T12:45:51",
    "name": "[v2,04/22] net/mlx5: replace flow list with index pool",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "78d326ac3edbb7361464d5082099452b2a6fa420",
    "submitter": {
        "id": 1887,
        "url": "https://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210630124609.8711-5-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 17549,
            "url": "https://patches.dpdk.org/api/series/17549/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17549",
            "date": "2021-06-30T12:45:47",
            "name": "net/mlx5: insertion rate optimization",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/17549/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/95059/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/95059/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 21038A0A0F;\n\tWed, 30 Jun 2021 14:47:07 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8727141282;\n\tWed, 30 Jun 2021 14:46:50 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2060.outbound.protection.outlook.com [40.107.243.60])\n by mails.dpdk.org (Postfix) with ESMTP id 4E2634127D\n for <dev@dpdk.org>; Wed, 30 Jun 2021 14:46:47 +0200 (CEST)",
            "from BN6PR20CA0050.namprd20.prod.outlook.com (2603:10b6:404:151::12)\n by DM4PR12MB5342.namprd12.prod.outlook.com (2603:10b6:5:39f::5) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4264.20; Wed, 30 Jun\n 2021 12:46:43 +0000",
            "from BN8NAM11FT042.eop-nam11.prod.protection.outlook.com\n (2603:10b6:404:151:cafe::cb) by BN6PR20CA0050.outlook.office365.com\n (2603:10b6:404:151::12) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.22 via Frontend\n Transport; Wed, 30 Jun 2021 12:46:43 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT042.mail.protection.outlook.com (10.13.177.85) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4287.22 via Frontend Transport; Wed, 30 Jun 2021 12:46:43 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 30 Jun\n 2021 12:46:32 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Gl1m01jJpaXcnJopbq08hiTNq6AfNOu88FBWXlU7rldbBHQ+98S1w159wNaPY+Sd04iZBTJ+nRQ26+0q8G7sM3HZdcu3nq9jcMMnAm9PZi6osDip5bfLeuRE4MSWkVdX88delClkNVL3KaSOB14FJZZoqosFpk1sSmz0ppuDghjXtOvyIHaOAZJV29Q/gQjV2kA2hvCvyxpbZ5mQiq98HY01+06kF2MB/vxII7+lOYu2LoI8yPkBOXEcOIHNGCfsyp4b3tIIZDcr5jVkxBQdEAm4y/njZ002T9W6uFyAXjN4iKymCXAz8EQVOLYTFVJ5AfmF3paJcAFrJXppWYOIOA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=SHtFSycpD02ubeP0fXQk/ILHiwPwRZqbMbkNl8uxoCQ=;\n b=WHkCszMbwto1QwZ7YpyU5WIlxG9Z2zUDsCKukJrXIDhJ5bqAmN3+scEXqO4OILRdmZj8YAUtC5thNstcUQ0MIdgSU6hqag94XRsv42/3HmJc+NHnmjKu2U7yZRvKdUY2B7Q7985IwkNS41HB0hjV9irHxe4FDyUVa4QkuCerOBYMjS90X/ncZcAeN9iX7S5Wr/0PzbEICF+jLps5i75Xv9R25cYnRU6alNmYehcMWnP/WQX1WYWSeo5zMchl/QhOURvoUXwQtu/aiiQZdVHJ9W8TW1oTVMS4WchtcJuwY18G0psAuA3TGXZfuIUJ2IjamgKtZujaR15/8fPaLiAM3g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=SHtFSycpD02ubeP0fXQk/ILHiwPwRZqbMbkNl8uxoCQ=;\n b=oOPcCozshldZcG883JwYMBrDGUC+kUBGEGLz/J8FixEXZBqekWLfP5h1IVdl+VfOn+07oRpDnSq3r90zmqQ+qvCt/uJN+Ck0WhPucS9mmeZLsjYuGEEGwzV5UmzXMjXyg5LaN3hX3i5QA/uEqB98CVwDqcfbpC7P9pwgrAZkWHrXcDT6/ouHrEYfN52xcvupsr+5BFHDQyeDvDK/9stMAr+O/rmSWS8UO1m9Y0TtJM88JCgjDjbvTY0drv9SJeUjJuPSLknyAwJiV70ZPctlygTyyi7e5Ejd7G107KPSKFQmwQ1v/FrRppgt3ppkmCLhBVkY/P+sHwYuTLMoR42fbQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Date": "Wed, 30 Jun 2021 15:45:51 +0300",
        "Message-ID": "<20210630124609.8711-5-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20210630124609.8711-1-suanmingm@nvidia.com>",
        "References": "<20210527093403.1153127-1-suanmingm@nvidia.com>\n <20210630124609.8711-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "18d8f905-d7d6-4c2c-28f4-08d93bc51d43",
        "X-MS-TrafficTypeDiagnostic": "DM4PR12MB5342:",
        "X-Microsoft-Antispam-PRVS": "\n <DM4PR12MB53426284F91AE9D7C15434E7C1019@DM4PR12MB5342.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:269;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n JRXOig6Q4JbSEp6e0MuKteVmIgD8vYJh9LBWANDKItuFhfL6KFg3X+7z0Ir9kQK4ygmcQ0vUJ6yxwGSojzAoLsCZnoEAStIrZ4bxrfBvAlizs0U0lQ2+OFgBIlHVEyh7fza89IZylUnwmd5sp+H1NLXRuWhG3+A/9ikGIFDUegG6X5CJYECg7f3vhK0ED443f62270BCncnb3h3rsLAX9rTgcQJmioBq4C6F0Io9LTP72PLgPbVQotKh1b9KWjFbdiLgR+RLI+b1k6/aBeZMmuZJ35C7cEr4fooll/uDJeCiESHeFvHEOGjUtHft5Kt7ujO/YFE/mpgwxJz+3AlYGDlrL0ntGg4ryA0v28qjc6yguAU3Lomhsc6fxHPU8y9XSBmjXgAcff3FWRR97mYb/0qUu6mC7PsDpsZEe2zi8NrVBaAmRb6v5RQBvHry3sDWo86vverev2IYJOc0VIap/dfNVbOR136IYX4ZJ534oLd4NF3KQoHtgUlJbuoOlR+Ed78Np8OtbUps7gEraZvajJ1tPqLwB6yaz3n3rxTrzK3bkZKZ5BqFYYhG3D1KFnQ3+FSYyR4lZIRVhnhz3K0DEI8SjRw0T6Fr0aVYiV6BrXrbm/zVDa2xz3Mr+6l31OXtcCBFuruudUMuLB86cCuW9at7uO2fmtvwk3Sk11ba0vhPtGoNPr70u1d69fXWePKGsDulUsqZbk6LrQGuLcIbYjXTs7DFRRSmrnMZ4yM5s9U=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(396003)(136003)(39860400002)(376002)(346002)(36840700001)(46966006)(82310400003)(82740400003)(7696005)(8936002)(316002)(6286002)(6636002)(26005)(36860700001)(55016002)(7636003)(186003)(336012)(54906003)(86362001)(83380400001)(356005)(4326008)(110136005)(8676002)(70206006)(478600001)(426003)(36756003)(47076005)(70586007)(5660300002)(1076003)(2616005)(2906002)(30864003)(16526019)(6666004)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Jun 2021 12:46:43.3513 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 18d8f905-d7d6-4c2c-28f4-08d93bc51d43",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT042.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM4PR12MB5342",
        "Subject": "[dpdk-dev] [PATCH v2 04/22] net/mlx5: replace flow list with index\n pool",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The flow list is used to save the create flows and to be used only\nwhen port closes all the flows need to be flushed.\n\nThis commit takes advantage of the index pool foreach operation to\nflush all the allocated flows.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c   |  48 +++++++++-\n drivers/net/mlx5/mlx5.c            |   9 +-\n drivers/net/mlx5/mlx5.h            |  14 ++-\n drivers/net/mlx5/mlx5_flow.c       | 149 ++++++++++-------------------\n drivers/net/mlx5/mlx5_flow.h       |   2 +-\n drivers/net/mlx5/mlx5_flow_dv.c    |   5 +\n drivers/net/mlx5/mlx5_trigger.c    |   8 +-\n drivers/net/mlx5/windows/mlx5_os.c |   1 -\n 8 files changed, 126 insertions(+), 110 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex 92b3009786..31cc8d9eb8 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -69,6 +69,44 @@ static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;\n /* Process local data for secondary processes. */\n static struct mlx5_local_data mlx5_local_data;\n \n+/* rte flow indexed pool configuration. */\n+static struct mlx5_indexed_pool_config icfg[] = {\n+\t{\n+\t\t.size = sizeof(struct rte_flow),\n+\t\t.trunk_size = 64,\n+\t\t.need_lock = 1,\n+\t\t.release_mem_en = 0,\n+\t\t.malloc = mlx5_malloc,\n+\t\t.free = mlx5_free,\n+\t\t.per_core_cache = 0,\n+\t\t.type = \"ctl_flow_ipool\",\n+\t},\n+\t{\n+\t\t.size = sizeof(struct rte_flow),\n+\t\t.trunk_size = 64,\n+\t\t.grow_trunk = 3,\n+\t\t.grow_shift = 2,\n+\t\t.need_lock = 1,\n+\t\t.release_mem_en = 0,\n+\t\t.malloc = mlx5_malloc,\n+\t\t.free = mlx5_free,\n+\t\t.per_core_cache = 1 << 14,\n+\t\t.type = \"rte_flow_ipool\",\n+\t},\n+\t{\n+\t\t.size = sizeof(struct rte_flow),\n+\t\t.trunk_size = 64,\n+\t\t.grow_trunk = 3,\n+\t\t.grow_shift = 2,\n+\t\t.need_lock = 1,\n+\t\t.release_mem_en = 0,\n+\t\t.malloc = mlx5_malloc,\n+\t\t.free = mlx5_free,\n+\t\t.per_core_cache = 0,\n+\t\t.type = \"mcp_flow_ipool\",\n+\t},\n+};\n+\n /**\n  * Set the completion channel file descriptor interrupt as non-blocking.\n  *\n@@ -823,6 +861,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \tint own_domain_id = 0;\n \tuint16_t port_id;\n \tstruct mlx5_port_info vport_info = { .query_flags = 0 };\n+\tint i;\n \n \t/* Determine if this port representor is supposed to be spawned. */\n \tif (switch_info->representor && dpdk_dev->devargs &&\n@@ -1566,7 +1605,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\t\t      mlx5_ifindex(eth_dev),\n \t\t\t\t      eth_dev->data->mac_addrs,\n \t\t\t\t      MLX5_MAX_MAC_ADDRESSES);\n-\tpriv->flows = 0;\n \tpriv->ctrl_flows = 0;\n \trte_spinlock_init(&priv->flow_list_lock);\n \tTAILQ_INIT(&priv->flow_meters);\n@@ -1600,6 +1638,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \tmlx5_set_min_inline(spawn, config);\n \t/* Store device configuration on private structure. */\n \tpriv->config = *config;\n+\tfor (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {\n+\t\ticfg[i].release_mem_en = !!config->reclaim_mode;\n+\t\tif (config->reclaim_mode)\n+\t\t\ticfg[i].per_core_cache = 0;\n+\t\tpriv->flows[i] = mlx5_ipool_create(&icfg[i]);\n+\t\tif (!priv->flows[i])\n+\t\t\tgoto error;\n+\t}\n \t/* Create context for virtual machine VLAN workaround. 
*/\n \tpriv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);\n \tif (config->dv_flow_en) {\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex cf1815cb74..fcfc3dcdca 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -322,7 +322,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.grow_trunk = 3,\n \t\t.grow_shift = 2,\n \t\t.need_lock = 1,\n-\t\t.release_mem_en = 1,\n+\t\t.release_mem_en = 0,\n+\t\t.per_core_cache = 1 << 19,\n \t\t.malloc = mlx5_malloc,\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_flow_handle_ipool\",\n@@ -792,8 +793,10 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,\n \t\t\t\tMLX5_FLOW_HANDLE_VERBS_SIZE;\n \t\t\tbreak;\n \t\t}\n-\t\tif (config->reclaim_mode)\n+\t\tif (config->reclaim_mode) {\n \t\t\tcfg.release_mem_en = 1;\n+\t\t\tcfg.per_core_cache = 0;\n+\t\t}\n \t\tsh->ipool[i] = mlx5_ipool_create(&cfg);\n \t}\n }\n@@ -1528,7 +1531,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t * If all the flows are already flushed in the device stop stage,\n \t * then this will return directly without any action.\n \t */\n-\tmlx5_flow_list_flush(dev, &priv->flows, true);\n+\tmlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);\n \tmlx5_action_handle_flush(dev);\n \tmlx5_flow_meter_flush(dev, NULL);\n \t/* Prevent crashes when queues are still in use. */\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 32b2817bf2..5fa5d3cb99 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -71,6 +71,14 @@ enum mlx5_reclaim_mem_mode {\n \tMLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */\n };\n \n+/* The type of flow. */\n+enum mlx5_flow_type {\n+\tMLX5_FLOW_TYPE_CTL, /* Control flow. */\n+\tMLX5_FLOW_TYPE_GEN, /* General flow. */\n+\tMLX5_FLOW_TYPE_MCP, /* MCP flow. */\n+\tMLX5_FLOW_TYPE_MAXI,\n+};\n+\n /* Hash and cache list callback context. */\n struct mlx5_flow_cb_ctx {\n \tstruct rte_eth_dev *dev;\n@@ -1344,7 +1352,8 @@ struct mlx5_priv {\n \tunsigned int (*reta_idx)[]; /* RETA index table. */\n \tunsigned int reta_idx_n; /* RETA index size. */\n \tstruct mlx5_drop drop_queue; /* Flow drop queues. */\n-\tuint32_t flows; /* RTE Flow rules. */\n+\tstruct mlx5_indexed_pool *flows[MLX5_FLOW_TYPE_MAXI];\n+\t/* RTE Flow rules. */\n \tuint32_t ctrl_flows; /* Control flow rules. */\n \trte_spinlock_t flow_list_lock;\n \tstruct mlx5_obj_ops obj_ops; /* HW objects operations. 
*/\n@@ -1596,7 +1605,8 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,\n \t\t\t\t  struct rte_flow_error *error);\n int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t      struct rte_flow_error *error);\n-void mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active);\n+void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,\n+\t\t\t  bool active);\n int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);\n int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t    const struct rte_flow_action *action, void *data,\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex c5d4a95a8f..20ce0ed424 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -3095,31 +3095,6 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,\n \t\t\t\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n }\n \n-/**\n- * Release resource related QUEUE/RSS action split.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param flow\n- *   Flow to release id's from.\n- */\n-static void\n-flow_mreg_split_qrss_release(struct rte_eth_dev *dev,\n-\t\t\t     struct rte_flow *flow)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tuint32_t handle_idx;\n-\tstruct mlx5_flow_handle *dev_handle;\n-\n-\tSILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,\n-\t\t       handle_idx, dev_handle, next)\n-\t\tif (dev_handle->split_flow_id &&\n-\t\t    !dev_handle->is_meter_flow_id)\n-\t\t\tmlx5_ipool_free(priv->sh->ipool\n-\t\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],\n-\t\t\t\t\tdev_handle->split_flow_id);\n-}\n-\n static int\n flow_null_validate(struct rte_eth_dev *dev __rte_unused,\n \t\t   const struct rte_flow_attr *attr __rte_unused,\n@@ -3415,7 +3390,6 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)\n \tconst struct mlx5_flow_driver_ops *fops;\n \tenum mlx5_flow_drv_type type = flow->drv_type;\n \n-\tflow_mreg_split_qrss_release(dev, flow);\n \tMLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);\n \tfops = flow_get_drv_ops(type);\n \tfops->destroy(dev, flow);\n@@ -3998,14 +3972,14 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,\n \n /* Declare flow create/destroy prototype in advance. 
*/\n static uint32_t\n-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,\n \t\t const struct rte_flow_attr *attr,\n \t\t const struct rte_flow_item items[],\n \t\t const struct rte_flow_action actions[],\n \t\t bool external, struct rte_flow_error *error);\n \n static void\n-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,\n+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,\n \t\t  uint32_t flow_idx);\n \n int\n@@ -4127,8 +4101,8 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,\n \t * be applied, removed, deleted in ardbitrary order\n \t * by list traversing.\n \t */\n-\tmcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,\n-\t\t\t\t\t actions, false, error);\n+\tmcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,\n+\t\t\t\t\t&attr, items, actions, false, error);\n \tif (!mcp_res->rix_flow) {\n \t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);\n \t\treturn NULL;\n@@ -4190,7 +4164,7 @@ flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n \tMLX5_ASSERT(mcp_res->rix_flow);\n-\tflow_list_destroy(dev, NULL, mcp_res->rix_flow);\n+\tflow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);\n \tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);\n }\n \n@@ -6093,7 +6067,7 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,\n  *   A flow index on success, 0 otherwise and rte_errno is set.\n  */\n static uint32_t\n-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,\n \t\t const struct rte_flow_attr *attr,\n \t\t const struct rte_flow_item items[],\n \t\t const struct rte_flow_action original_actions[],\n@@ -6161,7 +6135,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \t\t\t\texternal, hairpin_flow, error);\n \tif (ret < 0)\n \t\tgoto error_before_hairpin_split;\n-\tflow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);\n+\tflow = mlx5_ipool_zmalloc(priv->flows[type], &idx);\n \tif (!flow) {\n \t\trte_errno = ENOMEM;\n \t\tgoto error_before_hairpin_split;\n@@ -6291,12 +6265,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \t\tif (ret < 0)\n \t\t\tgoto error;\n \t}\n-\tif (list) {\n-\t\trte_spinlock_lock(&priv->flow_list_lock);\n-\t\tILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,\n-\t\t\t     flow, next);\n-\t\trte_spinlock_unlock(&priv->flow_list_lock);\n-\t}\n+\tflow->type = type;\n \tflow_rxq_flags_set(dev, flow);\n \trte_free(translated_actions);\n \ttunnel = flow_tunnel_from_rule(wks->flows);\n@@ -6318,7 +6287,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \t\t\tmlx5_ipool_get\n \t\t\t(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],\n \t\t\trss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);\n-\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);\n+\tmlx5_ipool_free(priv->flows[type], idx);\n \trte_errno = ret; /* Restore rte_errno. 
*/\n \tret = rte_errno;\n \trte_errno = ret;\n@@ -6370,10 +6339,9 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)\n \t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n \t\t},\n \t};\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct rte_flow_error error;\n \n-\treturn (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,\n+\treturn (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,\n \t\t\t\t\t\t   &attr, &pattern,\n \t\t\t\t\t\t   actions, false, &error);\n }\n@@ -6425,8 +6393,6 @@ mlx5_flow_create(struct rte_eth_dev *dev,\n \t\t const struct rte_flow_action actions[],\n \t\t struct rte_flow_error *error)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\n \t/*\n \t * If the device is not started yet, it is not allowed to created a\n \t * flow from application. PMD default flows and traffic control flows\n@@ -6442,8 +6408,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,\n \t\treturn NULL;\n \t}\n \n-\treturn (void *)(uintptr_t)flow_list_create(dev, &priv->flows,\n-\t\t\t\t  attr, items, actions, true, error);\n+\treturn (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN,\n+\t\t\t\t\t\t   attr, items, actions,\n+\t\t\t\t\t\t   true, error);\n }\n \n /**\n@@ -6451,24 +6418,19 @@ mlx5_flow_create(struct rte_eth_dev *dev,\n  *\n  * @param dev\n  *   Pointer to Ethernet device.\n- * @param list\n- *   Pointer to the Indexed flow list. If this parameter NULL,\n- *   there is no flow removal from the list. Be noted that as\n- *   flow is add to the indexed list, memory of the indexed\n- *   list points to maybe changed as flow destroyed.\n  * @param[in] flow_idx\n  *   Index of flow to destroy.\n  */\n static void\n-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,\n+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,\n \t\t  uint32_t flow_idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool\n-\t\t\t\t\t       [MLX5_IPOOL_RTE_FLOW], flow_idx);\n+\tstruct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);\n \n \tif (!flow)\n \t\treturn;\n+\tMLX5_ASSERT(flow->type == type);\n \t/*\n \t * Update RX queue flags only if port is started, otherwise it is\n \t * already clean.\n@@ -6476,12 +6438,6 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,\n \tif (dev->data->dev_started)\n \t\tflow_rxq_flags_trim(dev, flow);\n \tflow_drv_destroy(dev, flow);\n-\tif (list) {\n-\t\trte_spinlock_lock(&priv->flow_list_lock);\n-\t\tILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,\n-\t\t\t     flow_idx, flow, next);\n-\t\trte_spinlock_unlock(&priv->flow_list_lock);\n-\t}\n \tif (flow->tunnel) {\n \t\tstruct mlx5_flow_tunnel *tunnel;\n \n@@ -6491,7 +6447,7 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,\n \t\t\tmlx5_flow_tunnel_free(dev, tunnel);\n \t}\n \tflow_mreg_del_copy_action(dev, flow);\n-\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);\n+\tmlx5_ipool_free(priv->flows[type], flow_idx);\n }\n \n /**\n@@ -6499,18 +6455,21 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,\n  *\n  * @param dev\n  *   Pointer to Ethernet device.\n- * @param list\n- *   Pointer to the Indexed flow list.\n+ * @param type\n+ *   Flow type to be flushed.\n  * @param active\n  *   If flushing is called avtively.\n  */\n void\n-mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)\n+mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,\n+\t\t     bool active)\n {\n-\tuint32_t 
num_flushed = 0;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tuint32_t num_flushed = 0, fidx = 1;\n+\tstruct rte_flow *flow;\n \n-\twhile (*list) {\n-\t\tflow_list_destroy(dev, list, *list);\n+\tMLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {\n+\t\tflow_list_destroy(dev, type, fidx);\n \t\tnum_flushed++;\n \t}\n \tif (active) {\n@@ -6682,18 +6641,19 @@ mlx5_flow_pop_thread_workspace(void)\n  * @return the number of flows not released.\n  */\n int\n-mlx5_flow_verify(struct rte_eth_dev *dev)\n+mlx5_flow_verify(struct rte_eth_dev *dev __rte_unused)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct rte_flow *flow;\n-\tuint32_t idx;\n-\tint ret = 0;\n+\tuint32_t idx = 0;\n+\tint ret = 0, i;\n \n-\tILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,\n-\t\t      flow, next) {\n-\t\tDRV_LOG(DEBUG, \"port %u flow %p still referenced\",\n-\t\t\tdev->data->port_id, (void *)flow);\n-\t\t++ret;\n+\tfor (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {\n+\t\tMLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {\n+\t\t\tDRV_LOG(DEBUG, \"port %u flow %p still referenced\",\n+\t\t\t\tdev->data->port_id, (void *)flow);\n+\t\t\tret++;\n+\t\t}\n \t}\n \treturn ret;\n }\n@@ -6713,7 +6673,6 @@ int\n mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,\n \t\t\t    uint32_t queue)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_attr attr = {\n \t\t.egress = 1,\n \t\t.priority = 0,\n@@ -6746,8 +6705,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,\n \tactions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;\n \tactions[0].conf = &jump;\n \tactions[1].type = RTE_FLOW_ACTION_TYPE_END;\n-\tflow_idx = flow_list_create(dev, &priv->ctrl_flows,\n-\t\t\t\t&attr, items, actions, false, &error);\n+\tflow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,\n+\t\t\t\t    &attr, items, actions, false, &error);\n \tif (!flow_idx) {\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"Failed to create ctrl flow: rte_errno(%d),\"\n@@ -6836,8 +6795,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,\n \t\taction_rss.types = 0;\n \tfor (i = 0; i != priv->reta_idx_n; ++i)\n \t\tqueue[i] = (*priv->reta_idx)[i];\n-\tflow_idx = flow_list_create(dev, &priv->ctrl_flows,\n-\t\t\t\t&attr, items, actions, false, &error);\n+\tflow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,\n+\t\t\t\t    &attr, items, actions, false, &error);\n \tif (!flow_idx)\n \t\treturn -rte_errno;\n \treturn 0;\n@@ -6878,7 +6837,6 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,\n int\n mlx5_flow_lacp_miss(struct rte_eth_dev *dev)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \t/*\n \t * The LACP matching is done by only using ether type since using\n \t * a multicast dst mac causes kernel to give low priority to this flow.\n@@ -6912,8 +6870,9 @@ mlx5_flow_lacp_miss(struct rte_eth_dev *dev)\n \t\t},\n \t};\n \tstruct rte_flow_error error;\n-\tuint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,\n-\t\t\t\t&attr, items, actions, false, &error);\n+\tuint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,\n+\t\t\t\t\t&attr, items, actions,\n+\t\t\t\t\tfalse, &error);\n \n \tif (!flow_idx)\n \t\treturn -rte_errno;\n@@ -6931,9 +6890,8 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,\n \t\t  struct rte_flow *flow,\n \t\t  struct rte_flow_error *error __rte_unused)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\n-\tflow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);\n+\tflow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,\n+\t\t\t\t(uintptr_t)(void *)flow);\n \treturn 0;\n }\n \n@@ -6947,9 +6905,7 @@ 
int\n mlx5_flow_flush(struct rte_eth_dev *dev,\n \t\tstruct rte_flow_error *error __rte_unused)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\n-\tmlx5_flow_list_flush(dev, &priv->flows, false);\n+\tmlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);\n \treturn 0;\n }\n \n@@ -7000,8 +6956,7 @@ flow_drv_query(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct mlx5_flow_driver_ops *fops;\n-\tstruct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool\n-\t\t\t\t\t       [MLX5_IPOOL_RTE_FLOW],\n+\tstruct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],\n \t\t\t\t\t       flow_idx);\n \tenum mlx5_flow_drv_type ftype;\n \n@@ -7867,14 +7822,14 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)\n \t\tif (!config->dv_flow_en)\n \t\t\tbreak;\n \t\t/* Create internal flow, validation skips copy action. */\n-\t\tflow_idx = flow_list_create(dev, NULL, &attr, items,\n-\t\t\t\t\t    actions, false, &error);\n-\t\tflow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],\n+\t\tflow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,\n+\t\t\t\t\titems, actions, false, &error);\n+\t\tflow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],\n \t\t\t\t      flow_idx);\n \t\tif (!flow)\n \t\t\tcontinue;\n \t\tconfig->flow_mreg_c[n++] = idx;\n-\t\tflow_list_destroy(dev, NULL, flow_idx);\n+\t\tflow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);\n \t}\n \tfor (; n < MLX5_MREG_C_NUM; ++n)\n \t\tconfig->flow_mreg_c[n] = REG_NON;\n@@ -7918,8 +7873,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,\n \t\t\t\t\tsh->rx_domain,\n \t\t\t\t\tsh->tx_domain, file);\n \t/* dump one */\n-\tflow = mlx5_ipool_get(priv->sh->ipool\n-\t\t\t[MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);\n+\tflow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],\n+\t\t\t(uintptr_t)(void *)flow_idx);\n \tif (!flow)\n \t\treturn -ENOENT;\n \ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 2f2aa962f9..d9b6acaafd 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -997,9 +997,9 @@ flow_items_to_tunnel(const struct rte_flow_item items[])\n \n /* Flow structure. */\n struct rte_flow {\n-\tILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */\n \tuint32_t dev_handles;\n \t/**< Device flow handles that are part of the flow. */\n+\tuint32_t type:2;\n \tuint32_t drv_type:2; /**< Driver type. */\n \tuint32_t tunnel:1;\n \tuint32_t meter:24; /**< Holds flow meter id. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex c5d4b01e57..67f7243503 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -13844,6 +13844,11 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)\n \t\t    dev_handle->split_flow_id)\n \t\t\tmlx5_ipool_free(fm->flow_ipool,\n \t\t\t\t\tdev_handle->split_flow_id);\n+\t\telse if (dev_handle->split_flow_id &&\n+\t\t    !dev_handle->is_meter_flow_id)\n+\t\t\tmlx5_ipool_free(priv->sh->ipool\n+\t\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],\n+\t\t\t\t\tdev_handle->split_flow_id);\n \t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],\n \t\t\t   tmp_idx);\n \t}\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex ae7fcca229..7cb8920d6b 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -1187,7 +1187,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)\n \t/* Control flows for default traffic can be removed firstly. */\n \tmlx5_traffic_disable(dev);\n \t/* All RX queue flags will be cleared in the flush interface. */\n-\tmlx5_flow_list_flush(dev, &priv->flows, true);\n+\tmlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);\n \tmlx5_flow_meter_rxq_flush(dev);\n \tmlx5_rx_intr_vec_disable(dev);\n \tpriv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;\n@@ -1370,7 +1370,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)\n \treturn 0;\n error:\n \tret = rte_errno; /* Save rte_errno before cleanup. */\n-\tmlx5_flow_list_flush(dev, &priv->ctrl_flows, false);\n+\tmlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);\n \trte_errno = ret; /* Restore rte_errno. */\n \treturn -rte_errno;\n }\n@@ -1385,9 +1385,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)\n void\n mlx5_traffic_disable(struct rte_eth_dev *dev)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\n-\tmlx5_flow_list_flush(dev, &priv->ctrl_flows, false);\n+\tmlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);\n }\n \n /**\ndiff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c\nindex 3fe3f55f49..7d15c998bb 100644\n--- a/drivers/net/mlx5/windows/mlx5_os.c\n+++ b/drivers/net/mlx5/windows/mlx5_os.c\n@@ -563,7 +563,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \teth_dev->rx_queue_count = mlx5_rx_queue_count;\n \t/* Register MAC address. */\n \tclaim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));\n-\tpriv->flows = 0;\n \tpriv->ctrl_flows = 0;\n \tTAILQ_INIT(&priv->flow_meters);\n \tTAILQ_INIT(&priv->flow_meter_profiles);\n",
    "prefixes": [
        "v2",
        "04/22"
    ]
}
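
Example (write access): the patch/put operations above modify a patch and require authentication. A hypothetical sketch, assuming a Patchwork API token belonging to a user with maintainer rights on the project; the token value and the target state are placeholders, not taken from the example above.

import requests

# Hypothetical: move the patch to a new state via a partial update (PATCH).
# The token below is a placeholder, not a real credential.
API_TOKEN = "0123456789abcdef0123456789abcdef"

resp = requests.patch(
    "https://patches.dpdk.org/api/patches/95059/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted"},  # placeholder target state
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])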