get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
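The exchange below was produced with the browsable rendering (?format=api). The same resource can be fetched programmatically; the following is a minimal sketch assuming Python's requests library (any HTTP client works, and no API token should be needed for read-only access):

import requests

# Fetch a single patch from the Patchwork REST API as JSON.
# The ID matches the example response shown below.
url = "https://patches.dpdk.org/api/patches/137457/"
resp = requests.get(url, params={"format": "json"}, timeout=30)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])                   # net/mlx5: fix counter cache starvation
print(patch["state"], patch["check"])  # accepted success
print(patch["mbox"])                   # URL of the raw patch mbox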

GET /api/patches/137457/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 137457,
    "url": "https://patches.dpdk.org/api/patches/137457/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20240228190607.187958-1-dsosnowski@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240228190607.187958-1-dsosnowski@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240228190607.187958-1-dsosnowski@nvidia.com",
    "date": "2024-02-28T19:06:06",
    "name": "net/mlx5: fix counter cache starvation",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "77f09c6cd511ef4597cb5c9a4c2254a0e3d6e913",
    "submitter": {
        "id": 2386,
        "url": "https://patches.dpdk.org/api/people/2386/?format=api",
        "name": "Dariusz Sosnowski",
        "email": "dsosnowski@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20240228190607.187958-1-dsosnowski@nvidia.com/mbox/",
    "series": [
        {
            "id": 31282,
            "url": "https://patches.dpdk.org/api/series/31282/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=31282",
            "date": "2024-02-28T19:06:06",
            "name": "net/mlx5: fix counter cache starvation",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/31282/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/137457/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/137457/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5E65A43C28;\n\tWed, 28 Feb 2024 20:06:43 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3A6CC410F2;\n\tWed, 28 Feb 2024 20:06:43 +0100 (CET)",
            "from NAM02-DM3-obe.outbound.protection.outlook.com\n (mail-dm3nam02on2058.outbound.protection.outlook.com [40.107.95.58])\n by mails.dpdk.org (Postfix) with ESMTP id 1ED6140EE4;\n Wed, 28 Feb 2024 20:06:42 +0100 (CET)",
            "from CH2PR14CA0034.namprd14.prod.outlook.com (2603:10b6:610:56::14)\n by MW4PR12MB6923.namprd12.prod.outlook.com (2603:10b6:303:208::10)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7316.36; Wed, 28 Feb\n 2024 19:06:38 +0000",
            "from CH1PEPF0000A349.namprd04.prod.outlook.com\n (2603:10b6:610:56:cafe::a5) by CH2PR14CA0034.outlook.office365.com\n (2603:10b6:610:56::14) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7292.49 via Frontend\n Transport; Wed, 28 Feb 2024 19:06:37 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n CH1PEPF0000A349.mail.protection.outlook.com (10.167.244.9) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7292.25 via Frontend Transport; Wed, 28 Feb 2024 19:06:37 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 28 Feb\n 2024 11:06:22 -0800",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Wed, 28 Feb\n 2024 11:06:20 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Pt0cLONKwWetF5NvID47XaJzfYmGBjjo1DG4s53tJ+JMAr2H5tLM4DZCc7yvUiXtGTlk5aSCNvw69Lp2WKcnnOX+DlFg8ckEP4rl7zIU+yB+A8x2QWv7/tVcguQvBbAnDaMdgpFZA//4iV8rgwVq2PLa8ZG5QI1YF0+HQiJDvcbL72AAcH4xpA7KcjvZFybi5hntRiyU+ExyTDE2fJNL2EZPJEJ7yAxiyprtN/y6sY2BR7n6ApJxZqd7sVlKX0WuU87SzqvVT0JPG+rstQp9JRRQzTEV5SIN3G5xRSLQl4Ee6RoRnkS67Lao3aAJ76fpehwgPKDd75MIKBGsLlmHMg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=hlVaiHjKumU6BO1DWqJz5z5IRe/8152Ii3BxhWx7/fw=;\n b=crKodGyTmedI06AW+Cb9gennD7Ev6zsTCYfjzbpr/9RmsljAaiIXJewmAgddBaB5fh2SBD4jgsEUL8SMfBS437geDXi8huHlCyE0SLEncAQFVAZHUliJpCW7gnhxucvESZOVEGfYhQHDeF0aGXjrE0ooe3hG3dkIZU9cHIq8lEr+8tjZwOiaDjh1AM5AqhxQ+fAa1RBOOMrWZhhSLvKZ2cM4lbNXSF1MzygLvdvCnMY/MAiXrF8RY3JJ0DWKhaQx3o4KHP0fvpfL5xFRZz6WLbWFnjHUM51TBLqVQ9ytShxxRABZejXslElx4iu4ard3mlTMZNmKbACkGeXinItCfw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=hlVaiHjKumU6BO1DWqJz5z5IRe/8152Ii3BxhWx7/fw=;\n b=g0DtBrjXsbxGhfXuHGuk3zfndD0bmQwL8JikrdLC0cAoUcGcGmWWRqi4NaUZSpEYM3RNYGMQaacRME9BciAgpCGOljv8RKJ+Tk4nIQ9xsUrG1HaPHE78peaclWgDd73VSgWenYdGMJcVD0JCm5fWu2Zk2f4dqG4RCmT1LZ6IsxBW2kLbLla/xkrLQ+7/mn7gy4I50giWE9O0n8nuRoyNSswK7RS8Me79tnz6P3DM5ZcNCNTMtNCn3+xa4+DZJX3ymUrzvH3Ic+hN128G89IEIW3mw03N3tsmu0lgd6Llfw6sV9z9EWyGqApjp1jbcNMxNDyMGVco3lV4AEr+Ja27UA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Dariusz Sosnowski <dsosnowski@nvidia.com>",
        "To": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>,\n \"Xiaoyu Min\" <jackmin@nvidia.com>",
        "CC": "<dev@dpdk.org>, <stable@dpdk.org>, Bing Zhao <bingz@nvidia.com>",
        "Subject": "[PATCH] net/mlx5: fix counter cache starvation",
        "Date": "Wed, 28 Feb 2024 20:06:06 +0100",
        "Message-ID": "<20240228190607.187958-1-dsosnowski@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "CH1PEPF0000A349:EE_|MW4PR12MB6923:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "5f804499-73ef-471c-eda0-08dc389063a2",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n yYuVcKDd4kZmjdZWxKIhMvJ1iHYvCj3a1uKViBC5WHqUKuSXctPdQsupbknJcCW9198OEcs5IU5A0pf1/vNDICxlb0qJe3jCbrSYhM1SJ34+5DiCL2vJi+qlGU7kYaaXuqLwzsl23/R1LEFPgakrgzW6Oo884HG+g2gNoweaoyS9zzcEXvCCY1Cnht9BjqMXjn7s9upgE3s2f+0qbDcDj7H+21xPXN+0MsgvZBvgpBBTxmHx+MgbQ98ZvQ5XLT1kjsZ1H0qMEjJPMiMsbRhtLMFpiIAyirKRWol7MPs8ESF64emkqR2K8W2QAKRB0gf4wm0qz3W3Vv+VMyUjy3OvFFvZXCjoyyD0N+xYliXkR6fXmistAiHaemagHVDzevim1DpOiezSe908V2TsfTTvU3Z8bez/3MQ0G4Oc+PfqXb63QPXNUudFNgFnVki5EDg4PBC2sXf4W7pbGkIujpNBothhetWxTn0IcGXzY6HPaqQwANM2lXlZJoadjXSGOADgRdKK9OtiBt9bWL45vbMTq+hTGqIqFkc2lms9JCp/+RK1HqQEU+6V8oWEc0Oq1//nA6QEq4xPYKLt2IYVIdFbEbqYs9smjh2XgXPtsPAvdS9hkpelPl/SVyuh4lTL40atC+Cv/QbATkJvuUQ2x2fLYUrpoXFRkqeTIT5ckbDxEUB7kdYpb8ss87UqneFfDYY8PNEB+rhm+9GUtZoIF1SMEkH9Ai4LlkjZrmhj6dpxItJ8PUvZSqzdrNqFCfUEB7re",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230031)(82310400014)(36860700004); DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Feb 2024 19:06:37.5804 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 5f804499-73ef-471c-eda0-08dc389063a2",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CH1PEPF0000A349.namprd04.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MW4PR12MB6923",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "mlx5 PMD maintains a global counter pool and per-queue counter cache,\nwhich are used to allocate COUNT flow action objects.\nWhenever an empty cache is accessed, it is replenished\nwith a pre-defined number of counters.\n\nIf number of configured counters was sufficiently small, then\nit might have happened that caches associated with some queues\ncould get starved because all counters were fetched on other queues.\n\nThis patch fixes that by disabling cache at runtime\nif number of configured counters is not sufficient to avoid\nsuch starvation.\n\nFixes: 4d368e1da3a4 (\"net/mlx5: support flow counter action for HWS\")\nCc: jackmin@nvidia.com\nCc: stable@dpdk.org\n\nSigned-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>\nAcked-by: Ori Kam <orika@nvidia.com>\nAcked-by: Bing Zhao <bingz@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow_hw.c |  6 +--\n drivers/net/mlx5/mlx5_hws_cnt.c | 72 ++++++++++++++++++++++++---------\n drivers/net/mlx5/mlx5_hws_cnt.h | 25 +++++++++---\n 3 files changed, 74 insertions(+), 29 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex f778fd0698..8ba3b3321e 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -3117,8 +3117,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t\tbreak;\n \t\t\t/* Fall-through. */\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\t\t/* If the port is engaged in resource sharing, do not use queue cache. */\n-\t\t\tcnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue;\n+\t\t\tcnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);\n \t\t\tret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);\n \t\t\tif (ret != 0)\n \t\t\t\treturn ret;\n@@ -3757,8 +3756,7 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,\n \t\t}\n \t\treturn;\n \t}\n-\t/* If the port is engaged in resource sharing, do not use queue cache. */\n-\tcnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue;\n+\tcnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);\n \t/* Put the counter first to reduce the race risk in BG thread. */\n \tmlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);\n \tflow->cnt_id = 0;\ndiff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c\nindex a3bea94811..c31f2f380b 100644\n--- a/drivers/net/mlx5/mlx5_hws_cnt.c\n+++ b/drivers/net/mlx5/mlx5_hws_cnt.c\n@@ -340,6 +340,55 @@ mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool * const cntp)\n \tmlx5_free(cntp);\n }\n \n+static bool\n+mlx5_hws_cnt_should_enable_cache(const struct mlx5_hws_cnt_pool_cfg *pcfg,\n+\t\t\t\t const struct mlx5_hws_cache_param *ccfg)\n+{\n+\t/*\n+\t * Enable cache if and only if there are enough counters requested\n+\t * to populate all of the caches.\n+\t */\n+\treturn pcfg->request_num >= ccfg->q_num * ccfg->size;\n+}\n+\n+static struct mlx5_hws_cnt_pool_caches *\n+mlx5_hws_cnt_cache_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,\n+\t\t\tconst struct mlx5_hws_cache_param *ccfg)\n+{\n+\tstruct mlx5_hws_cnt_pool_caches *cache;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tuint32_t qidx;\n+\n+\t/* If counter pool is big enough, setup the counter pool cache. */\n+\tcache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,\n+\t\t\tsizeof(*cache) +\n+\t\t\tsizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0])\n+\t\t\t\t* ccfg->q_num, 0, SOCKET_ID_ANY);\n+\tif (cache == NULL)\n+\t\treturn NULL;\n+\t/* Store the necessary cache parameters. 
*/\n+\tcache->fetch_sz = ccfg->fetch_sz;\n+\tcache->preload_sz = ccfg->preload_sz;\n+\tcache->threshold = ccfg->threshold;\n+\tcache->q_num = ccfg->q_num;\n+\tfor (qidx = 0; qidx < ccfg->q_num; qidx++) {\n+\t\tsnprintf(mz_name, sizeof(mz_name), \"%s_qc/%x\", pcfg->name, qidx);\n+\t\tcache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size,\n+\t\t\t\tSOCKET_ID_ANY,\n+\t\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ |\n+\t\t\t\tRING_F_EXACT_SZ);\n+\t\tif (cache->qcache[qidx] == NULL)\n+\t\t\tgoto error;\n+\t}\n+\treturn cache;\n+\n+error:\n+\twhile (qidx--)\n+\t\trte_ring_free(cache->qcache[qidx]);\n+\tmlx5_free(cache);\n+\treturn NULL;\n+}\n+\n static struct mlx5_hws_cnt_pool *\n mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,\n \t\t       const struct mlx5_hws_cnt_pool_cfg *pcfg,\n@@ -348,7 +397,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,\n \tchar mz_name[RTE_MEMZONE_NAMESIZE];\n \tstruct mlx5_hws_cnt_pool *cntp;\n \tuint64_t cnt_num = 0;\n-\tuint32_t qidx;\n \n \tMLX5_ASSERT(pcfg);\n \tMLX5_ASSERT(ccfg);\n@@ -360,17 +408,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,\n \tcntp->cfg = *pcfg;\n \tif (cntp->cfg.host_cpool)\n \t\treturn cntp;\n-\tcntp->cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,\n-\t\t\tsizeof(*cntp->cache) +\n-\t\t\tsizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0])\n-\t\t\t\t* ccfg->q_num, 0, SOCKET_ID_ANY);\n-\tif (cntp->cache == NULL)\n-\t\tgoto error;\n-\t /* store the necessary cache parameters. */\n-\tcntp->cache->fetch_sz = ccfg->fetch_sz;\n-\tcntp->cache->preload_sz = ccfg->preload_sz;\n-\tcntp->cache->threshold = ccfg->threshold;\n-\tcntp->cache->q_num = ccfg->q_num;\n \tif (pcfg->request_num > sh->hws_max_nb_counters) {\n \t\tDRV_LOG(ERR, \"Counter number %u \"\n \t\t\t\"is greater than the maximum supported (%u).\",\n@@ -418,13 +455,10 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,\n \t\tDRV_LOG(ERR, \"failed to create reuse list ring\");\n \t\tgoto error;\n \t}\n-\tfor (qidx = 0; qidx < ccfg->q_num; qidx++) {\n-\t\tsnprintf(mz_name, sizeof(mz_name), \"%s_qc/%x\", pcfg->name, qidx);\n-\t\tcntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size,\n-\t\t\t\tSOCKET_ID_ANY,\n-\t\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ |\n-\t\t\t\tRING_F_EXACT_SZ);\n-\t\tif (cntp->cache->qcache[qidx] == NULL)\n+\t/* Allocate counter cache only if needed. */\n+\tif (mlx5_hws_cnt_should_enable_cache(pcfg, ccfg)) {\n+\t\tcntp->cache = mlx5_hws_cnt_cache_init(pcfg, ccfg);\n+\t\tif (cntp->cache == NULL)\n \t\t\tgoto error;\n \t}\n \t/* Initialize the time for aging-out calculation. */\ndiff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h\nindex 585b5a83ad..e00596088f 100644\n--- a/drivers/net/mlx5/mlx5_hws_cnt.h\n+++ b/drivers/net/mlx5/mlx5_hws_cnt.h\n@@ -557,19 +557,32 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,\n }\n \n /**\n- * Check if counter pool allocated for HWS is shared between ports.\n+ * Decide if the given queue can be used to perform counter allocation/deallcation\n+ * based on counter configuration\n  *\n  * @param[in] priv\n  *   Pointer to the port private data structure.\n+ * @param[in] queue\n+ *   Pointer to the queue index.\n  *\n  * @return\n- *   True if counter pools is shared between ports. False otherwise.\n+ *   @p queue if cache related to the queue can be used. 
NULL otherwise.\n  */\n-static __rte_always_inline bool\n-mlx5_hws_cnt_is_pool_shared(struct mlx5_priv *priv)\n+static __rte_always_inline uint32_t *\n+mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue)\n {\n-\treturn priv && priv->hws_cpool &&\n-\t    (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL);\n+\tif (priv && priv->hws_cpool) {\n+\t\t/* Do not use queue cache if counter pool is shared. */\n+\t\tif (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL)\n+\t\t\treturn NULL;\n+\t\t/* Do not use queue cache if counter cache is disabled. */\n+\t\tif (priv->hws_cpool->cache == NULL)\n+\t\t\treturn NULL;\n+\t\treturn queue;\n+\t}\n+\t/* This case should not be reached if counter pool was successfully configured. */\n+\tMLX5_ASSERT(false);\n+\treturn NULL;\n }\n \n static __rte_always_inline unsigned int\n",
    "prefixes": []
}
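As the Allow header above indicates, the resource also accepts PUT and PATCH. Write access requires an API token for a user with maintainer rights on the project, and the writable fields typically include state, archived, commit_ref, and delegate. A minimal sketch of a partial update, again assuming Python's requests library (the token value is a placeholder, not a real credential):

import requests

# Partially update the patch shown above via HTTP PATCH.
# Requires a Patchwork API token with maintainer rights on the DPDK project;
# "REPLACE_WITH_TOKEN" is a placeholder.
url = "https://patches.dpdk.org/api/patches/137457/"
headers = {"Authorization": "Token REPLACE_WITH_TOKEN"}
payload = {
    "state": "accepted",
    "archived": True,
}

resp = requests.patch(url, headers=headers, json=payload, timeout=30)
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])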