get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (a full update; the request should carry all writable fields).
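
As an illustration of the read-only GET operation documented above, the following is a minimal sketch using Python with the third-party requests library (an assumption; any HTTP client works). It retrieves the same resource shown in the sample exchange below and reads a few fields from the JSON body.

import requests

# Fetch the patch resource; read access typically needs no authentication.
resp = requests.get("https://patches.dpdk.org/api/patches/134007/", timeout=30)
resp.raise_for_status()
patch = resp.json()

# A few of the fields visible in the sample response below.
print(patch["name"])    # patch subject line
print(patch["state"])   # e.g. "accepted"
print(patch["mbox"])    # mbox download URL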

GET /api/patches/134007/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 134007,
    "url": "https://patches.dpdk.org/api/patches/134007/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20231109080751.1311817-1-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231109080751.1311817-1-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231109080751.1311817-1-suanmingm@nvidia.com",
    "date": "2023-11-09T08:07:51",
    "name": "net/mlx5: fix counter query during port close",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "29962a4ddeba33914439a0b953e335b0d04c7109",
    "submitter": {
        "id": 1887,
        "url": "https://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20231109080751.1311817-1-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 30210,
            "url": "https://patches.dpdk.org/api/series/30210/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=30210",
            "date": "2023-11-09T08:07:51",
            "name": "net/mlx5: fix counter query during port close",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/30210/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/134007/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/134007/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id F27E6432E1;\n\tThu,  9 Nov 2023 09:08:32 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8E7F642DEF;\n\tThu,  9 Nov 2023 09:08:32 +0100 (CET)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2059.outbound.protection.outlook.com [40.107.243.59])\n by mails.dpdk.org (Postfix) with ESMTP id 20E6240267\n for <dev@dpdk.org>; Thu,  9 Nov 2023 09:08:31 +0100 (CET)",
            "from CH2PR16CA0024.namprd16.prod.outlook.com (2603:10b6:610:50::34)\n by SJ1PR12MB6313.namprd12.prod.outlook.com (2603:10b6:a03:458::18)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6954.29; Thu, 9 Nov\n 2023 08:08:29 +0000",
            "from CO1PEPF000044FB.namprd21.prod.outlook.com\n (2603:10b6:610:50:cafe::f6) by CH2PR16CA0024.outlook.office365.com\n (2603:10b6:610:50::34) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6977.19 via Frontend\n Transport; Thu, 9 Nov 2023 08:08:28 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n CO1PEPF000044FB.mail.protection.outlook.com (10.167.241.201) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7002.1 via Frontend Transport; Thu, 9 Nov 2023 08:08:28 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Thu, 9 Nov 2023\n 00:08:09 -0800",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Thu, 9 Nov 2023\n 00:08:06 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=VJ89PKsX5uYxYleTaVpLL99sR/ZzniJdOVZ2pH5L2nUIRZj9Ll35RtL1AOB024mEGm+U2nh08cRRFTo3+TusK8NKhPA/zxtjU4Rnq31JpPD7UhEyw3RMLcoLcqLkGUS0EOaD5XSYddOr9i4pjnBcFpPS/7LFYk8G7+ui2z74BEldY+6sWyZBn1iaz97drjSROMc4D3CHboDpeKcu7UHy8H0pHDmDF0KsTSaiyQ253kd0ADgXkB7oZTk2BWO/ztLtQ/UzY0uMwyVCcAp79KP6lRH7ZY7SjmeMd7MdvOELZG2QnhMs57z1nEeQ9kp/Y4MLZKvxdqCzDIKex0LxpBHbzA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=AVjQwe1zapGSkx9EzZ9YwX22FophL0c9Q7hAsxAxWPU=;\n b=dwD9J8VaanmjhykqH8Nx1yoecov4jkJnblCy96LqxR9qTRtG0o8eprOwRQDcjfdTJg/0ixIR5PpOWY15gF0a8cIo3nkAhQnHxY1O+ethwbBdgC48oM7mXagcTm3mIcycCzy78FKmCtVuJ2Rev1OPre0pJ33obEO1l9h+JmleaIHki5KVIyWIrrapxOhmDDFdTMS5KTImEsol98qpeJ4AK4miqs6QNiaRrQfTAfQN29muBNWQaND6n2Vwf1fO4cVBi0a26FOO5COZBS8yMiJ81sEo2bZIMwXuphwv6mVBKYlsIB3ZHzw0aF/qWNFdG4FMz3q/az1vOySPuHQMOLSaqA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=AVjQwe1zapGSkx9EzZ9YwX22FophL0c9Q7hAsxAxWPU=;\n b=cRBYzZ83QkOfUobpJYb06Wa13yO9WqNqpXBUukVkZtiKHAw2Ah/xE2LHTQHgpzSXuFzoFXKpt92y6pN8xdClEKj7Iysmly1/A2281kngyhd3V04LEntRxX2nDqKwEafaizzYuQ1coo7cHVDIPM6FWoSHB+8BnCmqbp8c3oj+ne/AwSrBXV5SnajO8lHvB3YRIgjqhpXMqh2lQVSmxCCiOCKrvmlyT9DQvnonz9uwfzsuTzpas0S3gFdyUSj9tME11sKp/4FYXu7rXq6Anfz1igRMMHPg+Bh6Si6DQXhITSlC5z1XbrZK0Ujtj54uA0trnkNoS64c0i5PQr3sStVgMA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>",
        "CC": "<dev@dpdk.org>, <rasland@nvidia.com>",
        "Subject": "[PATCH] net/mlx5: fix counter query during port close",
        "Date": "Thu, 9 Nov 2023 16:07:51 +0800",
        "Message-ID": "<20231109080751.1311817-1-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "CO1PEPF000044FB:EE_|SJ1PR12MB6313:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "0515d2e2-ec92-4841-0ccd-08dbe0fb0e8d",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n x+D6MkTSLjEH4od6MnlHy8u1G56Hs9j/EslomKyraIszMDU7L2b7j5fTl318ZyrgD5rUnyDZ4D8YgqlyexwThpD9mp2TtuPhVif/f5DzfKWn5Z6Cp5cbE4bmmANgk2/xhKmrkFwJXPr96QY14cy/kRojZrwzOhMExn3ZY9tMEY0nnkrv+gPvYB9rnmfdY1IRqMP/bPxN/su331EyGZuLrpmcSl+UQztH/gHYrgddJtVt16cAYdLAB4xAezGpdiUOzEbH9cArEBg6wfzLmeUE3XL5IrAUyaIFTf0Wjdjd+i0r2PQsk8Tr2m3CH2QLGiq0dUXLyHAgkGNGiDjZWQhsRuB9L68KZMPY7QqAIwwbiSlXMq0lmAVR56La8XBmsCGzo24v1qnyfdD6IGm5UTzNsdQMk2jlQB70Beo4objUWFjctpM20FogS8fObQEuGFfA44ZatSYWi/8b7zc7502K/N6V6Ghn3+pnVPZ/tfa7OS+WhgdIlltpxxO/yQxS7qcSP8jcY3zFqf08z/4n3DqkoT5b9KntfQVdXR0B9vgcFoCfbp+XZqLwkxgLGlniwLAjkRhfN0p3b4EujMnN9q1GZgawNtkl1Xb1u9+vBtY7A5P4BOzhqiGb/Rj0H1Tex6sAQzO9tHEkLdrTb2Z+YHW1s2CXCuVNGMt2VytQmUXmJC9vJsZDbZQSe/RN7PbvOpLjSKAxRLt7HqB5YsBxlXad3NevKs6Ihx64ovlRXb84+I153kDMfzccekCO8EXTCvZ9",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230031)(4636009)(136003)(396003)(376002)(346002)(39860400002)(230922051799003)(64100799003)(451199024)(1800799009)(186009)(82310400011)(40470700004)(46966006)(36840700001)(40460700003)(36756003)(55016003)(40480700001)(6666004)(7696005)(5660300002)(41300700001)(4326008)(8936002)(8676002)(36860700001)(70586007)(2906002)(2616005)(426003)(316002)(54906003)(6636002)(83380400001)(26005)(70206006)(6286002)(110136005)(1076003)(107886003)(478600001)(47076005)(7636003)(86362001)(336012)(16526019)(356005)(82740400003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "09 Nov 2023 08:08:28.7188 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 0515d2e2-ec92-4841-0ccd-08dbe0fb0e8d",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1PEPF000044FB.namprd21.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SJ1PR12MB6313",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Currently, the counter query service thread queries all the ports\nwhich belongs to the same sh. In case one of the ports is closing\nthe query may still be proceeded.\n\nThis commit adds the pool list in shared context to manage the pool\nfor avoiding query the port during port close.\n\nFixes: 4d368e1da3a4 (\"net/mlx5: support flow counter action for HWS\")\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c         |  3 +++\n drivers/net/mlx5/mlx5.h         |  2 ++\n drivers/net/mlx5/mlx5_hws_cnt.c | 36 ++++++++++++++++++++++-----------\n drivers/net/mlx5/mlx5_hws_cnt.h |  2 ++\n 4 files changed, 31 insertions(+), 12 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 2cf21a1921..d6cb0d1c8a 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -1814,6 +1814,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \tLIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);\n \trte_spinlock_init(&sh->geneve_tlv_opt_sl);\n \tmlx5_init_shared_dev_registers(sh);\n+\t/* Init counter pool list header and lock. */\n+\tLIST_INIT(&sh->hws_cpool_list);\n+\trte_spinlock_init(&sh->cpool_lock);\n exit:\n \tpthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);\n \treturn sh;\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex ee13ad6db2..f5eacb2c67 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1521,6 +1521,8 @@ struct mlx5_dev_ctx_shared {\n \tuint32_t host_shaper_rate:8;\n \tuint32_t lwm_triggered:1;\n \tstruct mlx5_hws_cnt_svc_mng *cnt_svc;\n+\trte_spinlock_t cpool_lock;\n+\tLIST_HEAD(hws_cpool_list, mlx5_hws_cnt_pool) hws_cpool_list; /* Count pool list. */\n \tstruct mlx5_dev_registers registers;\n \tstruct mlx5_dev_shared_port port[]; /* per device port data array. */\n };\ndiff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c\nindex f556a9fbcc..a3bea94811 100644\n--- a/drivers/net/mlx5/mlx5_hws_cnt.c\n+++ b/drivers/net/mlx5/mlx5_hws_cnt.c\n@@ -294,26 +294,25 @@ mlx5_hws_cnt_svc(void *opaque)\n \t\t(struct mlx5_dev_ctx_shared *)opaque;\n \tuint64_t interval =\n \t\t(uint64_t)sh->cnt_svc->query_interval * (US_PER_S / MS_PER_S);\n-\tuint16_t port_id;\n+\tstruct mlx5_hws_cnt_pool *hws_cpool;\n \tuint64_t start_cycle, query_cycle = 0;\n \tuint64_t query_us;\n \tuint64_t sleep_us;\n \n \twhile (sh->cnt_svc->svc_running != 0) {\n+\t\tif (rte_spinlock_trylock(&sh->cpool_lock) == 0)\n+\t\t\tcontinue;\n \t\tstart_cycle = rte_rdtsc();\n-\t\tMLX5_ETH_FOREACH_DEV(port_id, sh->cdev->dev) {\n-\t\t\tstruct mlx5_priv *opriv =\n-\t\t\t\trte_eth_devices[port_id].data->dev_private;\n-\t\t\tif (opriv != NULL &&\n-\t\t\t    opriv->sh == sh &&\n-\t\t\t    opriv->hws_cpool != NULL) {\n-\t\t\t\t__mlx5_hws_cnt_svc(sh, opriv->hws_cpool);\n-\t\t\t\tif (opriv->hws_age_req)\n-\t\t\t\t\tmlx5_hws_aging_check(opriv,\n-\t\t\t\t\t\t\t     opriv->hws_cpool);\n-\t\t\t}\n+\t\t/* 200ms for 16M counters. 
*/\n+\t\tLIST_FOREACH(hws_cpool, &sh->hws_cpool_list, next) {\n+\t\t\tstruct mlx5_priv *opriv = hws_cpool->priv;\n+\n+\t\t\t__mlx5_hws_cnt_svc(sh, hws_cpool);\n+\t\t\tif (opriv->hws_age_req)\n+\t\t\t\tmlx5_hws_aging_check(opriv, hws_cpool);\n \t\t}\n \t\tquery_cycle = rte_rdtsc() - start_cycle;\n+\t\trte_spinlock_unlock(&sh->cpool_lock);\n \t\tquery_us = query_cycle / (rte_get_timer_hz() / US_PER_S);\n \t\tsleep_us = interval - query_us;\n \t\tif (interval > query_us)\n@@ -665,6 +664,10 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,\n \tif (ret != 0)\n \t\tgoto error;\n \tpriv->sh->cnt_svc->refcnt++;\n+\tcpool->priv = priv;\n+\trte_spinlock_lock(&priv->sh->cpool_lock);\n+\tLIST_INSERT_HEAD(&priv->sh->hws_cpool_list, cpool, next);\n+\trte_spinlock_unlock(&priv->sh->cpool_lock);\n \treturn cpool;\n error:\n \tmlx5_hws_cnt_pool_destroy(priv->sh, cpool);\n@@ -677,6 +680,13 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,\n {\n \tif (cpool == NULL)\n \t\treturn;\n+\t/*\n+\t * 16M counter consumes 200ms to finish the query.\n+\t * Maybe blocked for at most 200ms here.\n+\t */\n+\trte_spinlock_lock(&sh->cpool_lock);\n+\tLIST_REMOVE(cpool, next);\n+\trte_spinlock_unlock(&sh->cpool_lock);\n \tif (cpool->cfg.host_cpool == NULL) {\n \t\tif (--sh->cnt_svc->refcnt == 0)\n \t\t\tmlx5_hws_cnt_svc_deinit(sh);\n@@ -1244,11 +1254,13 @@ mlx5_hws_age_pool_destroy(struct mlx5_priv *priv)\n {\n \tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n \n+\trte_spinlock_lock(&priv->sh->cpool_lock);\n \tMLX5_ASSERT(priv->hws_age_req);\n \tmlx5_hws_age_info_destroy(priv);\n \tmlx5_ipool_destroy(age_info->ages_ipool);\n \tage_info->ages_ipool = NULL;\n \tpriv->hws_age_req = 0;\n+\trte_spinlock_unlock(&priv->sh->cpool_lock);\n }\n \n #endif\ndiff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h\nindex dcd5cec020..585b5a83ad 100644\n--- a/drivers/net/mlx5/mlx5_hws_cnt.h\n+++ b/drivers/net/mlx5/mlx5_hws_cnt.h\n@@ -98,6 +98,7 @@ struct mlx5_hws_cnt_pool_caches {\n };\n \n struct mlx5_hws_cnt_pool {\n+\tLIST_ENTRY(mlx5_hws_cnt_pool) next;\n \tstruct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;\n \tstruct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;\n \tuint32_t query_gen __rte_cache_aligned;\n@@ -108,6 +109,7 @@ struct mlx5_hws_cnt_pool {\n \tstruct rte_ring *wait_reset_list;\n \tstruct mlx5_hws_cnt_pool_caches *cache;\n \tuint64_t time_of_last_age_check;\n+\tstruct mlx5_priv *priv;\n } __rte_cache_aligned;\n \n /* HWS AGE status. */\n",
    "prefixes": []
}
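
The PATCH and PUT operations listed at the top require an authenticated user with sufficient rights on the project (typically a maintainer). The sketch below is illustrative only: it assumes Patchwork's token authentication (an Authorization: Token header) and that the archived field is writable for that user; the token value is a placeholder, and the authoritative set of writable fields should be confirmed against the OPTIONS response or the Patchwork API documentation.

import requests

url = "https://patches.dpdk.org/api/patches/134007/"
# Placeholder token; obtain a real one from your Patchwork user profile.
headers = {"Authorization": "Token <your-api-token>"}

# PATCH performs a partial update: only the fields present in the body change.
resp = requests.patch(url, json={"archived": False}, headers=headers, timeout=30)
resp.raise_for_status()
print(resp.json()["archived"])

A PUT request works the same way via requests.put(), but, per the descriptions above, it is a full update and is expected to carry all writable fields rather than only those being changed.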