get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update; the writable fields of the patch are replaced).
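
For example, the patch detail shown below can be retrieved with a short Python script. This is a minimal sketch using the third-party requests library (an assumption, not part of the API itself); no authentication is needed for read-only access, and the fields printed ("name", "state") appear in the response body below.

    # Fetch one patch as JSON from the Patchwork REST API (read-only, no token needed).
    import requests

    resp = requests.get("http://patches.dpdk.org/api/patches/83983/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])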

GET /api/patches/83983/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83983,
    "url": "http://patches.dpdk.org/api/patches/83983/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201111071417.21177-3-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201111071417.21177-3-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201111071417.21177-3-getelson@nvidia.com",
    "date": "2020-11-11T07:14:15",
    "name": "[2/4] net/mlx5: fix tunnel offload hub multi-thread protection",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a2345b00e82f5bc979b0a422a8140f927784c75f",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201111071417.21177-3-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 13799,
            "url": "http://patches.dpdk.org/api/series/13799/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13799",
            "date": "2020-11-11T07:14:13",
            "name": "restore tunnel offload functionality in mlx5",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/13799/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/83983/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/83983/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id F1BF2A09D2;\n\tWed, 11 Nov 2020 08:15:15 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id AAD295928;\n\tWed, 11 Nov 2020 08:14:44 +0100 (CET)",
            "from hqnvemgate26.nvidia.com (hqnvemgate26.nvidia.com\n [216.228.121.65]) by dpdk.org (Postfix) with ESMTP id 63E035913\n for <dev@dpdk.org>; Wed, 11 Nov 2020 08:14:41 +0100 (CET)",
            "from hqmail.nvidia.com (Not Verified[216.228.121.13]) by\n hqnvemgate26.nvidia.com (using TLS: TLSv1.2, AES256-SHA)\n id <B5fab8f630000>; Tue, 10 Nov 2020 23:14:43 -0800",
            "from nvidia.com (172.20.13.39) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1473.3; Wed, 11 Nov\n 2020 07:14:37 +0000"
        ],
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>, \"Shahaf\n Shuler\" <shahafs@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>",
        "Date": "Wed, 11 Nov 2020 09:14:15 +0200",
        "Message-ID": "<20201111071417.21177-3-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20201111071417.21177-1-getelson@nvidia.com>",
        "References": "<20201111071417.21177-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "quoted-printable",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.13.39]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nvidia.com; s=n1;\n t=1605078883; bh=pUao8z9xjaIkiJfOlGghNSW2sxINSJ+Le1g/WRpApT8=;\n h=From:To:CC:Subject:Date:Message-ID:X-Mailer:In-Reply-To:\n References:MIME-Version:Content-Transfer-Encoding:Content-Type:\n X-Originating-IP:X-ClientProxiedBy;\n b=gWT65q4T2vU60hOAqYPyzb0+5ztzxomHYwIRKjSpyQmdZ/dVX9bX1f0Fd2Q/xH+u3\n v0Uj5ZSjCDIjW+VXEQha0l0EAJ2gekwj4vWKtGsvsNuib2RQ5GfOnbIvxI+JI4V2rl\n PsD3fBbcLfNitUOYVq/2n7lopLOGphZWKRk39MZVnqrgELnAYKTuqgZgBxYX6Y383A\n WacQuMqx/F1ejKSuvGdu/cQjtcLcQICcLWH2h0ZpS9An4NbudF1Xs134QQEXaIen+w\n 1ncEN7RCe+z4suoRy/GnnsNWmoglHxoXFsAGpZaZ/R1rEe+klQKf2oE+Xn7bElz95S\n u4Gu5NE4t27Cg==",
        "Subject": "[dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub\n\tmulti-thread protection",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The original patch was removing active tunnel offload objects from a\ntunnels db list. That action was leading to a PMD crash.\n\nCurrent patch isolates tunnels db list into a separate API. That API\nmanages MT protection of the tunnel offload db.\n\nFixes: e4f5880 (\"net/mlx5: make tunnel hub list thread safe\")\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 256 +++++++++++++++++++++++++----------\n drivers/net/mlx5/mlx5_flow.h |   6 +-\n 2 files changed, 192 insertions(+), 70 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 31c9d82b4a..2f01e34033 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -33,6 +33,14 @@\n #include \"mlx5_common_os.h\"\n #include \"rte_pmd_mlx5.h\"\n \n+static bool\n+mlx5_access_tunnel_offload_db\n+\t(struct rte_eth_dev *dev,\n+\t bool (*match)(struct rte_eth_dev *,\n+\t\t       struct mlx5_flow_tunnel *, const void *),\n+\t void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),\n+\t void (*miss)(struct rte_eth_dev *, void *),\n+\t void *ctx, bool lock_op);\n static struct mlx5_flow_tunnel *\n mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);\n static void\n@@ -661,29 +669,68 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+struct tunnel_db_element_release_ctx {\n+\tstruct rte_flow_item *items;\n+\tstruct rte_flow_action *actions;\n+\tuint32_t num_elements;\n+\tstruct rte_flow_error *error;\n+\tint ret;\n+};\n+\n+static bool\n+tunnel_element_release_match(struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_flow_tunnel *tunnel, const void *x)\n+{\n+\tconst struct tunnel_db_element_release_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\tif (ctx->num_elements != 1)\n+\t\treturn false;\n+\telse if (ctx->items)\n+\t\treturn ctx->items == &tunnel->item;\n+\telse if (ctx->actions)\n+\t\treturn ctx->actions == &tunnel->action;\n+\n+\treturn false;\n+}\n+\n+static void\n+tunnel_element_release_hit(struct rte_eth_dev *dev,\n+\t\t\t   struct mlx5_flow_tunnel *tunnel, void *x)\n+{\n+\tstruct tunnel_db_element_release_ctx *ctx = x;\n+\tctx->ret = 0;\n+\tif (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))\n+\t\tmlx5_flow_tunnel_free(dev, tunnel);\n+}\n+\n+static void\n+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)\n+{\n+\tstruct tunnel_db_element_release_ctx *ctx = x;\n+\tRTE_SET_USED(dev);\n+\tctx->ret = rte_flow_error_set(ctx->error, EINVAL,\n+\t\t\t\t      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t      \"invalid argument\");\n+}\n+\n static int\n mlx5_flow_item_release(struct rte_eth_dev *dev,\n \t\t       struct rte_flow_item *pmd_items,\n \t\t       uint32_t num_items, struct rte_flow_error *err)\n {\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n+\tstruct tunnel_db_element_release_ctx ctx = {\n+\t\t.items = pmd_items,\n+\t\t.actions = NULL,\n+\t\t.num_elements = num_items,\n+\t\t.error = err,\n+\t};\n \n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (&tun->item == pmd_items) {\n-\t\t\tLIST_REMOVE(tun, chain);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\trte_spinlock_unlock(&thub->sl);\n-\tif (!tun || num_items != 1)\n-\t\treturn rte_flow_error_set(err, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  \"invalid argument\");\n-\tif (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))\n-\t\tmlx5_flow_tunnel_free(dev, tun);\n-\treturn 0;\n+\tmlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,\n+\t\t\t\t      tunnel_element_release_hit,\n+\t\t\t\t      tunnel_element_release_miss, &ctx, false);\n+\n+\treturn ctx.ret;\n }\n \n static int\n@@ -691,25 +738,18 @@ mlx5_flow_action_release(struct rte_eth_dev *dev,\n \t\t\t struct rte_flow_action *pmd_actions,\n \t\t\t uint32_t num_actions, struct rte_flow_error *err)\n {\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n+\tstruct tunnel_db_element_release_ctx ctx = {\n+\t\t.items = 
NULL,\n+\t\t.actions = pmd_actions,\n+\t\t.num_elements = num_actions,\n+\t\t.error = err,\n+\t};\n \n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (&tun->action == pmd_actions) {\n-\t\t\tLIST_REMOVE(tun, chain);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\trte_spinlock_unlock(&thub->sl);\n-\tif (!tun || num_actions != 1)\n-\t\treturn rte_flow_error_set(err, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  \"invalid argument\");\n-\tif (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))\n-\t\tmlx5_flow_tunnel_free(dev, tun);\n+\tmlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,\n+\t\t\t\t      tunnel_element_release_hit,\n+\t\t\t\t      tunnel_element_release_miss, &ctx, false);\n \n-\treturn 0;\n+\treturn ctx.ret;\n }\n \n static int\n@@ -5889,11 +5929,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,\n \tif (flow->tunnel) {\n \t\tstruct mlx5_flow_tunnel *tunnel;\n \n-\t\trte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);\n \t\ttunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);\n \t\tRTE_VERIFY(tunnel);\n-\t\tLIST_REMOVE(tunnel, chain);\n-\t\trte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);\n \t\tif (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))\n \t\t\tmlx5_flow_tunnel_free(dev, tunnel);\n \t}\n@@ -7464,28 +7501,87 @@ static void\n mlx5_flow_tunnel_free(struct rte_eth_dev *dev,\n \t\t      struct mlx5_flow_tunnel *tunnel)\n {\n+\t/* no tunnel hub spinlock protection */\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n \tstruct mlx5_indexed_pool *ipool;\n \n \tDRV_LOG(DEBUG, \"port %u release pmd tunnel id=0x%x\",\n \t\tdev->data->port_id, tunnel->tunnel_id);\n+\trte_spinlock_lock(&thub->sl);\n+\tLIST_REMOVE(tunnel, chain);\n+\trte_spinlock_unlock(&thub->sl);\n \tmlx5_hlist_destroy(tunnel->groups);\n \tipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];\n \tmlx5_ipool_free(ipool, tunnel->tunnel_id);\n }\n \n-static struct mlx5_flow_tunnel *\n-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)\n+static bool\n+mlx5_access_tunnel_offload_db\n+\t(struct rte_eth_dev *dev,\n+\t bool (*match)(struct rte_eth_dev *,\n+\t\t       struct mlx5_flow_tunnel *, const void *),\n+\t void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),\n+\t void (*miss)(struct rte_eth_dev *, void *),\n+\t void *ctx, bool lock_op)\n {\n+\tbool verdict = false;\n \tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n+\tstruct mlx5_flow_tunnel *tunnel;\n \n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (tun->tunnel_id == id)\n+\trte_spinlock_lock(&thub->sl);\n+\tLIST_FOREACH(tunnel, &thub->tunnels, chain) {\n+\t\tverdict = match(dev, tunnel, (const void *)ctx);\n+\t\tif (verdict)\n \t\t\tbreak;\n \t}\n+\tif (!lock_op)\n+\t\trte_spinlock_unlock(&thub->sl);\n+\tif (verdict && hit)\n+\t\thit(dev, tunnel, ctx);\n+\tif (!verdict && miss)\n+\t\tmiss(dev, ctx);\n+\tif (lock_op)\n+\t\trte_spinlock_unlock(&thub->sl);\n \n-\treturn tun;\n+\treturn verdict;\n+}\n+\n+struct tunnel_db_find_tunnel_id_ctx {\n+\tuint32_t tunnel_id;\n+\tstruct mlx5_flow_tunnel *tunnel;\n+};\n+\n+static bool\n+find_tunnel_id_match(struct rte_eth_dev *dev,\n+\t\t     struct mlx5_flow_tunnel *tunnel, const void *x)\n+{\n+\tconst struct tunnel_db_find_tunnel_id_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\treturn tunnel->tunnel_id == ctx->tunnel_id;\n+}\n+\n+static void\n+find_tunnel_id_hit(struct rte_eth_dev *dev,\n+\t\t   struct 
mlx5_flow_tunnel *tunnel, void *x)\n+{\n+\tstruct tunnel_db_find_tunnel_id_ctx *ctx = x;\n+\tRTE_SET_USED(dev);\n+\tctx->tunnel = tunnel;\n+}\n+\n+static struct mlx5_flow_tunnel *\n+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)\n+{\n+\tstruct tunnel_db_find_tunnel_id_ctx ctx = {\n+\t\t.tunnel_id = id,\n+\t};\n+\n+\tmlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,\n+\t\t\t\t      find_tunnel_id_hit, NULL, &ctx, true);\n+\n+\treturn ctx.tunnel;\n }\n \n static struct mlx5_flow_tunnel *\n@@ -7533,38 +7629,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,\n \treturn tunnel;\n }\n \n+struct tunnel_db_get_tunnel_ctx {\n+\tconst struct rte_flow_tunnel *app_tunnel;\n+\tstruct mlx5_flow_tunnel *tunnel;\n+};\n+\n+static bool get_tunnel_match(struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_flow_tunnel *tunnel, const void *x)\n+{\n+\tconst struct tunnel_db_get_tunnel_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\treturn !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,\n+\t\t       sizeof(*ctx->app_tunnel));\n+}\n+\n+static void get_tunnel_hit(struct rte_eth_dev *dev,\n+\t\t\t   struct mlx5_flow_tunnel *tunnel, void *x)\n+{\n+\t/* called under tunnel spinlock protection */\n+\tstruct tunnel_db_get_tunnel_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\ttunnel->refctn++;\n+\tctx->tunnel = tunnel;\n+}\n+\n+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)\n+{\n+\t/* called under tunnel spinlock protection */\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct tunnel_db_get_tunnel_ctx *ctx = x;\n+\n+\trte_spinlock_unlock(&thub->sl);\n+\tctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);\n+\tctx->tunnel->refctn = 1;\n+\trte_spinlock_lock(&thub->sl);\n+\tif (ctx->tunnel)\n+\t\tLIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);\n+}\n+\n+\n static int\n mlx5_get_flow_tunnel(struct rte_eth_dev *dev,\n \t\t     const struct rte_flow_tunnel *app_tunnel,\n \t\t     struct mlx5_flow_tunnel **tunnel)\n {\n-\tint ret;\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n-\n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (!memcmp(app_tunnel, &tun->app_tunnel,\n-\t\t\t    sizeof(*app_tunnel))) {\n-\t\t\t*tunnel = tun;\n-\t\t\tret = 0;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\tif (!tun) {\n-\t\ttun = mlx5_flow_tunnel_allocate(dev, app_tunnel);\n-\t\tif (tun) {\n-\t\t\tLIST_INSERT_HEAD(&thub->tunnels, tun, chain);\n-\t\t\t*tunnel = tun;\n-\t\t} else {\n-\t\t\tret = -ENOMEM;\n-\t\t}\n-\t}\n-\trte_spinlock_unlock(&thub->sl);\n-\tif (tun)\n-\t\t__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);\n+\tstruct tunnel_db_get_tunnel_ctx ctx = {\n+\t\t.app_tunnel = app_tunnel,\n+\t};\n \n-\treturn ret;\n+\tmlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,\n+\t\t\t\t      get_tunnel_miss, &ctx, true);\n+\t*tunnel = ctx.tunnel;\n+\treturn ctx.tunnel ? 0 : -ENOMEM;\n }\n \n void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex e3a5030785..bdf2c50090 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {\n \n /** PMD tunnel related context */\n struct mlx5_flow_tunnel_hub {\n+\t/* Tunnels list\n+\t * Access to the list MUST be MT protected\n+\t */\n \tLIST_HEAD(, mlx5_flow_tunnel) tunnels;\n-\trte_spinlock_t sl;\t\t\t/* Tunnel list spinlock. 
*/\n+\t /* protect access to the tunnels list */\n+\trte_spinlock_t sl;\n \tstruct mlx5_hlist *groups;\t\t/** non tunnel groups */\n };\n \n",
    "prefixes": [
        "2/4"
    ]
}
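
Write access (PATCH/PUT, as listed in the Allow header above) requires a Patchwork account with maintainer rights on the project. The sketch below assumes a Patchwork API token sent as "Authorization: Token ..." and a hypothetical target state value; both are assumptions for illustration, not taken from the response above.

    # Minimal sketch: change the patch state with HTTP PATCH.
    # YOUR_API_TOKEN and the "accepted" state are placeholders/assumptions.
    import requests

    headers = {"Authorization": "Token YOUR_API_TOKEN"}
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/83983/",
        headers=headers,
        json={"state": "accepted"},  # only the supplied field is updated
    )
    resp.raise_for_status()
    print(resp.json()["state"])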