get:
Show a patch.

patch:
Partially update a patch (only the fields supplied).

put:
Update a patch (full replacement).

GET /api/patches/29623/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 29623,
    "url": "https://patches.dpdk.org/api/patches/29623/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1507153746-31255-5-git-send-email-ophirmu@mellanox.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1507153746-31255-5-git-send-email-ophirmu@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1507153746-31255-5-git-send-email-ophirmu@mellanox.com",
    "date": "2017-10-04T21:49:03",
    "name": "[dpdk-dev,v3,4/7] net/mlx4: support multi-segments Tx",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ea3cab336299a8f5cfa91b43613d436aba7b1863",
    "submitter": {
        "id": 793,
        "url": "https://patches.dpdk.org/api/people/793/?format=api",
        "name": "Ophir Munk",
        "email": "ophirmu@mellanox.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1507153746-31255-5-git-send-email-ophirmu@mellanox.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/29623/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/29623/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id B2B4E1B3CD;\n\tWed,  4 Oct 2017 23:49:51 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n\tby dpdk.org (Postfix) with ESMTP id A16BD1B3D5\n\tfor <dev@dpdk.org>; Wed,  4 Oct 2017 23:49:49 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n\tophirmu@mellanox.com)\n\twith ESMTPS (AES256-SHA encrypted); 4 Oct 2017 23:49:45 +0200",
            "from pegasus05.mtr.labs.mlnx (pegasus05.mtr.labs.mlnx\n\t[10.210.16.100])\n\tby labmailer.mlnx (8.13.8/8.13.8) with ESMTP id v94LniLq015901;\n\tThu, 5 Oct 2017 00:49:44 +0300",
            "from pegasus05.mtr.labs.mlnx (localhost [127.0.0.1])\n\tby pegasus05.mtr.labs.mlnx (8.14.7/8.14.7) with ESMTP id\n\tv94Lni0q031334; Wed, 4 Oct 2017 21:49:44 GMT",
            "(from root@localhost)\n\tby pegasus05.mtr.labs.mlnx (8.14.7/8.14.7/Submit) id v94Lni07031333; \n\tWed, 4 Oct 2017 21:49:44 GMT"
        ],
        "From": "Ophir Munk <ophirmu@mellanox.com>",
        "To": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "Cc": "dev@dpdk.org, Thomas Monjalon <thomas@monjalon.net>,\n\tOlga Shern <olgas@mellanox.com>, Matan Azrad <matan@mellanox.com>,\n\tMoti Haimovsky <motih@mellanox.com>",
        "Date": "Wed,  4 Oct 2017 21:49:03 +0000",
        "Message-Id": "<1507153746-31255-5-git-send-email-ophirmu@mellanox.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1507153746-31255-1-git-send-email-ophirmu@mellanox.com>",
        "References": "<1507027711-879-1-git-send-email-matan@mellanox.com>\n\t<1507153746-31255-1-git-send-email-ophirmu@mellanox.com>",
        "Subject": "[dpdk-dev] [PATCH v3 4/7] net/mlx4: support multi-segments Tx",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Moti Haimovsky <motih@mellanox.com>\n\nThis patch adds support for transmitting packets spanning over\nmultiple buffers.\nIn this patch we also take into consideration the amount of entries\na packet occupies in the TxQ when setting the report-completion flag\nof the chip.\n\nSigned-off-by: Moti Haimovsky <motih@mellanox.com>\n---\n drivers/net/mlx4/mlx4_rxtx.c | 208 ++++++++++++++++++++++++-------------------\n drivers/net/mlx4/mlx4_rxtx.h |   6 +-\n drivers/net/mlx4/mlx4_txq.c  |  12 ++-\n 3 files changed, 129 insertions(+), 97 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c\nindex f517505..bc0e353 100644\n--- a/drivers/net/mlx4/mlx4_rxtx.c\n+++ b/drivers/net/mlx4/mlx4_rxtx.c\n@@ -63,6 +63,16 @@\n #include \"mlx4_rxtx.h\"\n #include \"mlx4_utils.h\"\n \n+/*\n+ * Pointer-value pair structure\n+ * used in tx_post_send for saving the first DWORD (32 byte)\n+ * of a TXBB0\n+ */\n+struct pv {\n+\tstruct mlx4_wqe_data_seg *dseg;\n+\tuint32_t val;\n+};\n+\n /**\n  * Stamp a WQE so it won't be reused by the HW.\n  * Routine is used when freeing WQE used by the chip or when failing\n@@ -297,34 +307,38 @@\n  *\n  * @param txq\n  *   The Tx queue to post to.\n- * @param wr\n- *   The work request to handle.\n- * @param bad_wr\n- *   The wr in case that posting had failed.\n+ * @param pkt\n+ *   The packet to transmit.\n  *\n  * @return\n  *   0 - success, negative errno value otherwise and rte_errno is set.\n  */\n static inline int\n mlx4_post_send(struct txq *txq,\n-\t       struct rte_mbuf *pkt,\n-\t       uint32_t send_flags)\n+\t       struct rte_mbuf *pkt)\n {\n \tstruct mlx4_wqe_ctrl_seg *ctrl;\n \tstruct mlx4_wqe_data_seg *dseg;\n \tstruct mlx4_sq *sq = &txq->msq;\n+\tstruct rte_mbuf *buf;\n \tuint32_t head_idx = sq->head & sq->txbb_cnt_mask;\n \tuint32_t lkey;\n \tuintptr_t addr;\n+\tuint32_t srcrb_flags;\n+\tuint32_t owner_opcode = MLX4_OPCODE_SEND;\n+\tuint32_t byte_count;\n \tint wqe_real_size;\n \tint nr_txbbs;\n \tint rc;\n+\tstruct pv *pv = (struct pv *)txq->bounce_buf;\n+\tint pv_counter = 0;\n \n \t/* Calculate the needed work queue entry size for this packet. 
*/\n \twqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +\n \t\t\tpkt->nb_segs * sizeof(struct mlx4_wqe_data_seg);\n \tnr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);\n-\t/* Check that there is room for this WQE in the send queue and\n+\t/*\n+\t * Check that there is room for this WQE in the send queue and\n \t * that the WQE size is legal.\n \t */\n \tif (likely(((sq->head - sq->tail) + nr_txbbs +\n@@ -333,76 +347,108 @@\n \t\trc = ENOSPC;\n \t\tgoto err;\n \t}\n-\t/* Get the control and single-data entries of the WQE */\n+\t/* Get the control and data entries of the WQE. */\n \tctrl = (struct mlx4_wqe_ctrl_seg *)mlx4_get_send_wqe(sq, head_idx);\n \tdseg = (struct mlx4_wqe_data_seg *)(((char *)ctrl) +\n \t\tsizeof(struct mlx4_wqe_ctrl_seg));\n-\t/*\n-\t * Fill the data segment with buffer information.\n-\t */\n-\taddr = rte_pktmbuf_mtod(pkt, uintptr_t);\n-\trte_prefetch0((volatile void *)addr);\n-\tdseg->addr = rte_cpu_to_be_64(addr);\n-\t/* Memory region key for this memory pool. */\n-\tlkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(pkt));\n-\tif (unlikely(lkey == (uint32_t)-1)) {\n-\t\t/* MR does not exist. */\n-\t\tDEBUG(\"%p: unable to get MP <-> MR\"\n-\t\t      \" association\", (void *)txq);\n-\t\t/*\n-\t\t * Restamp entry in case of failure.\n-\t\t * Make sure that size is written correctly.\n-\t\t * Note that we give ownership to the SW, not the HW.\n+\t/* Fill the data segments with buffer information. */\n+\tfor (buf = pkt; buf != NULL; buf = buf->next, dseg++) {\n+\t\taddr = rte_pktmbuf_mtod(buf, uintptr_t);\n+\t\trte_prefetch0((volatile void *)addr);\n+\t\t/* Handle WQE wraparound. */\n+\t\tif (unlikely(dseg >= (struct mlx4_wqe_data_seg *)sq->eob))\n+\t\t\tdseg = (struct mlx4_wqe_data_seg *)sq->buf;\n+\t\tdseg->addr = rte_cpu_to_be_64(addr);\n+\t\t/* Memory region key for this memory pool. */\n+\t\tlkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));\n+\t\tif (unlikely(lkey == (uint32_t)-1)) {\n+\t\t\t/* MR does not exist. 
*/\n+\t\t\tDEBUG(\"%p: unable to get MP <-> MR\"\n+\t\t\t      \" association\", (void *)txq);\n+\t\t\t/*\n+\t\t\t * Restamp entry in case of failure.\n+\t\t\t * Make sure that size is written correctly\n+\t\t\t * Note that we give ownership to the SW, not the HW.\n+\t\t\t */\n+\t\t\tctrl->fence_size = (wqe_real_size >> 4) & 0x3f;\n+\t\t\tmlx4_txq_stamp_freed_wqe(sq, head_idx,\n+\t\t\t\t     (sq->head & sq->txbb_cnt) ? 0 : 1);\n+\t\t\trc = EFAULT;\n+\t\t\tgoto err;\n+\t\t}\n+\t\tdseg->lkey = rte_cpu_to_be_32(lkey);\n+\t\tif (likely(buf->data_len))\n+\t\t\tbyte_count = rte_cpu_to_be_32(buf->data_len);\n+\t\telse\n+\t\t\t/*\n+\t\t\t * Zero length segment is treated as inline segment\n+\t\t\t * with zero data.\n+\t\t\t */\n+\t\t\tbyte_count = RTE_BE32(0x80000000);\n+\t\t/* If the data segment is not at the beginning of a\n+\t\t * Tx basic block(TXBB) then write the byte count,\n+\t\t * else postpone the writing to just before updating the\n+\t\t * control segment.\n \t\t */\n-\t\tctrl->fence_size = (wqe_real_size >> 4) & 0x3f;\n-\t\tmlx4_txq_stamp_freed_wqe(sq, head_idx,\n-\t\t\t\t\t (sq->head & sq->txbb_cnt) ? 
0 : 1);\n-\t\trc = EFAULT;\n-\t\tgoto err;\n+\t\tif ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {\n+\t\t\t/*\n+\t\t\t * Need a barrier here before writing the byte_count\n+\t\t\t * fields to make sure that all the data is visible\n+\t\t\t * before the byte_count field is set.\n+\t\t\t * Otherwise, if the segment begins a new cacheline,\n+\t\t\t * the HCA prefetcher could grab the 64-byte chunk and\n+\t\t\t * get a valid (!= * 0xffffffff) byte count but stale\n+\t\t\t * data, and end up sending the wrong data.\n+\t\t\t */\n+\t\t\trte_io_wmb();\n+\t\t\tdseg->byte_count = byte_count;\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * This data segment starts at the beginning of a new\n+\t\t\t * TXBB, so we need to postpone its byte_count writing\n+\t\t\t * for later.\n+\t\t\t */\n+\t\t\tpv[pv_counter].dseg = dseg;\n+\t\t\tpv[pv_counter++].val = byte_count;\n+\t\t}\n \t}\n-\tdseg->lkey = rte_cpu_to_be_32(lkey);\n-\t/*\n-\t * Need a barrier here before writing the byte_count field to\n-\t * make sure that all the data is visible before the\n-\t * byte_count field is set.  Otherwise, if the segment begins\n-\t * a new cacheline, the HCA prefetcher could grab the 64-byte\n-\t * chunk and get a valid (!= * 0xffffffff) byte count but\n-\t * stale data, and end up sending the wrong data.\n-\t */\n-\trte_io_wmb();\n-\tif (likely(pkt->data_len))\n-\t\tdseg->byte_count = rte_cpu_to_be_32(pkt->data_len);\n-\telse\n-\t\t/*\n-\t\t * Zero length segment is treated as inline segment\n-\t\t * with zero data.\n-\t\t */\n-\t\tdseg->byte_count = RTE_BE32(0x80000000);\n-\t/*\n-\t * Fill the control parameters for this packet.\n-\t * For raw Ethernet, the SOLICIT flag is used to indicate that no icrc\n-\t * should be calculated\n-\t */\n-\tctrl->srcrb_flags =\n-\t\trte_cpu_to_be_32(MLX4_WQE_CTRL_SOLICIT |\n-\t\t\t\t (send_flags & MLX4_WQE_CTRL_CQ_UPDATE));\n+\t/* Write the first DWORD of each TXBB save earlier. */\n+\tif (pv_counter) {\n+\t\t/* Need a barrier here before writing the byte_count. 
*/\n+\t\trte_io_wmb();\n+\t\tfor (--pv_counter; pv_counter  >= 0; pv_counter--)\n+\t\t\tpv[pv_counter].dseg->byte_count = pv[pv_counter].val;\n+\t}\n+\t/* Fill the control parameters for this packet. */\n \tctrl->fence_size = (wqe_real_size >> 4) & 0x3f;\n \t/*\n \t * The caller should prepare \"imm\" in advance in order to support\n \t * VF to VF communication (when the device is a virtual-function\n \t * device (VF)).\n-\t */\n+\t*/\n \tctrl->imm = 0;\n \t/*\n+\t * For raw Ethernet, the SOLICIT flag is used to indicate that no icrc\n+\t * should be calculated.\n+\t */\n+\ttxq->elts_comp_cd -= nr_txbbs;\n+\tif (unlikely(txq->elts_comp_cd <= 0)) {\n+\t\ttxq->elts_comp_cd = txq->elts_comp_cd_init;\n+\t\tsrcrb_flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |\n+\t\t\t\t       MLX4_WQE_CTRL_CQ_UPDATE);\n+\t} else {\n+\t\tsrcrb_flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);\n+\t}\n+\tctrl->srcrb_flags = srcrb_flags;\n+\t/*\n \t * Make sure descriptor is fully written before\n \t * setting ownership bit (because HW can start\n \t * executing as soon as we do).\n \t */\n-\trte_wmb();\n-\tctrl->owner_opcode =\n-\t\trte_cpu_to_be_32(MLX4_OPCODE_SEND |\n-\t\t\t\t ((sq->head & sq->txbb_cnt) ?\n-\t\t\t\t  MLX4_BIT_WQE_OWN : 0));\n+\t rte_wmb();\n+\t ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |\n+\t\t\t\t\t       ((sq->head & sq->txbb_cnt) ?\n+\t\t\t\t\t       MLX4_BIT_WQE_OWN : 0));\n \tsq->head += nr_txbbs;\n \treturn 0;\n err:\n@@ -429,14 +475,13 @@\n \tstruct txq *txq = (struct txq *)dpdk_txq;\n \tunsigned int elts_head = txq->elts_head;\n \tconst unsigned int elts_n = txq->elts_n;\n-\tunsigned int elts_comp_cd = txq->elts_comp_cd;\n \tunsigned int elts_comp = 0;\n \tunsigned int bytes_sent = 0;\n \tunsigned int i;\n \tunsigned int max;\n \tint err;\n \n-\tassert(elts_comp_cd != 0);\n+\tassert(txq->elts_comp_cd != 0);\n \tmlx4_txq_complete(txq);\n \tmax = (elts_n - (elts_head - txq->elts_tail));\n \tif (max > elts_n)\n@@ -455,8 +500,6 @@\n \t\t\t(((elts_head + 1) == elts_n) ? 
0 : elts_head + 1);\n \t\tstruct txq_elt *elt_next = &(*txq->elts)[elts_head_next];\n \t\tstruct txq_elt *elt = &(*txq->elts)[elts_head];\n-\t\tunsigned int segs = buf->nb_segs;\n-\t\tuint32_t send_flags = 0;\n \n \t\t/* Clean up old buffer. */\n \t\tif (likely(elt->buf != NULL)) {\n@@ -474,34 +517,16 @@\n \t\t\t\ttmp = next;\n \t\t\t} while (tmp != NULL);\n \t\t}\n-\t\t/* Request Tx completion. */\n-\t\tif (unlikely(--elts_comp_cd == 0)) {\n-\t\t\telts_comp_cd = txq->elts_comp_cd_init;\n-\t\t\t++elts_comp;\n-\t\t\tsend_flags |= MLX4_WQE_CTRL_CQ_UPDATE;\n-\t\t}\n-\t\tif (likely(segs == 1)) {\n-\t\t\t/* Update element. */\n-\t\t\telt->buf = buf;\n-\t\t\tRTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);\n-\t\t\t/* post the pkt for sending */\n-\t\t\terr = mlx4_post_send(txq, buf, send_flags);\n-\t\t\tif (unlikely(err)) {\n-\t\t\t\tif (unlikely(send_flags &\n-\t\t\t\t\t     MLX4_WQE_CTRL_CQ_UPDATE)) {\n-\t\t\t\t\telts_comp_cd = 1;\n-\t\t\t\t\t--elts_comp;\n-\t\t\t\t}\n-\t\t\t\telt->buf = NULL;\n-\t\t\t\tgoto stop;\n-\t\t\t}\n-\t\t\telt->buf = buf;\n-\t\t\tbytes_sent += buf->pkt_len;\n-\t\t} else {\n-\t\t\terr = -EINVAL;\n-\t\t\trte_errno = -err;\n+\t\tRTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);\n+\t\t/* post the packet for sending. */\n+\t\terr = mlx4_post_send(txq, buf);\n+\t\tif (unlikely(err)) {\n+\t\t\telt->buf = NULL;\n \t\t\tgoto stop;\n \t\t}\n+\t\telt->buf = buf;\n+\t\tbytes_sent += buf->pkt_len;\n+\t\t++elts_comp;\n \t\telts_head = elts_head_next;\n \t}\n stop:\n@@ -517,7 +542,6 @@\n \trte_write32(txq->msq.doorbell_qpn, txq->msq.db);\n \ttxq->elts_head = elts_head;\n \ttxq->elts_comp += elts_comp;\n-\ttxq->elts_comp_cd = elts_comp_cd;\n \treturn i;\n }\n \ndiff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h\nindex df83552..1b90533 100644\n--- a/drivers/net/mlx4/mlx4_rxtx.h\n+++ b/drivers/net/mlx4/mlx4_rxtx.h\n@@ -103,13 +103,15 @@ struct txq {\n \tstruct mlx4_cq mcq; /**< Info for directly manipulating the CQ. 
*/\n \tunsigned int elts_head; /**< Current index in (*elts)[]. */\n \tunsigned int elts_tail; /**< First element awaiting completion. */\n-\tunsigned int elts_comp; /**< Number of completion requests. */\n-\tunsigned int elts_comp_cd; /**< Countdown for next completion. */\n+\tunsigned int elts_comp; /**< Number of pkts waiting for completion. */\n+\tint elts_comp_cd; /**< Countdown for next completion. */\n \tunsigned int elts_comp_cd_init; /**< Initial value for countdown. */\n \tunsigned int elts_n; /**< (*elts)[] length. */\n \tstruct txq_elt (*elts)[]; /**< Tx elements. */\n \tstruct mlx4_txq_stats stats; /**< Tx queue counters. */\n \tuint32_t max_inline; /**< Max inline send size. */\n+\tchar *bounce_buf;\n+\t/**< memory used for storing the first DWORD of data TXBBs. */\n \tstruct {\n \t\tconst struct rte_mempool *mp; /**< Cached memory pool. */\n \t\tstruct ibv_mr *mr; /**< Memory region (for mp). */\ndiff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c\nindex 492779f..bbdeda3 100644\n--- a/drivers/net/mlx4/mlx4_txq.c\n+++ b/drivers/net/mlx4/mlx4_txq.c\n@@ -83,8 +83,14 @@\n \t\trte_calloc_socket(\"TXQ\", 1, sizeof(*elts), 0, txq->ctrl.socket);\n \tint ret = 0;\n \n-\tif (elts == NULL) {\n-\t\tERROR(\"%p: can't allocate packets array\", (void *)txq);\n+\t/* Allocate Bounce-buf memory */\n+\ttxq->bounce_buf = (char *)rte_zmalloc_socket(\"TXQ\",\n+\t\t\t\t\t\t     MLX4_MAX_WQE_SIZE,\n+\t\t\t\t\t\t     RTE_CACHE_LINE_MIN_SIZE,\n+\t\t\t\t\t\t     txq->ctrl.socket);\n+\n+\tif (elts == NULL || txq->bounce_buf == NULL) {\n+\t\tERROR(\"%p: can't allocate TXQ memory\", (void *)txq);\n \t\tret = ENOMEM;\n \t\tgoto error;\n \t}\n@@ -110,6 +116,7 @@\n \tassert(ret == 0);\n \treturn 0;\n error:\n+\trte_free(txq->bounce_buf);\n \trte_free(elts);\n \tDEBUG(\"%p: failed, freed everything\", (void *)txq);\n \tassert(ret > 0);\n@@ -303,7 +310,6 @@ struct txq_mp2mr_mbuf_check_data {\n \tstruct mlx4dv_obj mlxdv;\n \tstruct mlx4dv_qp dv_qp;\n \tstruct 
mlx4dv_cq dv_cq;\n-\n \tstruct txq tmpl = {\n \t\t.ctrl = {\n \t\t\t.priv = priv,\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "4/7"
    ]
}