get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
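
For programmatic access, a minimal sketch using Python's requests package is shown below (an assumption: any HTTP client works). Read access to public projects needs no credentials; the PATCH call assumes token authentication with a maintainer-level API token, and PATCHWORK_TOKEN plus the "accepted" state are hypothetical placeholders.

import os
import requests

PATCH_URL = "https://patches.dpdk.org/api/patches/90325/"

# Fetch the patch as JSON (content negotiation via the Accept header).
patch = requests.get(PATCH_URL, headers={"Accept": "application/json"}).json()
print(patch["name"], patch["state"])

# Update a writable field with PATCH; assumes an API token with sufficient rights.
token = os.environ["PATCHWORK_TOKEN"]  # hypothetical environment variable
resp = requests.patch(PATCH_URL,
                      headers={"Authorization": f"Token {token}"},
                      json={"state": "accepted"})
resp.raise_for_status()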

GET /api/patches/90325/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 90325,
    "url": "https://patches.dpdk.org/api/patches/90325/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210401094739.22714-37-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210401094739.22714-37-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210401094739.22714-37-ndabilpuram@marvell.com",
    "date": "2021-04-01T09:47:23",
    "name": "[v2,36/52] common/cnxk: add nix tm hierarchy enable/disable",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "21c4bcdd157efc2afe5fd192f2aac883353f519f",
    "submitter": {
        "id": 1202,
        "url": "https://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210401094739.22714-37-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 16050,
            "url": "https://patches.dpdk.org/api/series/16050/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=16050",
            "date": "2021-04-01T09:46:47",
            "name": "Add Marvell CNXK common driver",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/16050/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/90325/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/90325/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8BE69A0548;\n\tThu,  1 Apr 2021 11:52:58 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A1F93140FE3;\n\tThu,  1 Apr 2021 11:49:58 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 0FF9A140FDB\n for <dev@dpdk.org>; Thu,  1 Apr 2021 11:49:56 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 1319fRbX014984 for <dev@dpdk.org>; Thu, 1 Apr 2021 02:49:56 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com with ESMTP id 37n28j1wyw-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Thu, 01 Apr 2021 02:49:56 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2;\n Thu, 1 Apr 2021 02:49:54 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Thu, 1 Apr 2021 02:49:54 -0700",
            "from hyd1588t430.marvell.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id C30953F7041;\n Thu,  1 Apr 2021 02:49:51 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=lVyN06MDMyqcmwbIDreDQBorq8Jl6AsiMy+4j0gLEoQ=;\n b=iNxUv513x6Nm16YL7l3DtbdtWJcnCsULkhY2cA6F+gnLpBdnnA9I6YvLWjhscE0/e66d\n qaItE+k5ntbMkcxEp2hxlPkugXPLJVZP0ChqdTcwRH6nStLHp1VwcX+LzRkMIJYRzbJG\n Sjt3vMa4x3cQxBjnHZ5OKCxVw1ZERXIVzOeh8sJZw5JASDguQg5IpeuBLKLtjYM/n8kJ\n z7kFoCEieC+cIqXvww1dxLP4nb7q9DYnGXEIrf12N6+mhDmwPweN9+fyo9QX9cvJ+o/8\n 9VK6vfTSvPQbXp/BCTOTbi6reiPEGFwAf1oMU/r1pyiHJel0HvjVq45PeywvknXhcXcR lA==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<jerinj@marvell.com>, <skori@marvell.com>, <skoteshwar@marvell.com>,\n <pbhagavatula@marvell.com>, <kirankumark@marvell.com>,\n <psatheesh@marvell.com>, <asekhar@marvell.com>, Nithin Dabilpuram\n <ndabilpuram@marvell.com>",
        "Date": "Thu, 1 Apr 2021 15:17:23 +0530",
        "Message-ID": "<20210401094739.22714-37-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20210401094739.22714-1-ndabilpuram@marvell.com>",
        "References": "<20210305133918.8005-1-ndabilpuram@marvell.com>\n <20210401094739.22714-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "pIs9xW_rFUpSQ3yCWmRsWTtEA1rHnTxH",
        "X-Proofpoint-GUID": "pIs9xW_rFUpSQ3yCWmRsWTtEA1rHnTxH",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.369, 18.0.761\n definitions=2021-04-01_04:2021-03-31,\n 2021-04-01 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v2 36/52] common/cnxk: add nix tm hierarchy\n enable/disable",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support to enable or disable hierarchy along with\nallocating node HW resources such as shapers and schedulers\nand configuring them to match the user created or default\nhierarchy.\n\nSigned-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\n---\n drivers/common/cnxk/roc_nix.h          |   8 +\n drivers/common/cnxk/roc_nix_priv.h     |  16 ++\n drivers/common/cnxk/roc_nix_tm.c       | 147 ++++++++++++\n drivers/common/cnxk/roc_nix_tm_ops.c   | 234 +++++++++++++++++++\n drivers/common/cnxk/roc_nix_tm_utils.c | 410 +++++++++++++++++++++++++++++++++\n drivers/common/cnxk/version.map        |   2 +\n 6 files changed, 817 insertions(+)",
    "diff": "diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h\nindex 52e001c..7bf3435 100644\n--- a/drivers/common/cnxk/roc_nix.h\n+++ b/drivers/common/cnxk/roc_nix.h\n@@ -392,6 +392,14 @@ struct roc_nix_tm_shaper_profile *__roc_api roc_nix_tm_shaper_profile_next(\n \tstruct roc_nix *roc_nix, struct roc_nix_tm_shaper_profile *__prev);\n \n /*\n+ * TM hierarchy enable/disable API.\n+ */\n+int __roc_api roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix);\n+int __roc_api roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix,\n+\t\t\t\t\t  enum roc_nix_tm_tree tree,\n+\t\t\t\t\t  bool xmit_enable);\n+\n+/*\n  * TM utilities API.\n  */\n int __roc_api roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id);\ndiff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h\nindex 5110967..a40621c 100644\n--- a/drivers/common/cnxk/roc_nix_priv.h\n+++ b/drivers/common/cnxk/roc_nix_priv.h\n@@ -340,7 +340,10 @@ int nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,\n \t\t\t     bool above_thresh);\n void nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp);\n \n+int nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree);\n int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);\n+int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,\n+\t\t\t bool rr_quantum_only);\n \n /*\n  * TM priv utils.\n@@ -369,6 +372,19 @@ bool nix_tm_child_res_valid(struct nix_tm_node_list *list,\n \t\t\t    struct nix_tm_node *parent);\n uint16_t nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig,\n \t\t\t\t  uint16_t *schq, enum roc_nix_tm_tree tree);\n+uint8_t nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,\n+\t\t\t\tvolatile uint64_t *regval);\n+uint8_t nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,\n+\t\t\t\t volatile uint64_t *reg,\n+\t\t\t\t volatile uint64_t *regval,\n+\t\t\t\t volatile uint64_t *regval_mask);\n+uint8_t nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,\n+\t\t\t      volatile uint64_t *reg,\n+\t\t\t      volatile uint64_t *regval);\n+uint8_t nix_tm_shaper_reg_prep(struct nix_tm_node *node,\n+\t\t\t       struct nix_tm_shaper_profile *profile,\n+\t\t\t       volatile uint64_t *reg,\n+\t\t\t       volatile uint64_t *regval);\n struct nix_tm_node *nix_tm_node_alloc(void);\n void nix_tm_node_free(struct nix_tm_node *node);\n struct nix_tm_shaper_profile *nix_tm_shaper_profile_alloc(void);\ndiff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c\nindex 581de4b..762c85a 100644\n--- a/drivers/common/cnxk/roc_nix_tm.c\n+++ b/drivers/common/cnxk/roc_nix_tm.c\n@@ -30,6 +30,93 @@ nix_tm_clear_shaper_profiles(struct nix *nix)\n \t}\n }\n \n+static int\n+nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)\n+{\n+\tuint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];\n+\tuint64_t regval[MAX_REGS_PER_MBOX_MSG];\n+\tstruct nix_tm_shaper_profile *profile;\n+\tuint64_t reg[MAX_REGS_PER_MBOX_MSG];\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tstruct nix_txschq_config *req;\n+\tint rc = -EFAULT;\n+\tuint32_t hw_lvl;\n+\tuint8_t k = 0;\n+\n+\tmemset(regval, 0, sizeof(regval));\n+\tmemset(regval_mask, 0, sizeof(regval_mask));\n+\n+\tprofile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);\n+\thw_lvl = node->hw_lvl;\n+\n+\t/* Need this trigger to configure TL1 */\n+\tif (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {\n+\t\t/* Prepare default conf for TL1 */\n+\t\treq 
= mbox_alloc_msg_nix_txschq_cfg(mbox);\n+\t\treq->lvl = NIX_TXSCH_LVL_TL1;\n+\n+\t\tk = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,\n+\t\t\t\t\t    req->regval);\n+\t\treq->num_regs = k;\n+\t\trc = mbox_process(mbox);\n+\t\tif (rc)\n+\t\t\tgoto error;\n+\t}\n+\n+\t/* Prepare topology config */\n+\tk = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);\n+\n+\t/* Prepare schedule config */\n+\tk += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);\n+\n+\t/* Prepare shaping config */\n+\tk += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);\n+\n+\tif (!k)\n+\t\treturn 0;\n+\n+\t/* Copy and send config mbox */\n+\treq = mbox_alloc_msg_nix_txschq_cfg(mbox);\n+\treq->lvl = hw_lvl;\n+\treq->num_regs = k;\n+\n+\tmbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);\n+\tmbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);\n+\tmbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);\n+\n+\trc = mbox_process(mbox);\n+\tif (rc)\n+\t\tgoto error;\n+\n+\treturn 0;\n+error:\n+\tplt_err(\"Txschq conf failed for node %p, rc=%d\", node, rc);\n+\treturn rc;\n+}\n+\n+int\n+nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)\n+{\n+\tstruct nix_tm_node_list *list;\n+\tstruct nix_tm_node *node;\n+\tuint32_t hw_lvl;\n+\tint rc = 0;\n+\n+\tlist = nix_tm_node_list(nix, tree);\n+\n+\tfor (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {\n+\t\tTAILQ_FOREACH(node, list, node) {\n+\t\t\tif (node->hw_lvl != hw_lvl)\n+\t\t\t\tcontinue;\n+\t\t\trc = nix_tm_node_reg_conf(nix, node);\n+\t\t\tif (rc)\n+\t\t\t\tgoto exit;\n+\t\t}\n+\t}\n+exit:\n+\treturn rc;\n+}\n+\n int\n nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)\n {\n@@ -478,6 +565,66 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)\n }\n \n int\n+nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,\n+\t\t     bool rr_quantum_only)\n+{\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tuint16_t qid = node->id, smq;\n+\tuint64_t rr_quantum;\n+\tint rc;\n+\n+\tsmq = node->parent->hw_id;\n+\trr_quantum = nix_tm_weight_to_rr_quantum(node->weight);\n+\n+\tif (rr_quantum_only)\n+\t\tplt_tm_dbg(\"Update sq(%u) rr_quantum 0x%\" PRIx64, qid,\n+\t\t\t   rr_quantum);\n+\telse\n+\t\tplt_tm_dbg(\"Enabling sq(%u)->smq(%u), rr_quantum 0x%\" PRIx64,\n+\t\t\t   qid, smq, rr_quantum);\n+\n+\tif (qid > nix->nb_tx_queues)\n+\t\treturn -EFAULT;\n+\n+\tif (roc_model_is_cn9k()) {\n+\t\tstruct nix_aq_enq_req *aq;\n+\n+\t\taq = mbox_alloc_msg_nix_aq_enq(mbox);\n+\t\taq->qidx = qid;\n+\t\taq->ctype = NIX_AQ_CTYPE_SQ;\n+\t\taq->op = NIX_AQ_INSTOP_WRITE;\n+\n+\t\t/* smq update only when needed */\n+\t\tif (!rr_quantum_only) {\n+\t\t\taq->sq.smq = smq;\n+\t\t\taq->sq_mask.smq = ~aq->sq_mask.smq;\n+\t\t}\n+\t\taq->sq.smq_rr_quantum = rr_quantum;\n+\t\taq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;\n+\t} else {\n+\t\tstruct nix_cn10k_aq_enq_req *aq;\n+\n+\t\taq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);\n+\t\taq->qidx = qid;\n+\t\taq->ctype = NIX_AQ_CTYPE_SQ;\n+\t\taq->op = NIX_AQ_INSTOP_WRITE;\n+\n+\t\t/* smq update only when needed */\n+\t\tif (!rr_quantum_only) {\n+\t\t\taq->sq.smq = smq;\n+\t\t\taq->sq_mask.smq = ~aq->sq_mask.smq;\n+\t\t}\n+\t\taq->sq.smq_rr_weight = rr_quantum;\n+\t\taq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;\n+\t}\n+\n+\trc = mbox_process(mbox);\n+\tif (rc)\n+\t\tplt_err(\"Failed to set smq, rc=%d\", rc);\n+\treturn rc;\n+}\n+\n+int\n nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,\n \t\t\t bool above_thresh)\n {\ndiff --git 
a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c\nindex 1e952c4..6bb0766 100644\n--- a/drivers/common/cnxk/roc_nix_tm_ops.c\n+++ b/drivers/common/cnxk/roc_nix_tm_ops.c\n@@ -309,3 +309,237 @@ roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)\n {\n \treturn nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);\n }\n+\n+int\n+roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)\n+{\n+\tstruct nix *nix = roc_nix_to_nix_priv(roc_nix);\n+\tuint16_t sqb_cnt, head_off, tail_off;\n+\tuint16_t sq_cnt = nix->nb_tx_queues;\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tstruct nix_tm_node_list *list;\n+\tenum roc_nix_tm_tree tree;\n+\tstruct nix_tm_node *node;\n+\tstruct roc_nix_sq *sq;\n+\tuint64_t wdata, val;\n+\tuintptr_t regaddr;\n+\tint rc = -1, i;\n+\n+\tif (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))\n+\t\treturn 0;\n+\n+\tplt_tm_dbg(\"Disabling hierarchy on %s\", nix->pci_dev->name);\n+\n+\ttree = nix->tm_tree;\n+\tlist = nix_tm_node_list(nix, tree);\n+\n+\t/* Enable CGX RXTX to drain pkts */\n+\tif (!roc_nix->io_enabled) {\n+\t\t/* Though it enables both RX MCAM Entries and CGX Link\n+\t\t * we assume all the rx queues are stopped way back.\n+\t\t */\n+\t\tmbox_alloc_msg_nix_lf_start_rx(mbox);\n+\t\trc = mbox_process(mbox);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"cgx start failed, rc=%d\", rc);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\t/* XON all SMQ's */\n+\tTAILQ_FOREACH(node, list, node) {\n+\t\tif (node->hw_lvl != NIX_TXSCH_LVL_SMQ)\n+\t\t\tcontinue;\n+\t\tif (!(node->flags & NIX_TM_NODE_HWRES))\n+\t\t\tcontinue;\n+\n+\t\trc = nix_tm_smq_xoff(nix, node, false);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Failed to enable smq %u, rc=%d\", node->hw_id,\n+\t\t\t\trc);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\t}\n+\n+\t/* Flush all tx queues */\n+\tfor (i = 0; i < sq_cnt; i++) {\n+\t\tsq = nix->sqs[i];\n+\t\tif (!sq)\n+\t\t\tcontinue;\n+\n+\t\trc = roc_nix_tm_sq_aura_fc(sq, false);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Failed to disable sqb aura fc, rc=%d\", rc);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\t/* Wait for sq entries to be flushed */\n+\t\trc = roc_nix_tm_sq_flush_spin(sq);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Failed to drain sq, rc=%d\\n\", rc);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\t}\n+\n+\t/* XOFF & Flush all SMQ's. 
HRM mandates\n+\t * all SQ's empty before SMQ flush is issued.\n+\t */\n+\tTAILQ_FOREACH(node, list, node) {\n+\t\tif (node->hw_lvl != NIX_TXSCH_LVL_SMQ)\n+\t\t\tcontinue;\n+\t\tif (!(node->flags & NIX_TM_NODE_HWRES))\n+\t\t\tcontinue;\n+\n+\t\trc = nix_tm_smq_xoff(nix, node, true);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Failed to enable smq %u, rc=%d\", node->hw_id,\n+\t\t\t\trc);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tnode->flags &= ~NIX_TM_NODE_ENABLED;\n+\t}\n+\n+\t/* Verify sanity of all tx queues */\n+\tfor (i = 0; i < sq_cnt; i++) {\n+\t\tsq = nix->sqs[i];\n+\t\tif (!sq)\n+\t\t\tcontinue;\n+\n+\t\twdata = ((uint64_t)sq->qid << 32);\n+\t\tregaddr = nix->base + NIX_LF_SQ_OP_STATUS;\n+\t\tval = roc_atomic64_add_nosync(wdata, (int64_t *)regaddr);\n+\n+\t\tsqb_cnt = val & 0xFFFF;\n+\t\thead_off = (val >> 20) & 0x3F;\n+\t\ttail_off = (val >> 28) & 0x3F;\n+\n+\t\tif (sqb_cnt > 1 || head_off != tail_off ||\n+\t\t    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))\n+\t\t\tplt_err(\"Failed to gracefully flush sq %u\", sq->qid);\n+\t}\n+\n+\tnix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;\n+cleanup:\n+\t/* Restore cgx state */\n+\tif (!roc_nix->io_enabled) {\n+\t\tmbox_alloc_msg_nix_lf_stop_rx(mbox);\n+\t\trc |= mbox_process(mbox);\n+\t}\n+\treturn rc;\n+}\n+\n+int\n+roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree tree,\n+\t\t\t    bool xmit_enable)\n+{\n+\tstruct nix *nix = roc_nix_to_nix_priv(roc_nix);\n+\tstruct nix_tm_node_list *list;\n+\tstruct nix_tm_node *node;\n+\tstruct roc_nix_sq *sq;\n+\tuint32_t tree_mask;\n+\tuint16_t sq_id;\n+\tint rc;\n+\n+\tif (tree >= ROC_NIX_TM_TREE_MAX)\n+\t\treturn NIX_ERR_PARAM;\n+\n+\tif (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {\n+\t\tif (nix->tm_tree != tree)\n+\t\t\treturn -EBUSY;\n+\t\treturn 0;\n+\t}\n+\n+\tplt_tm_dbg(\"Enabling hierarchy on %s, xmit_ena %u, tree %u\",\n+\t\t   nix->pci_dev->name, xmit_enable, tree);\n+\n+\t/* Free hw resources of other trees */\n+\ttree_mask = NIX_TM_TREE_MASK_ALL;\n+\ttree_mask &= ~BIT(tree);\n+\n+\trc = nix_tm_free_resources(roc_nix, tree_mask, true);\n+\tif (rc) {\n+\t\tplt_err(\"failed to free resources of other trees, rc=%d\", rc);\n+\t\treturn rc;\n+\t}\n+\n+\t/* Update active tree before starting to do anything */\n+\tnix->tm_tree = tree;\n+\n+\tnix_tm_update_parent_info(nix, tree);\n+\n+\trc = nix_tm_alloc_txschq(nix, tree);\n+\tif (rc) {\n+\t\tplt_err(\"TM failed to alloc tm resources=%d\", rc);\n+\t\treturn rc;\n+\t}\n+\n+\trc = nix_tm_assign_resources(nix, tree);\n+\tif (rc) {\n+\t\tplt_err(\"TM failed to assign tm resources=%d\", rc);\n+\t\treturn rc;\n+\t}\n+\n+\trc = nix_tm_txsch_reg_config(nix, tree);\n+\tif (rc) {\n+\t\tplt_err(\"TM failed to configure sched registers=%d\", rc);\n+\t\treturn rc;\n+\t}\n+\n+\tlist = nix_tm_node_list(nix, tree);\n+\t/* Mark all non-leaf's as enabled */\n+\tTAILQ_FOREACH(node, list, node) {\n+\t\tif (!nix_tm_is_leaf(nix, node->lvl))\n+\t\t\tnode->flags |= NIX_TM_NODE_ENABLED;\n+\t}\n+\n+\tif (!xmit_enable)\n+\t\tgoto skip_sq_update;\n+\n+\t/* Update SQ Sched Data while SQ is idle */\n+\tTAILQ_FOREACH(node, list, node) {\n+\t\tif (!nix_tm_is_leaf(nix, node->lvl))\n+\t\t\tcontinue;\n+\n+\t\trc = nix_tm_sq_sched_conf(nix, node, false);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"SQ %u sched update failed, rc=%d\", node->id,\n+\t\t\t\trc);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\t/* Finally XON all SMQ's */\n+\tTAILQ_FOREACH(node, list, node) {\n+\t\tif (node->hw_lvl != NIX_TXSCH_LVL_SMQ)\n+\t\t\tcontinue;\n+\n+\t\trc = nix_tm_smq_xoff(nix, node, false);\n+\t\tif (rc) 
{\n+\t\t\tplt_err(\"Failed to enable smq %u, rc=%d\", node->hw_id,\n+\t\t\t\trc);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\t/* Enable xmit as all the topology is ready */\n+\tTAILQ_FOREACH(node, list, node) {\n+\t\tif (!nix_tm_is_leaf(nix, node->lvl))\n+\t\t\tcontinue;\n+\n+\t\tsq_id = node->id;\n+\t\tsq = nix->sqs[sq_id];\n+\n+\t\trc = roc_nix_tm_sq_aura_fc(sq, true);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"TM sw xon failed on SQ %u, rc=%d\", node->id,\n+\t\t\t\trc);\n+\t\t\treturn rc;\n+\t\t}\n+\t\tnode->flags |= NIX_TM_NODE_ENABLED;\n+\t}\n+\n+skip_sq_update:\n+\tnix->tm_flags |= NIX_TM_HIERARCHY_ENA;\n+\treturn 0;\n+}\ndiff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c\nindex 45de9f6..b644716 100644\n--- a/drivers/common/cnxk/roc_nix_tm_utils.c\n+++ b/drivers/common/cnxk/roc_nix_tm_utils.c\n@@ -5,6 +5,14 @@\n #include \"roc_api.h\"\n #include \"roc_priv.h\"\n \n+static inline uint64_t\n+nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)\n+{\n+\treturn (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |\n+\t       (shaper->div_exp << 13) | (shaper->exponent << 9) |\n+\t       (shaper->mantissa << 1);\n+}\n+\n uint16_t\n nix_tm_lvl2nix_tl1_root(uint32_t lvl)\n {\n@@ -50,6 +58,32 @@ nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)\n \t\treturn nix_tm_lvl2nix_tl2_root(lvl);\n }\n \n+static uint8_t\n+nix_tm_relchan_get(struct nix *nix)\n+{\n+\treturn nix->tx_chan_base & 0xff;\n+}\n+\n+static int\n+nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,\n+\t\t\tenum roc_nix_tm_tree tree)\n+{\n+\tstruct nix_tm_node *child_node;\n+\tstruct nix_tm_node_list *list;\n+\n+\tlist = nix_tm_node_list(nix, tree);\n+\n+\tTAILQ_FOREACH(child_node, list, node) {\n+\t\tif (!child_node->parent)\n+\t\t\tcontinue;\n+\t\tif (!(child_node->parent->id == node_id))\n+\t\t\tcontinue;\n+\t\tif (child_node->priority == child_node->parent->rr_prio)\n+\t\t\tcontinue;\n+\t\treturn child_node->hw_id - child_node->priority;\n+\t}\n+\treturn 0;\n+}\n \n struct nix_tm_shaper_profile *\n nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)\n@@ -177,6 +211,39 @@ nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,\n \treturn NIX_TM_SHAPER_BURST(exponent, mantissa);\n }\n \n+static void\n+nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,\n+\t\t       struct nix_tm_shaper_data *cir,\n+\t\t       struct nix_tm_shaper_data *pir)\n+{\n+\tif (!profile)\n+\t\treturn;\n+\n+\t/* Calculate CIR exponent and mantissa */\n+\tif (profile->commit.rate)\n+\t\tcir->rate = nix_tm_shaper_rate_conv(\n+\t\t\tprofile->commit.rate, &cir->exponent, &cir->mantissa,\n+\t\t\t&cir->div_exp);\n+\n+\t/* Calculate PIR exponent and mantissa */\n+\tif (profile->peak.rate)\n+\t\tpir->rate = nix_tm_shaper_rate_conv(\n+\t\t\tprofile->peak.rate, &pir->exponent, &pir->mantissa,\n+\t\t\t&pir->div_exp);\n+\n+\t/* Calculate CIR burst exponent and mantissa */\n+\tif (profile->commit.size)\n+\t\tcir->burst = nix_tm_shaper_burst_conv(profile->commit.size,\n+\t\t\t\t\t\t      &cir->burst_exponent,\n+\t\t\t\t\t\t      &cir->burst_mantissa);\n+\n+\t/* Calculate PIR burst exponent and mantissa */\n+\tif (profile->peak.size)\n+\t\tpir->burst = nix_tm_shaper_burst_conv(profile->peak.size,\n+\t\t\t\t\t\t      &pir->burst_exponent,\n+\t\t\t\t\t\t      &pir->burst_mantissa);\n+}\n+\n uint32_t\n nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,\n \t\tuint32_t *rr_prio, uint32_t *max_prio)\n@@ -309,6 +376,349 @@ nix_tm_child_res_valid(struct nix_tm_node_list *list,\n 
}\n \n uint8_t\n+nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,\n+\t\t\tvolatile uint64_t *regval)\n+{\n+\tuint8_t k = 0;\n+\n+\t/*\n+\t * Default config for TL1.\n+\t * For VF this is always ignored.\n+\t */\n+\tplt_tm_dbg(\"Default config for main root %s(%u)\",\n+\t\t   nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);\n+\n+\t/* Set DWRR quantum */\n+\treg[k] = NIX_AF_TL1X_SCHEDULE(schq);\n+\tregval[k] = NIX_TM_TL1_DFLT_RR_QTM;\n+\tk++;\n+\n+\treg[k] = NIX_AF_TL1X_TOPOLOGY(schq);\n+\tregval[k] = (NIX_TM_TL1_DFLT_RR_PRIO << 1);\n+\tk++;\n+\n+\treg[k] = NIX_AF_TL1X_CIR(schq);\n+\tregval[k] = 0;\n+\tk++;\n+\n+\treturn k;\n+}\n+\n+uint8_t\n+nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,\n+\t\t\t volatile uint64_t *reg, volatile uint64_t *regval,\n+\t\t\t volatile uint64_t *regval_mask)\n+{\n+\tuint8_t k = 0, hw_lvl, parent_lvl;\n+\tuint64_t parent = 0, child = 0;\n+\tenum roc_nix_tm_tree tree;\n+\tuint32_t rr_prio, schq;\n+\tuint16_t link, relchan;\n+\n+\ttree = node->tree;\n+\tschq = node->hw_id;\n+\thw_lvl = node->hw_lvl;\n+\tparent_lvl = hw_lvl + 1;\n+\trr_prio = node->rr_prio;\n+\n+\t/* Root node will not have a parent node */\n+\tif (hw_lvl == nix->tm_root_lvl)\n+\t\tparent = node->parent_hw_id;\n+\telse\n+\t\tparent = node->parent->hw_id;\n+\n+\tlink = nix->tx_link;\n+\trelchan = nix_tm_relchan_get(nix);\n+\n+\tif (hw_lvl != NIX_TXSCH_LVL_SMQ)\n+\t\tchild = nix_tm_find_prio_anchor(nix, node->id, tree);\n+\n+\t/* Override default rr_prio when TL1\n+\t * Static Priority is disabled\n+\t */\n+\tif (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {\n+\t\trr_prio = NIX_TM_TL1_DFLT_RR_PRIO;\n+\t\tchild = 0;\n+\t}\n+\n+\tplt_tm_dbg(\"Topology config node %s(%u)->%s(%\" PRIu64 \") lvl %u, id %u\"\n+\t\t   \" prio_anchor %\" PRIu64 \" rr_prio %u (%p)\",\n+\t\t   nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),\n+\t\t   parent, node->lvl, node->id, child, rr_prio, node);\n+\n+\t/* Prepare Topology and Link config */\n+\tswitch (hw_lvl) {\n+\tcase NIX_TXSCH_LVL_SMQ:\n+\n+\t\t/* Set xoff which will be cleared later */\n+\t\treg[k] = NIX_AF_SMQX_CFG(schq);\n+\t\tregval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |\n+\t\t\t     ((nix->mtu & 0xFFFF) << 8));\n+\t\tregval_mask[k] =\n+\t\t\t~(BIT_ULL(50) | GENMASK_ULL(6, 0) | GENMASK_ULL(23, 8));\n+\t\tk++;\n+\n+\t\t/* Parent and schedule conf */\n+\t\treg[k] = NIX_AF_MDQX_PARENT(schq);\n+\t\tregval[k] = parent << 16;\n+\t\tk++;\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL4:\n+\t\t/* Parent and schedule conf */\n+\t\treg[k] = NIX_AF_TL4X_PARENT(schq);\n+\t\tregval[k] = parent << 16;\n+\t\tk++;\n+\n+\t\treg[k] = NIX_AF_TL4X_TOPOLOGY(schq);\n+\t\tregval[k] = (child << 32) | (rr_prio << 1);\n+\t\tk++;\n+\n+\t\t/* Configure TL4 to send to SDP channel instead of CGX/LBK */\n+\t\tif (nix->sdp_link) {\n+\t\t\treg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);\n+\t\t\tregval[k] = BIT_ULL(12);\n+\t\t\tk++;\n+\t\t}\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL3:\n+\t\t/* Parent and schedule conf */\n+\t\treg[k] = NIX_AF_TL3X_PARENT(schq);\n+\t\tregval[k] = parent << 16;\n+\t\tk++;\n+\n+\t\treg[k] = NIX_AF_TL3X_TOPOLOGY(schq);\n+\t\tregval[k] = (child << 32) | (rr_prio << 1);\n+\t\tk++;\n+\n+\t\t/* Link configuration */\n+\t\tif (!nix->sdp_link &&\n+\t\t    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {\n+\t\t\treg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);\n+\t\t\tregval[k] = BIT_ULL(12) | relchan;\n+\t\t\tk++;\n+\t\t}\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL2:\n+\t\t/* Parent and schedule conf */\n+\t\treg[k] = 
NIX_AF_TL2X_PARENT(schq);\n+\t\tregval[k] = parent << 16;\n+\t\tk++;\n+\n+\t\treg[k] = NIX_AF_TL2X_TOPOLOGY(schq);\n+\t\tregval[k] = (child << 32) | (rr_prio << 1);\n+\t\tk++;\n+\n+\t\t/* Link configuration */\n+\t\tif (!nix->sdp_link &&\n+\t\t    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {\n+\t\t\treg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);\n+\t\t\tregval[k] = BIT_ULL(12) | relchan;\n+\t\t\tk++;\n+\t\t}\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL1:\n+\t\treg[k] = NIX_AF_TL1X_TOPOLOGY(schq);\n+\t\tregval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);\n+\t\tk++;\n+\n+\t\tbreak;\n+\t}\n+\n+\treturn k;\n+}\n+\n+uint8_t\n+nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,\n+\t\t      volatile uint64_t *reg, volatile uint64_t *regval)\n+{\n+\tuint64_t strict_prio = node->priority;\n+\tuint32_t hw_lvl = node->hw_lvl;\n+\tuint32_t schq = node->hw_id;\n+\tuint64_t rr_quantum;\n+\tuint8_t k = 0;\n+\n+\trr_quantum = nix_tm_weight_to_rr_quantum(node->weight);\n+\n+\t/* For children to root, strict prio is default if either\n+\t * device root is TL2 or TL1 Static Priority is disabled.\n+\t */\n+\tif (hw_lvl == NIX_TXSCH_LVL_TL2 &&\n+\t    (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))\n+\t\tstrict_prio = NIX_TM_TL1_DFLT_RR_PRIO;\n+\n+\tplt_tm_dbg(\"Schedule config node %s(%u) lvl %u id %u, \"\n+\t\t   \"prio 0x%\" PRIx64 \", rr_quantum 0x%\" PRIx64 \" (%p)\",\n+\t\t   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,\n+\t\t   strict_prio, rr_quantum, node);\n+\n+\tswitch (hw_lvl) {\n+\tcase NIX_TXSCH_LVL_SMQ:\n+\t\treg[k] = NIX_AF_MDQX_SCHEDULE(schq);\n+\t\tregval[k] = (strict_prio << 24) | rr_quantum;\n+\t\tk++;\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL4:\n+\t\treg[k] = NIX_AF_TL4X_SCHEDULE(schq);\n+\t\tregval[k] = (strict_prio << 24) | rr_quantum;\n+\t\tk++;\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL3:\n+\t\treg[k] = NIX_AF_TL3X_SCHEDULE(schq);\n+\t\tregval[k] = (strict_prio << 24) | rr_quantum;\n+\t\tk++;\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL2:\n+\t\treg[k] = NIX_AF_TL2X_SCHEDULE(schq);\n+\t\tregval[k] = (strict_prio << 24) | rr_quantum;\n+\t\tk++;\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL1:\n+\t\treg[k] = NIX_AF_TL1X_SCHEDULE(schq);\n+\t\tregval[k] = rr_quantum;\n+\t\tk++;\n+\n+\t\tbreak;\n+\t}\n+\n+\treturn k;\n+}\n+\n+uint8_t\n+nix_tm_shaper_reg_prep(struct nix_tm_node *node,\n+\t\t       struct nix_tm_shaper_profile *profile,\n+\t\t       volatile uint64_t *reg, volatile uint64_t *regval)\n+{\n+\tstruct nix_tm_shaper_data cir, pir;\n+\tuint32_t schq = node->hw_id;\n+\tuint64_t adjust = 0;\n+\tuint8_t k = 0;\n+\n+\tmemset(&cir, 0, sizeof(cir));\n+\tmemset(&pir, 0, sizeof(pir));\n+\tnix_tm_shaper_conf_get(profile, &cir, &pir);\n+\n+\tif (node->pkt_mode)\n+\t\tadjust = 1;\n+\telse if (profile)\n+\t\tadjust = profile->pkt_len_adj;\n+\n+\tplt_tm_dbg(\"Shaper config node %s(%u) lvl %u id %u, \"\n+\t\t   \"pir %\" PRIu64 \"(%\" PRIu64 \"B),\"\n+\t\t   \" cir %\" PRIu64 \"(%\" PRIu64 \"B)\"\n+\t\t   \"adjust 0x%\" PRIx64 \"(pktmode %u) (%p)\",\n+\t\t   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,\n+\t\t   pir.rate, pir.burst, cir.rate, cir.burst, adjust,\n+\t\t   node->pkt_mode, node);\n+\n+\tswitch (node->hw_lvl) {\n+\tcase NIX_TXSCH_LVL_SMQ:\n+\t\t/* Configure PIR, CIR */\n+\t\treg[k] = NIX_AF_MDQX_PIR(schq);\n+\t\tregval[k] = (pir.rate && pir.burst) ?\n+\t\t\t\t\t  (nix_tm_shaper2regval(&pir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\treg[k] = NIX_AF_MDQX_CIR(schq);\n+\t\tregval[k] = (cir.rate && cir.burst) ?\n+\t\t\t\t\t  
(nix_tm_shaper2regval(&cir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\t/* Configure RED ALG */\n+\t\treg[k] = NIX_AF_MDQX_SHAPE(schq);\n+\t\tregval[k] = (adjust | (uint64_t)node->red_algo << 9 |\n+\t\t\t     (uint64_t)node->pkt_mode << 24);\n+\t\tk++;\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL4:\n+\t\t/* Configure PIR, CIR */\n+\t\treg[k] = NIX_AF_TL4X_PIR(schq);\n+\t\tregval[k] = (pir.rate && pir.burst) ?\n+\t\t\t\t\t  (nix_tm_shaper2regval(&pir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\treg[k] = NIX_AF_TL4X_CIR(schq);\n+\t\tregval[k] = (cir.rate && cir.burst) ?\n+\t\t\t\t\t  (nix_tm_shaper2regval(&cir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\t/* Configure RED algo */\n+\t\treg[k] = NIX_AF_TL4X_SHAPE(schq);\n+\t\tregval[k] = (adjust | (uint64_t)node->red_algo << 9 |\n+\t\t\t     (uint64_t)node->pkt_mode << 24);\n+\t\tk++;\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL3:\n+\t\t/* Configure PIR, CIR */\n+\t\treg[k] = NIX_AF_TL3X_PIR(schq);\n+\t\tregval[k] = (pir.rate && pir.burst) ?\n+\t\t\t\t\t  (nix_tm_shaper2regval(&pir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\treg[k] = NIX_AF_TL3X_CIR(schq);\n+\t\tregval[k] = (cir.rate && cir.burst) ?\n+\t\t\t\t\t  (nix_tm_shaper2regval(&cir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\t/* Configure RED algo */\n+\t\treg[k] = NIX_AF_TL3X_SHAPE(schq);\n+\t\tregval[k] = (adjust | (uint64_t)node->red_algo << 9 |\n+\t\t\t     (uint64_t)node->pkt_mode);\n+\t\tk++;\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL2:\n+\t\t/* Configure PIR, CIR */\n+\t\treg[k] = NIX_AF_TL2X_PIR(schq);\n+\t\tregval[k] = (pir.rate && pir.burst) ?\n+\t\t\t\t\t  (nix_tm_shaper2regval(&pir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\treg[k] = NIX_AF_TL2X_CIR(schq);\n+\t\tregval[k] = (cir.rate && cir.burst) ?\n+\t\t\t\t\t  (nix_tm_shaper2regval(&cir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\t/* Configure RED algo */\n+\t\treg[k] = NIX_AF_TL2X_SHAPE(schq);\n+\t\tregval[k] = (adjust | (uint64_t)node->red_algo << 9 |\n+\t\t\t     (uint64_t)node->pkt_mode << 24);\n+\t\tk++;\n+\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL1:\n+\t\t/* Configure CIR */\n+\t\treg[k] = NIX_AF_TL1X_CIR(schq);\n+\t\tregval[k] = (cir.rate && cir.burst) ?\n+\t\t\t\t\t  (nix_tm_shaper2regval(&cir) | 1) :\n+\t\t\t\t\t  0;\n+\t\tk++;\n+\n+\t\t/* Configure length disable and adjust */\n+\t\treg[k] = NIX_AF_TL1X_SHAPE(schq);\n+\t\tregval[k] = (adjust | (uint64_t)node->pkt_mode << 24);\n+\t\tk++;\n+\t\tbreak;\n+\t}\n+\n+\treturn k;\n+}\n+\n+uint8_t\n nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,\n \t\t    volatile uint64_t *reg, volatile uint64_t *regval)\n {\ndiff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map\nindex 4817fd5..9c860ff 100644\n--- a/drivers/common/cnxk/version.map\n+++ b/drivers/common/cnxk/version.map\n@@ -105,6 +105,8 @@ INTERNAL {\n \troc_nix_switch_hdr_set;\n \troc_nix_eeprom_info_get;\n \troc_nix_tm_free_resources;\n+\troc_nix_tm_hierarchy_disable;\n+\troc_nix_tm_hierarchy_enable;\n \troc_nix_tm_node_add;\n \troc_nix_tm_node_delete;\n \troc_nix_tm_node_get;\n",
    "prefixes": [
        "v2",
        "36/52"
    ]
}
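
The "mbox" URL in the response can be fed straight to git am to apply the patch locally. A minimal sketch, assuming the requests package and a git checkout of DPDK at a hypothetical path:

import subprocess
import requests

MBOX_URL = ("https://patches.dpdk.org/project/dpdk/patch/"
            "20210401094739.22714-37-ndabilpuram@marvell.com/mbox/")
DPDK_DIR = "/path/to/dpdk"  # hypothetical location of a local DPDK checkout

# Download the raw mbox for this patch.
mbox = requests.get(MBOX_URL)
mbox.raise_for_status()

# `git am` reads the mbox from stdin and applies it as a commit,
# preserving the author, date and commit message.
subprocess.run(["git", "-C", DPDK_DIR, "am"], input=mbox.content, check=True)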