get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are modified).

put:
Update a patch (full replacement of its writable fields).

GET /api/patches/66578/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 66578,
    "url": "http://patches.dpdk.org/api/patches/66578/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200312111907.31555-4-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200312111907.31555-4-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200312111907.31555-4-ndabilpuram@marvell.com",
    "date": "2020-03-12T11:18:59",
    "name": "[03/11] net/octeontx2: add dynamic topology update support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a3a2e1b52e9dbbf6ff5e7de856b3b9254e1b5ac3",
    "submitter": {
        "id": 1202,
        "url": "http://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200312111907.31555-4-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 8894,
            "url": "http://patches.dpdk.org/api/series/8894/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8894",
            "date": "2020-03-12T11:18:56",
            "name": "net/octeontx2: add traffic manager support",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/8894/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/66578/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/66578/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CA4D0A056B;\n\tThu, 12 Mar 2020 12:20:00 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8D2F61C07B;\n\tThu, 12 Mar 2020 12:19:28 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173]) by dpdk.org (Postfix) with ESMTP id F324C1C021\n for <dev@dpdk.org>; Thu, 12 Mar 2020 12:19:24 +0100 (CET)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n 02CBFVnC017662 for <dev@dpdk.org>; Thu, 12 Mar 2020 04:19:24 -0700",
            "from sc-exch04.marvell.com ([199.233.58.184])\n by mx0b-0016f401.pphosted.com with ESMTP id 2yqfggs6ee-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Thu, 12 Mar 2020 04:19:24 -0700",
            "from SC-EXCH03.marvell.com (10.93.176.83) by SC-EXCH04.marvell.com\n (10.93.176.84) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Thu, 12 Mar\n 2020 04:19:21 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH03.marvell.com\n (10.93.176.83) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Thu, 12 Mar 2020 04:19:22 -0700",
            "from hyd1588t430.marvell.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id 63DFA3F7040;\n Thu, 12 Mar 2020 04:19:20 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0818; bh=uv31cMeo+Rb++fIPidYmQ+873g0g2wFZc4lAiT6r1Fw=;\n b=st8/MRrzUGCdKnO708T2lxu2K8wHBEqvVXY52mY5LtpWdsTHs55R0kep5h9TION5a7fR\n fwdrLpftG6WDEolGzt5M7LY+3JrO+E1wcc9PLB5PPXsC2DC7/Pl6Za9ouxOub0mDN66y\n e0SaPets6b8SRjinQwyNmM4AlmhtOdiQZ4gcHmVyE/vFCDx/G+Zw4at98JXEIlvPaFrU\n bYjzkJXV+vdFJJnTnbn2YMEMFEwZvhzHD52IsJsUCKY5AXV2PKpOZ+6xLAGwg4HFPcly\n pPJHCVXQ3YJ2zygry0E1yLHWSFU6cKFeRJ2VPXKg4nLTveNvCRvBxUnqp2d5rULxn1v5 Vw==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "Jerin Jacob <jerinj@marvell.com>, Nithin Dabilpuram\n <ndabilpuram@marvell.com>, Kiran Kumar K <kirankumark@marvell.com>",
        "CC": "Krzysztof Kanas <kkanas@marvell.com>, <dev@dpdk.org>",
        "Date": "Thu, 12 Mar 2020 16:48:59 +0530",
        "Message-ID": "<20200312111907.31555-4-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20200312111907.31555-1-ndabilpuram@marvell.com>",
        "References": "<20200312111907.31555-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.138, 18.0.572\n definitions=2020-03-12_03:2020-03-11,\n 2020-03-12 signatures=0",
        "Subject": "[dpdk-dev] [PATCH 03/11] net/octeontx2: add dynamic topology update\n\tsupport",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Modify resource allocation and freeing logic to support\ndynamic topology commit while to traffic is flowing.\nThis patch also modifies SQ flush to timeout based on minimum shaper\nrate configured. SQ flush is further split to pre/post\nfunctions to adhere to HW spec of 96XX C0.\n\nSigned-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\nSigned-off-by: Krzysztof Kanas <kkanas@marvell.com>\n---\n drivers/common/octeontx2/otx2_dev.h |   9 +\n drivers/net/octeontx2/otx2_ethdev.c |   3 +-\n drivers/net/octeontx2/otx2_ethdev.h |   1 +\n drivers/net/octeontx2/otx2_tm.c     | 538 +++++++++++++++++++++++++++---------\n drivers/net/octeontx2/otx2_tm.h     |   7 +-\n 5 files changed, 417 insertions(+), 141 deletions(-)",
    "diff": "diff --git a/drivers/common/octeontx2/otx2_dev.h b/drivers/common/octeontx2/otx2_dev.h\nindex 0b0a949..13b75e1 100644\n--- a/drivers/common/octeontx2/otx2_dev.h\n+++ b/drivers/common/octeontx2/otx2_dev.h\n@@ -46,6 +46,15 @@\n \t((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) &&\t\\\n \t (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))\n \n+#define otx2_dev_is_96xx_Cx(dev)\t\t\t\t\\\n+\t((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x2) &&\t\\\n+\t (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))\n+\n+#define otx2_dev_is_96xx_C0(dev)\t\t\t\t\\\n+\t((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x2) &&\t\\\n+\t (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) &&\t\\\n+\t (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))\n+\n struct otx2_dev;\n \n /* Link status callback */\ndiff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c\nindex e60f490..6896797 100644\n--- a/drivers/net/octeontx2/otx2_ethdev.c\n+++ b/drivers/net/octeontx2/otx2_ethdev.c\n@@ -992,7 +992,7 @@ otx2_nix_tx_queue_release(void *_txq)\n \totx2_nix_dbg(\"Releasing txq %u\", txq->sq);\n \n \t/* Flush and disable tm */\n-\totx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);\n+\totx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started);\n \n \t/* Free sqb's and disable sq */\n \tnix_sq_uninit(txq);\n@@ -1001,6 +1001,7 @@ otx2_nix_tx_queue_release(void *_txq)\n \t\trte_mempool_free(txq->sqb_pool);\n \t\ttxq->sqb_pool = NULL;\n \t}\n+\totx2_nix_sq_flush_post(txq);\n \trte_free(txq);\n }\n \ndiff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h\nindex b7d5386..6679652 100644\n--- a/drivers/net/octeontx2/otx2_ethdev.h\n+++ b/drivers/net/octeontx2/otx2_ethdev.h\n@@ -307,6 +307,7 @@ struct otx2_eth_dev {\n \tuint16_t link_cfg_lvl;\n \tuint16_t tm_flags;\n \tuint16_t tm_leaf_cnt;\n+\tuint64_t tm_rate_min;\n \tstruct otx2_nix_tm_node_list node_list;\n \tstruct otx2_nix_tm_shaper_profile_list shaper_profile_list;\n \tstruct otx2_rss_info 
rss_info;\ndiff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c\nindex 057297a..b6da668 100644\n--- a/drivers/net/octeontx2/otx2_tm.c\n+++ b/drivers/net/octeontx2/otx2_tm.c\n@@ -59,8 +59,16 @@ static bool\n nix_tm_have_tl1_access(struct otx2_eth_dev *dev)\n {\n \tbool is_lbk = otx2_dev_is_lbk(dev);\n-\treturn otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) &&\n-\t\t!is_lbk && !dev->maxvf;\n+\treturn otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;\n+}\n+\n+static bool\n+nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)\n+{\n+\tif (nix_tm_have_tl1_access(dev))\n+\t\treturn (lvl == OTX2_TM_LVL_QUEUE);\n+\n+\treturn (lvl == OTX2_TM_LVL_SCH4);\n }\n \n static int\n@@ -424,6 +432,48 @@ prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,\n \treturn k;\n }\n \n+static uint8_t\n+prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,\n+\t\t   volatile uint64_t *reg, volatile uint64_t *regval)\n+{\n+\tuint32_t hw_lvl = tm_node->hw_lvl;\n+\tuint32_t schq = tm_node->hw_id;\n+\tuint8_t k = 0;\n+\n+\totx2_tm_dbg(\"sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)\",\n+\t\t    nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,\n+\t\t    tm_node->id, enable, tm_node);\n+\n+\tregval[k] = enable;\n+\n+\tswitch (hw_lvl) {\n+\tcase NIX_TXSCH_LVL_MDQ:\n+\t\treg[k] = NIX_AF_MDQX_SW_XOFF(schq);\n+\t\tk++;\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL4:\n+\t\treg[k] = NIX_AF_TL4X_SW_XOFF(schq);\n+\t\tk++;\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL3:\n+\t\treg[k] = NIX_AF_TL3X_SW_XOFF(schq);\n+\t\tk++;\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL2:\n+\t\treg[k] = NIX_AF_TL2X_SW_XOFF(schq);\n+\t\tk++;\n+\t\tbreak;\n+\tcase NIX_TXSCH_LVL_TL1:\n+\t\treg[k] = NIX_AF_TL1X_SW_XOFF(schq);\n+\t\tk++;\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\treturn k;\n+}\n+\n static int\n populate_tm_reg(struct otx2_eth_dev *dev,\n \t\tstruct otx2_nix_tm_node *tm_node)\n@@ -692,12 +742,13 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,\n \t\t\tuint16_t 
lvl, bool user,\n \t\t\tstruct rte_tm_node_params *params)\n {\n-\tstruct otx2_nix_tm_shaper_profile *shaper_profile;\n+\tstruct otx2_nix_tm_shaper_profile *profile;\n \tstruct otx2_nix_tm_node *tm_node, *parent_node;\n-\tuint32_t shaper_profile_id;\n+\tstruct shaper_params cir, pir;\n+\tuint32_t profile_id;\n \n-\tshaper_profile_id = params->shaper_profile_id;\n-\tshaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);\n+\tprofile_id = params->shaper_profile_id;\n+\tprofile = nix_tm_shaper_profile_search(dev, profile_id);\n \n \tparent_node = nix_tm_node_search(dev, parent_node_id, user);\n \n@@ -709,6 +760,10 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,\n \ttm_node->lvl = lvl;\n \ttm_node->hw_lvl = hw_lvl;\n \n+\t/* Maintain minimum weight */\n+\tif (!weight)\n+\t\tweight = 1;\n+\n \ttm_node->id = node_id;\n \ttm_node->priority = priority;\n \ttm_node->weight = weight;\n@@ -720,10 +775,22 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,\n \t\ttm_node->flags = NIX_TM_NODE_USER;\n \trte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));\n \n-\tif (shaper_profile)\n-\t\tshaper_profile->reference_count++;\n+\tif (profile)\n+\t\tprofile->reference_count++;\n+\n+\tmemset(&cir, 0, sizeof(cir));\n+\tmemset(&pir, 0, sizeof(pir));\n+\tshaper_config_to_nix(profile, &cir, &pir);\n+\n \ttm_node->parent = parent_node;\n \ttm_node->parent_hw_id = UINT32_MAX;\n+\t/* C0 doesn't support STALL when both PIR & CIR are enabled */\n+\tif (lvl < OTX2_TM_LVL_QUEUE &&\n+\t    otx2_dev_is_96xx_Cx(dev) &&\n+\t    pir.rate && cir.rate)\n+\t\ttm_node->red_algo = NIX_REDALG_DISCARD;\n+\telse\n+\t\ttm_node->red_algo = NIX_REDALG_STD;\n \n \tTAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);\n \n@@ -747,24 +814,67 @@ nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)\n }\n \n static int\n-nix_smq_xoff(struct otx2_eth_dev *dev, uint16_t smq, bool enable)\n+nix_clear_path_xoff(struct otx2_eth_dev *dev,\n+\t\t    
struct otx2_nix_tm_node *tm_node)\n+{\n+\tstruct nix_txschq_config *req;\n+\tstruct otx2_nix_tm_node *p;\n+\tint rc;\n+\n+\t/* Manipulating SW_XOFF not supported on Ax */\n+\tif (otx2_dev_is_Ax(dev))\n+\t\treturn 0;\n+\n+\t/* Enable nodes in path for flush to succeed */\n+\tif (!nix_tm_is_leaf(dev, tm_node->lvl))\n+\t\tp = tm_node;\n+\telse\n+\t\tp = tm_node->parent;\n+\twhile (p) {\n+\t\tif (!(p->flags & NIX_TM_NODE_ENABLED) &&\n+\t\t    (p->flags & NIX_TM_NODE_HWRES)) {\n+\t\t\treq = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);\n+\t\t\treq->lvl = p->hw_lvl;\n+\t\t\treq->num_regs = prepare_tm_sw_xoff(p, false, req->reg,\n+\t\t\t\t\t\t\t   req->regval);\n+\t\t\trc = otx2_mbox_process(dev->mbox);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\n+\t\t\tp->flags |= NIX_TM_NODE_ENABLED;\n+\t\t}\n+\t\tp = p->parent;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+nix_smq_xoff(struct otx2_eth_dev *dev,\n+\t     struct otx2_nix_tm_node *tm_node,\n+\t     bool enable)\n {\n \tstruct otx2_mbox *mbox = dev->mbox;\n \tstruct nix_txschq_config *req;\n+\tuint16_t smq;\n+\tint rc;\n+\n+\tsmq = tm_node->hw_id;\n+\totx2_tm_dbg(\"Setting SMQ %u XOFF/FLUSH to %s\", smq,\n+\t\t    enable ? \"enable\" : \"disable\");\n+\n+\trc = nix_clear_path_xoff(dev, tm_node);\n+\tif (rc)\n+\t\treturn rc;\n \n \treq = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);\n \treq->lvl = NIX_TXSCH_LVL_SMQ;\n \treq->num_regs = 1;\n \n \treq->reg[0] = NIX_AF_SMQX_CFG(smq);\n-\t/* Unmodified fields */\n-\treq->regval[0] = ((uint64_t)NIX_MAX_VTAG_INS << 36) |\n-\t\t\t\t(NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;\n-\n-\tif (enable)\n-\t\treq->regval[0] |= BIT_ULL(50) | BIT_ULL(49);\n-\telse\n-\t\treq->regval[0] |= 0;\n+\treq->regval[0] = enable ? 
(BIT_ULL(50) | BIT_ULL(49)) : 0;\n+\treq->regval_mask[0] = enable ?\n+\t\t\t\t~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);\n \n \treturn otx2_mbox_process(mbox);\n }\n@@ -780,6 +890,9 @@ otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)\n \tuint64_t aura_handle;\n \tint rc;\n \n+\totx2_tm_dbg(\"Setting SQ %u SQB aura FC to %s\", txq->sq,\n+\t\t    enable ? \"enable\" : \"disable\");\n+\n \tlf = otx2_npa_lf_obj_get();\n \tif (!lf)\n \t\treturn -EFAULT;\n@@ -824,22 +937,41 @@ otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)\n \treturn 0;\n }\n \n-static void\n+static int\n nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)\n {\n \tuint16_t sqb_cnt, head_off, tail_off;\n \tstruct otx2_eth_dev *dev = txq->dev;\n+\tuint64_t wdata, val, prev;\n \tuint16_t sq = txq->sq;\n-\tuint64_t reg, val;\n \tint64_t *regaddr;\n+\tuint64_t timeout;/* 10's of usec */\n+\n+\t/* Wait for enough time based on shaper min rate */\n+\ttimeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);\n+\ttimeout = timeout / dev->tm_rate_min;\n+\tif (!timeout)\n+\t\ttimeout = 10000;\n+\n+\twdata = ((uint64_t)sq << 32);\n+\tregaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);\n+\tval = otx2_atomic64_add_nosync(wdata, regaddr);\n+\n+\t/* Spin multiple iterations as \"txq->fc_cache_pkts\" can still\n+\t * have space to send pkts even though fc_mem is disabled\n+\t */\n \n \twhile (true) {\n-\t\treg = ((uint64_t)sq << 32);\n-\t\tregaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);\n-\t\tval = otx2_atomic64_add_nosync(reg, regaddr);\n+\t\tprev = val;\n+\t\trte_delay_us(10);\n+\t\tval = otx2_atomic64_add_nosync(wdata, regaddr);\n+\t\t/* Continue on error */\n+\t\tif (val & BIT_ULL(63))\n+\t\t\tcontinue;\n+\n+\t\tif (prev != val)\n+\t\t\tcontinue;\n \n-\t\tregaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);\n-\t\tval = otx2_atomic64_add_nosync(reg, regaddr);\n \t\tsqb_cnt = val & 0xFFFF;\n \t\thead_off = (val >> 20) & 0x3F;\n \t\ttail_off = (val >> 28) & 0x3F;\n@@ -850,68 +982,94 @@ 
nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)\n \t\t\tbreak;\n \t\t}\n \n-\t\trte_pause();\n+\t\t/* Timeout */\n+\t\tif (!timeout)\n+\t\t\tgoto exit;\n+\t\ttimeout--;\n \t}\n+\n+\treturn 0;\n+exit:\n+\treturn -EFAULT;\n }\n \n-int\n-otx2_nix_tm_sw_xoff(void *__txq, bool dev_started)\n+/* Flush and disable tx queue and its parent SMQ */\n+int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)\n {\n-\tstruct otx2_eth_txq *txq = __txq;\n-\tstruct otx2_eth_dev *dev = txq->dev;\n-\tstruct otx2_mbox *mbox = dev->mbox;\n-\tstruct nix_aq_enq_req *req;\n-\tstruct nix_aq_enq_rsp *rsp;\n-\tuint16_t smq;\n+\tstruct otx2_nix_tm_node *tm_node, *sibling;\n+\tstruct otx2_eth_txq *txq;\n+\tstruct otx2_eth_dev *dev;\n+\tuint16_t sq;\n+\tbool user;\n \tint rc;\n \n-\t/* Get smq from sq */\n-\treq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);\n-\treq->qidx = txq->sq;\n-\treq->ctype = NIX_AQ_CTYPE_SQ;\n-\treq->op = NIX_AQ_INSTOP_READ;\n-\trc = otx2_mbox_process_msg(mbox, (void *)&rsp);\n-\tif (rc) {\n-\t\totx2_err(\"Failed to get smq, rc=%d\", rc);\n-\t\treturn -EIO;\n+\ttxq = _txq;\n+\tdev = txq->dev;\n+\tsq = txq->sq;\n+\n+\tuser = !!(dev->tm_flags & NIX_TM_COMMITTED);\n+\n+\t/* Find the node for this SQ */\n+\ttm_node = nix_tm_node_search(dev, sq, user);\n+\tif (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {\n+\t\totx2_err(\"Invalid node/state for sq %u\", sq);\n+\t\treturn -EFAULT;\n \t}\n \n-\t/* Check if sq is enabled */\n-\tif (!rsp->sq.ena)\n-\t\treturn 0;\n-\n-\tsmq = rsp->sq.smq;\n-\n \t/* Enable CGX RXTX to drain pkts */\n \tif (!dev_started) {\n \t\trc = otx2_cgx_rxtx_start(dev);\n-\t\tif (rc)\n+\t\tif (rc) {\n+\t\t\totx2_err(\"cgx start failed, rc=%d\", rc);\n \t\t\treturn rc;\n-\t}\n-\n-\trc = otx2_nix_sq_sqb_aura_fc(txq, false);\n-\tif (rc < 0) {\n-\t\totx2_err(\"Failed to disable sqb aura fc, rc=%d\", rc);\n-\t\tgoto cleanup;\n+\t\t}\n \t}\n \n \t/* Disable smq xoff for case it was enabled earlier */\n-\trc = nix_smq_xoff(dev, smq, false);\n+\trc = 
nix_smq_xoff(dev, tm_node->parent, false);\n \tif (rc) {\n-\t\totx2_err(\"Failed to enable smq for sq %u, rc=%d\", txq->sq, rc);\n-\t\tgoto cleanup;\n-\t}\n-\n-\t/* Wait for sq entries to be flushed */\n-\tnix_txq_flush_sq_spin(txq);\n-\n-\t/* Flush and enable smq xoff */\n-\trc = nix_smq_xoff(dev, smq, true);\n-\tif (rc) {\n-\t\totx2_err(\"Failed to disable smq for sq %u, rc=%d\", txq->sq, rc);\n+\t\totx2_err(\"Failed to enable smq %u, rc=%d\",\n+\t\t\t tm_node->parent->hw_id, rc);\n \t\treturn rc;\n \t}\n \n+\t/* As per HRM, to disable an SQ, all other SQ's\n+\t * that feed to same SMQ must be paused before SMQ flush.\n+\t */\n+\tTAILQ_FOREACH(sibling, &dev->node_list, node) {\n+\t\tif (sibling->parent != tm_node->parent)\n+\t\t\tcontinue;\n+\t\tif (!(sibling->flags & NIX_TM_NODE_ENABLED))\n+\t\t\tcontinue;\n+\n+\t\tsq = sibling->id;\n+\t\ttxq = dev->eth_dev->data->tx_queues[sq];\n+\t\tif (!txq)\n+\t\t\tcontinue;\n+\n+\t\trc = otx2_nix_sq_sqb_aura_fc(txq, false);\n+\t\tif (rc) {\n+\t\t\totx2_err(\"Failed to disable sqb aura fc, rc=%d\", rc);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\t/* Wait for sq entries to be flushed */\n+\t\trc = nix_txq_flush_sq_spin(txq);\n+\t\tif (rc) {\n+\t\t\totx2_err(\"Failed to drain sq %u, rc=%d\\n\", txq->sq, rc);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\ttm_node->flags &= ~NIX_TM_NODE_ENABLED;\n+\n+\t/* Disable and flush */\n+\trc = nix_smq_xoff(dev, tm_node->parent, true);\n+\tif (rc) {\n+\t\totx2_err(\"Failed to disable smq %u, rc=%d\",\n+\t\t\t tm_node->parent->hw_id, rc);\n+\t\tgoto cleanup;\n+\t}\n cleanup:\n \t/* Restore cgx state */\n \tif (!dev_started)\n@@ -920,47 +1078,120 @@ otx2_nix_tm_sw_xoff(void *__txq, bool dev_started)\n \treturn rc;\n }\n \n+int otx2_nix_sq_flush_post(void *_txq)\n+{\n+\tstruct otx2_nix_tm_node *tm_node, *sibling;\n+\tstruct otx2_eth_txq *txq = _txq;\n+\tstruct otx2_eth_txq *s_txq;\n+\tstruct otx2_eth_dev *dev;\n+\tbool once = false;\n+\tuint16_t sq, s_sq;\n+\tbool user;\n+\tint rc;\n+\n+\tdev = 
txq->dev;\n+\tsq = txq->sq;\n+\tuser = !!(dev->tm_flags & NIX_TM_COMMITTED);\n+\n+\t/* Find the node for this SQ */\n+\ttm_node = nix_tm_node_search(dev, sq, user);\n+\tif (!tm_node) {\n+\t\totx2_err(\"Invalid node for sq %u\", sq);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/* Enable all the siblings back */\n+\tTAILQ_FOREACH(sibling, &dev->node_list, node) {\n+\t\tif (sibling->parent != tm_node->parent)\n+\t\t\tcontinue;\n+\n+\t\tif (sibling->id == sq)\n+\t\t\tcontinue;\n+\n+\t\tif (!(sibling->flags & NIX_TM_NODE_ENABLED))\n+\t\t\tcontinue;\n+\n+\t\ts_sq = sibling->id;\n+\t\ts_txq = dev->eth_dev->data->tx_queues[s_sq];\n+\t\tif (!s_txq)\n+\t\t\tcontinue;\n+\n+\t\tif (!once) {\n+\t\t\t/* Enable back if any SQ is still present */\n+\t\t\trc = nix_smq_xoff(dev, tm_node->parent, false);\n+\t\t\tif (rc) {\n+\t\t\t\totx2_err(\"Failed to enable smq %u, rc=%d\",\n+\t\t\t\t\t tm_node->parent->hw_id, rc);\n+\t\t\t\treturn rc;\n+\t\t\t}\n+\t\t\tonce = true;\n+\t\t}\n+\n+\t\trc = otx2_nix_sq_sqb_aura_fc(s_txq, true);\n+\t\tif (rc) {\n+\t\t\totx2_err(\"Failed to enable sqb aura fc, rc=%d\", rc);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n static int\n-nix_tm_sw_xon(struct otx2_eth_txq *txq,\n-\t      uint16_t smq, uint32_t rr_quantum)\n+nix_sq_sched_data(struct otx2_eth_dev *dev,\n+\t\t  struct otx2_nix_tm_node *tm_node,\n+\t\t  bool rr_quantum_only)\n {\n-\tstruct otx2_eth_dev *dev = txq->dev;\n+\tstruct rte_eth_dev *eth_dev = dev->eth_dev;\n \tstruct otx2_mbox *mbox = dev->mbox;\n+\tuint16_t sq = tm_node->id, smq;\n \tstruct nix_aq_enq_req *req;\n+\tuint64_t rr_quantum;\n \tint rc;\n \n-\totx2_tm_dbg(\"Enabling sq(%u)->smq(%u), rr_quantum %u\",\n-\t\t    txq->sq, txq->sq, rr_quantum);\n-\t/* Set smq from sq */\n+\tsmq = tm_node->parent->hw_id;\n+\trr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);\n+\n+\tif (rr_quantum_only)\n+\t\totx2_tm_dbg(\"Update sq(%u) rr_quantum 0x%lx\", sq, rr_quantum);\n+\telse\n+\t\totx2_tm_dbg(\"Enabling sq(%u)->smq(%u), rr_quantum 
0x%lx\",\n+\t\t\t    sq, smq, rr_quantum);\n+\n+\tif (sq > eth_dev->data->nb_tx_queues)\n+\t\treturn -EFAULT;\n+\n \treq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);\n-\treq->qidx = txq->sq;\n+\treq->qidx = sq;\n \treq->ctype = NIX_AQ_CTYPE_SQ;\n \treq->op = NIX_AQ_INSTOP_WRITE;\n-\treq->sq.smq = smq;\n+\n+\t/* smq update only when needed */\n+\tif (!rr_quantum_only) {\n+\t\treq->sq.smq = smq;\n+\t\treq->sq_mask.smq = ~req->sq_mask.smq;\n+\t}\n \treq->sq.smq_rr_quantum = rr_quantum;\n-\treq->sq_mask.smq = ~req->sq_mask.smq;\n \treq->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;\n \n \trc = otx2_mbox_process(mbox);\n-\tif (rc) {\n+\tif (rc)\n \t\totx2_err(\"Failed to set smq, rc=%d\", rc);\n-\t\treturn -EIO;\n-\t}\n+\treturn rc;\n+}\n+\n+int otx2_nix_sq_enable(void *_txq)\n+{\n+\tstruct otx2_eth_txq *txq = _txq;\n+\tint rc;\n \n \t/* Enable sqb_aura fc */\n \trc = otx2_nix_sq_sqb_aura_fc(txq, true);\n-\tif (rc < 0) {\n+\tif (rc) {\n \t\totx2_err(\"Failed to enable sqb aura fc, rc=%d\", rc);\n \t\treturn rc;\n \t}\n \n-\t/* Disable smq xoff */\n-\trc = nix_smq_xoff(dev, smq, false);\n-\tif (rc) {\n-\t\totx2_err(\"Failed to enable smq for sq %u\", txq->sq);\n-\t\treturn rc;\n-\t}\n-\n \treturn 0;\n }\n \n@@ -968,12 +1199,11 @@ static int\n nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,\n \t\t      uint32_t flags, bool hw_only)\n {\n-\tstruct otx2_nix_tm_shaper_profile *shaper_profile;\n+\tstruct otx2_nix_tm_shaper_profile *profile;\n \tstruct otx2_nix_tm_node *tm_node, *next_node;\n \tstruct otx2_mbox *mbox = dev->mbox;\n \tstruct nix_txsch_free_req *req;\n-\tuint32_t shaper_profile_id;\n-\tbool skip_node = false;\n+\tuint32_t profile_id;\n \tint rc = 0;\n \n \tnext_node = TAILQ_FIRST(&dev->node_list);\n@@ -985,37 +1215,40 @@ nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,\n \t\tif ((tm_node->flags & flags_mask) != flags)\n \t\t\tcontinue;\n \n-\t\tif (nix_tm_have_tl1_access(dev) &&\n-\t\t    tm_node->hw_lvl ==  
NIX_TXSCH_LVL_TL1)\n-\t\t\tskip_node = true;\n-\n-\t\totx2_tm_dbg(\"Free hwres for node %u, hwlvl %u, hw_id %u (%p)\",\n-\t\t\t    tm_node->id,  tm_node->hw_lvl,\n-\t\t\t    tm_node->hw_id, tm_node);\n-\t\t/* Free specific HW resource if requested */\n-\t\tif (!skip_node && flags_mask &&\n+\t\tif (!nix_tm_is_leaf(dev, tm_node->lvl) &&\n+\t\t    tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&\n \t\t    tm_node->flags & NIX_TM_NODE_HWRES) {\n+\t\t\t/* Free specific HW resource */\n+\t\t\totx2_tm_dbg(\"Free hwres %s(%u) lvl %u id %u (%p)\",\n+\t\t\t\t    nix_hwlvl2str(tm_node->hw_lvl),\n+\t\t\t\t    tm_node->hw_id, tm_node->lvl,\n+\t\t\t\t    tm_node->id, tm_node);\n+\n+\t\t\trc = nix_clear_path_xoff(dev, tm_node);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\n \t\t\treq = otx2_mbox_alloc_msg_nix_txsch_free(mbox);\n \t\t\treq->flags = 0;\n \t\t\treq->schq_lvl = tm_node->hw_lvl;\n \t\t\treq->schq = tm_node->hw_id;\n \t\t\trc = otx2_mbox_process(mbox);\n \t\t\tif (rc)\n-\t\t\t\tbreak;\n-\t\t} else {\n-\t\t\tskip_node = false;\n+\t\t\t\treturn rc;\n+\t\t\ttm_node->flags &= ~NIX_TM_NODE_HWRES;\n \t\t}\n-\t\ttm_node->flags &= ~NIX_TM_NODE_HWRES;\n \n \t\t/* Leave software elements if needed */\n \t\tif (hw_only)\n \t\t\tcontinue;\n \n-\t\tshaper_profile_id = tm_node->params.shaper_profile_id;\n-\t\tshaper_profile =\n-\t\t\tnix_tm_shaper_profile_search(dev, shaper_profile_id);\n-\t\tif (shaper_profile)\n-\t\t\tshaper_profile->reference_count--;\n+\t\totx2_tm_dbg(\"Free node lvl %u id %u (%p)\",\n+\t\t\t    tm_node->lvl, tm_node->id, tm_node);\n+\n+\t\tprofile_id = tm_node->params.shaper_profile_id;\n+\t\tprofile = nix_tm_shaper_profile_search(dev, profile_id);\n+\t\tif (profile)\n+\t\t\tprofile->reference_count--;\n \n \t\tTAILQ_REMOVE(&dev->node_list, tm_node, node);\n \t\trte_free(tm_node);\n@@ -1060,8 +1293,8 @@ nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,\n \tuint32_t hw_id, schq_con_index, prio_offset;\n \tuint32_t l_id, schq_index;\n \n-\totx2_tm_dbg(\"Assign hw id for child 
node %u, lvl %u, hw_lvl %u (%p)\",\n-\t\t    child->id, child->lvl, child->hw_lvl, child);\n+\totx2_tm_dbg(\"Assign hw id for child node %s lvl %u id %u (%p)\",\n+\t\t    nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);\n \n \tchild->flags |= NIX_TM_NODE_HWRES;\n \n@@ -1219,8 +1452,8 @@ nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)\n {\n \tstruct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);\n \tstruct otx2_nix_tm_node *tm_node;\n-\tuint16_t sq, smq, rr_quantum;\n \tstruct otx2_eth_txq *txq;\n+\tuint16_t sq;\n \tint rc;\n \n \tnix_tm_update_parent_info(dev);\n@@ -1237,42 +1470,68 @@ nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)\n \t\treturn rc;\n \t}\n \n-\t/* Enable xmit as all the topology is ready */\n-\tTAILQ_FOREACH(tm_node, &dev->node_list, node) {\n-\t\tif (tm_node->flags & NIX_TM_NODE_ENABLED)\n-\t\t\tcontinue;\n+\t/* Trigger MTU recalulate as SMQ needs MTU conf */\n+\tif (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {\n+\t\trc = otx2_nix_recalc_mtu(eth_dev);\n+\t\tif (rc) {\n+\t\t\totx2_err(\"TM MTU update failed, rc=%d\", rc);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n \n-\t\t/* Enable xmit on sq */\n-\t\tif (tm_node->lvl != OTX2_TM_LVL_QUEUE) {\n+\t/* Mark all non-leaf's as enabled */\n+\tTAILQ_FOREACH(tm_node, &dev->node_list, node) {\n+\t\tif (!nix_tm_is_leaf(dev, tm_node->lvl))\n \t\t\ttm_node->flags |= NIX_TM_NODE_ENABLED;\n+\t}\n+\n+\tif (!xmit_enable)\n+\t\treturn 0;\n+\n+\t/* Update SQ Sched Data while SQ is idle */\n+\tTAILQ_FOREACH(tm_node, &dev->node_list, node) {\n+\t\tif (!nix_tm_is_leaf(dev, tm_node->lvl))\n \t\t\tcontinue;\n+\n+\t\trc = nix_sq_sched_data(dev, tm_node, false);\n+\t\tif (rc) {\n+\t\t\totx2_err(\"SQ %u sched update failed, rc=%d\",\n+\t\t\t\t tm_node->id, rc);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\t/* Finally XON all SMQ's */\n+\tTAILQ_FOREACH(tm_node, &dev->node_list, node) {\n+\t\tif (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)\n+\t\t\tcontinue;\n+\n+\t\trc = 
nix_smq_xoff(dev, tm_node, false);\n+\t\tif (rc) {\n+\t\t\totx2_err(\"Failed to enable smq %u, rc=%d\",\n+\t\t\t\t tm_node->hw_id, rc);\n+\t\t\treturn rc;\n \t\t}\n+\t}\n \n-\t\t/* Don't enable SMQ or mark as enable */\n-\t\tif (!xmit_enable)\n+\t/* Enable xmit as all the topology is ready */\n+\tTAILQ_FOREACH(tm_node, &dev->node_list, node) {\n+\t\tif (!nix_tm_is_leaf(dev, tm_node->lvl))\n \t\t\tcontinue;\n \n \t\tsq = tm_node->id;\n-\t\tif (sq > eth_dev->data->nb_tx_queues) {\n-\t\t\trc = -EFAULT;\n-\t\t\tbreak;\n-\t\t}\n-\n \t\ttxq = eth_dev->data->tx_queues[sq];\n \n-\t\tsmq = tm_node->parent->hw_id;\n-\t\trr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);\n-\n-\t\trc = nix_tm_sw_xon(txq, smq, rr_quantum);\n-\t\tif (rc)\n-\t\t\tbreak;\n+\t\trc = otx2_nix_sq_enable(txq);\n+\t\tif (rc) {\n+\t\t\totx2_err(\"TM sw xon failed on SQ %u, rc=%d\",\n+\t\t\t\t tm_node->id, rc);\n+\t\t\treturn rc;\n+\t\t}\n \t\ttm_node->flags |= NIX_TM_NODE_ENABLED;\n \t}\n \n-\tif (rc)\n-\t\totx2_err(\"TM failed to enable xmit on sq %u, rc=%d\", sq, rc);\n-\n-\treturn rc;\n+\treturn 0;\n }\n \n static int\n@@ -1282,7 +1541,7 @@ nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)\n \tuint32_t def = eth_dev->data->nb_tx_queues;\n \tstruct rte_tm_node_params params;\n \tuint32_t leaf_parent, i;\n-\tint rc = 0;\n+\tint rc = 0, leaf_level;\n \n \t/* Default params */\n \tmemset(&params, 0, sizeof(params));\n@@ -1325,6 +1584,7 @@ nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)\n \t\t\tgoto exit;\n \n \t\tleaf_parent = def + 4;\n+\t\tleaf_level = OTX2_TM_LVL_QUEUE;\n \t} else {\n \t\tdev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;\n \t\trc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,\n@@ -1356,6 +1616,7 @@ nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)\n \t\t\tgoto exit;\n \n \t\tleaf_parent = def + 3;\n+\t\tleaf_level = OTX2_TM_LVL_SCH4;\n \t}\n \n \t/* Add leaf nodes */\n@@ -1363,7 +1624,7 @@ nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)\n 
\t\trc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,\n \t\t\t\t\t     DEFAULT_RR_WEIGHT,\n \t\t\t\t\t     NIX_TXSCH_LVL_CNT,\n-\t\t\t\t\t     OTX2_TM_LVL_QUEUE, false, &params);\n+\t\t\t\t\t     leaf_level, false, &params);\n \t\tif (rc)\n \t\t\tbreak;\n \t}\n@@ -1378,6 +1639,7 @@ void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)\n \n \tTAILQ_INIT(&dev->node_list);\n \tTAILQ_INIT(&dev->shaper_profile_list);\n+\tdev->tm_rate_min = 1E9; /* 1Gbps */\n }\n \n int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)\n@@ -1455,7 +1717,7 @@ otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,\n \t\ttm_node = nix_tm_node_search(dev, sq, true);\n \n \t/* Check if we found a valid leaf node */\n-\tif (!tm_node || tm_node->lvl != OTX2_TM_LVL_QUEUE ||\n+\tif (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||\n \t    !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {\n \t\treturn -EIO;\n \t}\n@@ -1464,7 +1726,7 @@ otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,\n \t*smq = tm_node->parent->hw_id;\n \t*rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);\n \n-\trc = nix_smq_xoff(dev, *smq, false);\n+\trc = nix_smq_xoff(dev, tm_node->parent, false);\n \tif (rc)\n \t\treturn rc;\n \ttm_node->flags |= NIX_TM_NODE_ENABLED;\ndiff --git a/drivers/net/octeontx2/otx2_tm.h b/drivers/net/octeontx2/otx2_tm.h\nindex ad7727e..413120a 100644\n--- a/drivers/net/octeontx2/otx2_tm.h\n+++ b/drivers/net/octeontx2/otx2_tm.h\n@@ -10,6 +10,7 @@\n #include <rte_tm_driver.h>\n \n #define NIX_TM_DEFAULT_TREE\tBIT_ULL(0)\n+#define NIX_TM_COMMITTED\tBIT_ULL(1)\n #define NIX_TM_TL1_NO_SP\tBIT_ULL(3)\n \n struct otx2_eth_dev;\n@@ -19,7 +20,9 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev);\n int otx2_nix_tm_fini(struct rte_eth_dev *eth_dev);\n int otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,\n \t\t\t      uint32_t *rr_quantum, uint16_t *smq);\n-int otx2_nix_tm_sw_xoff(void *_txq, bool dev_started);\n+int 
otx2_nix_sq_flush_pre(void *_txq, bool dev_started);\n+int otx2_nix_sq_flush_post(void *_txq);\n+int otx2_nix_sq_enable(void *_txq);\n int otx2_nix_sq_sqb_aura_fc(void *_txq, bool enable);\n \n struct otx2_nix_tm_node {\n@@ -40,6 +43,7 @@ struct otx2_nix_tm_node {\n #define NIX_TM_NODE_USER\tBIT_ULL(2)\n \t/* Shaper algorithm for RED state @NIX_REDALG_E */\n \tuint32_t red_algo:2;\n+\n \tstruct otx2_nix_tm_node *parent;\n \tstruct rte_tm_node_params params;\n };\n@@ -70,7 +74,6 @@ TAILQ_HEAD(otx2_nix_tm_shaper_profile_list, otx2_nix_tm_shaper_profile);\n \t\t((((__weight) & MAX_SCHED_WEIGHT) *             \\\n \t\t  NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT)\n \n-\n /* DEFAULT_RR_WEIGHT * NIX_TM_RR_QUANTUM_MAX / MAX_SCHED_WEIGHT  */\n /* = NIX_MAX_HW_MTU */\n #define DEFAULT_RR_WEIGHT 71\n",
    "prefixes": [
        "03/11"
    ]
}