get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/110048/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 110048,
    "url": "http://patches.dpdk.org/api/patches/110048/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220422005746.2300736-5-wenjun1.wu@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220422005746.2300736-5-wenjun1.wu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220422005746.2300736-5-wenjun1.wu@intel.com",
    "date": "2022-04-22T00:57:41",
    "name": "[v7,4/9] net/ice: support queue bandwidth limit",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "cca478659150681eb874d53f94927f4c94630d6a",
    "submitter": {
        "id": 2083,
        "url": "http://patches.dpdk.org/api/people/2083/?format=api",
        "name": "Wenjun Wu",
        "email": "wenjun1.wu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220422005746.2300736-5-wenjun1.wu@intel.com/mbox/",
    "series": [
        {
            "id": 22604,
            "url": "http://patches.dpdk.org/api/series/22604/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=22604",
            "date": "2022-04-22T00:57:37",
            "name": "Enable ETS-based TX QoS on PF",
            "version": 7,
            "mbox": "http://patches.dpdk.org/series/22604/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/110048/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/110048/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CD853A0093;\n\tFri, 22 Apr 2022 03:20:02 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A593642800;\n\tFri, 22 Apr 2022 03:19:40 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id 4D1D7427FB\n for <dev@dpdk.org>; Fri, 22 Apr 2022 03:19:38 +0200 (CEST)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 21 Apr 2022 18:19:37 -0700",
            "from npg-wuwenjun-dpdk-01.sh.intel.com ([10.67.110.181])\n by orsmga002.jf.intel.com with ESMTP; 21 Apr 2022 18:19:35 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1650590378; x=1682126378;\n h=from:to:subject:date:message-id:in-reply-to:references:\n mime-version:content-transfer-encoding;\n bh=I9nlta3bHyka96PZJeHwGy3n/1Rc6AGN0wSMJyzS76c=;\n b=Noyuf5i370wkG7MlOHtsQUpOyeM9566V6CO6Bldk5K79SyLLTwk/InOO\n F0mcDE3ixUNcq1o+nytpeQZJEa9RKdANxJdbJLfPTI6AqHB+jPDCQGU/c\n kA+bvRiDgxoP0fCImmq+k7EFsUd999n0vAvrKsIPAQm0opXUDk8L/nt12\n AoF2h/reylBv+AiPJF8LGA/Tv9iOZTKU+1BlNHKrs7VSbhGOTEfZZ/w8f\n x+X2fAl2vMUuhUBD/kvuT0hsdtr9lCaYPVezr0BUWe1gEkFXV+inWe4cK\n 3dm3l0SAKhTinwHaSwj9GvzrYdy74yCnut79mEusZy1tchxejZSuInI4J A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6400,9594,10324\"; a=\"350972722\"",
            "E=Sophos;i=\"5.90,280,1643702400\"; d=\"scan'208\";a=\"350972722\"",
            "E=Sophos;i=\"5.90,280,1643702400\"; d=\"scan'208\";a=\"533816308\""
        ],
        "X-ExtLoop1": "1",
        "From": "Wenjun Wu <wenjun1.wu@intel.com>",
        "To": "dev@dpdk.org,\n\tqiming.yang@intel.com,\n\tqi.z.zhang@intel.com",
        "Subject": "[PATCH v7 4/9] net/ice: support queue bandwidth limit",
        "Date": "Fri, 22 Apr 2022 08:57:41 +0800",
        "Message-Id": "<20220422005746.2300736-5-wenjun1.wu@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20220422005746.2300736-1-wenjun1.wu@intel.com>",
        "References": "<20220329014813.1092054-1-wenjun1.wu@intel.com>\n <20220422005746.2300736-1-wenjun1.wu@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Ting Xu <ting.xu@intel.com>\n\nEnable basic TM API for PF only. Support for adding profiles and queue\nnodes. Only max bandwidth is supported in profiles. Profiles can be\nassigned to target queues. Only TC0 is valid.\n\nSigned-off-by: Wenjun Wu <wenjun1.wu@intel.com>\nSigned-off-by: Ting Xu <ting.xu@intel.com>\n---\n doc/guides/rel_notes/release_22_07.rst |   4 +\n drivers/net/ice/ice_ethdev.c           |  19 +\n drivers/net/ice/ice_ethdev.h           |  48 ++\n drivers/net/ice/ice_tm.c               | 599 +++++++++++++++++++++++++\n drivers/net/ice/meson.build            |   1 +\n 5 files changed, 671 insertions(+)\n create mode 100644 drivers/net/ice/ice_tm.c",
    "diff": "diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst\nindex 42a5f2d990..2ce3c99fb8 100644\n--- a/doc/guides/rel_notes/release_22_07.rst\n+++ b/doc/guides/rel_notes/release_22_07.rst\n@@ -55,6 +55,10 @@ New Features\n      Also, make sure to start the actual text at the margin.\n      =======================================================\n \n+* **Updated Intel ice driver.**\n+\n+  * Added Tx QoS rate limitation and priority configuration support for queue and queue group.\n+  * Added TX QoS queue weight configuration support.\n \n Removed Items\n -------------\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 13adcf90ed..37897765c8 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -205,6 +205,18 @@ static const struct rte_pci_id pci_id_ice_map[] = {\n \t{ .vendor_id = 0, /* sentinel */ },\n };\n \n+static int\n+ice_tm_ops_get(struct rte_eth_dev *dev __rte_unused,\n+\t\tvoid *arg)\n+{\n+\tif (!arg)\n+\t\treturn -EINVAL;\n+\n+\t*(const void **)arg = &ice_tm_ops;\n+\n+\treturn 0;\n+}\n+\n static const struct eth_dev_ops ice_eth_dev_ops = {\n \t.dev_configure                = ice_dev_configure,\n \t.dev_start                    = ice_dev_start,\n@@ -267,6 +279,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {\n \t.timesync_read_time           = ice_timesync_read_time,\n \t.timesync_write_time          = ice_timesync_write_time,\n \t.timesync_disable             = ice_timesync_disable,\n+\t.tm_ops_get                   = ice_tm_ops_get,\n };\n \n /* store statistics names and its offset in stats structure */\n@@ -2312,6 +2325,9 @@ ice_dev_init(struct rte_eth_dev *dev)\n \t/* Initialize RSS context for gtpu_eh */\n \tice_rss_ctx_init(pf);\n \n+\t/* Initialize TM configuration */\n+\tice_tm_conf_init(dev);\n+\n \tif (!ad->is_safe_mode) {\n \t\tret = ice_flow_init(ad);\n \t\tif (ret) {\n@@ -2492,6 +2508,9 @@ ice_dev_close(struct rte_eth_dev *dev)\n 
\trte_free(pf->proto_xtr);\n \tpf->proto_xtr = NULL;\n \n+\t/* Uninit TM configuration */\n+\tice_tm_conf_uninit(dev);\n+\n \tif (ad->devargs.pps_out_ena) {\n \t\tICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);\n \t\tICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);\ndiff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h\nindex 3ed580d438..0841e1866c 100644\n--- a/drivers/net/ice/ice_ethdev.h\n+++ b/drivers/net/ice/ice_ethdev.h\n@@ -9,10 +9,12 @@\n #include <rte_time.h>\n \n #include <ethdev_driver.h>\n+#include <rte_tm_driver.h>\n \n #include \"base/ice_common.h\"\n #include \"base/ice_adminq_cmd.h\"\n #include \"base/ice_flow.h\"\n+#include \"base/ice_sched.h\"\n \n #define ICE_ADMINQ_LEN               32\n #define ICE_SBIOQ_LEN                32\n@@ -453,6 +455,48 @@ struct ice_acl_info {\n \tuint64_t hw_entry_id[MAX_ACL_NORMAL_ENTRIES];\n };\n \n+TAILQ_HEAD(ice_shaper_profile_list, ice_tm_shaper_profile);\n+TAILQ_HEAD(ice_tm_node_list, ice_tm_node);\n+\n+struct ice_tm_shaper_profile {\n+\tTAILQ_ENTRY(ice_tm_shaper_profile) node;\n+\tuint32_t shaper_profile_id;\n+\tuint32_t reference_count;\n+\tstruct rte_tm_shaper_params profile;\n+};\n+\n+/* Struct to store Traffic Manager node configuration. */\n+struct ice_tm_node {\n+\tTAILQ_ENTRY(ice_tm_node) node;\n+\tuint32_t id;\n+\tuint32_t tc;\n+\tuint32_t priority;\n+\tuint32_t weight;\n+\tuint32_t reference_count;\n+\tstruct ice_tm_node *parent;\n+\tstruct ice_tm_shaper_profile *shaper_profile;\n+\tstruct rte_tm_node_params params;\n+};\n+\n+/* node type of Traffic Manager */\n+enum ice_tm_node_type {\n+\tICE_TM_NODE_TYPE_PORT,\n+\tICE_TM_NODE_TYPE_TC,\n+\tICE_TM_NODE_TYPE_QUEUE,\n+\tICE_TM_NODE_TYPE_MAX,\n+};\n+\n+/* Struct to store all the Traffic Manager configuration. 
*/\n+struct ice_tm_conf {\n+\tstruct ice_shaper_profile_list shaper_profile_list;\n+\tstruct ice_tm_node *root; /* root node - vf vsi */\n+\tstruct ice_tm_node_list tc_list; /* node list for all the TCs */\n+\tstruct ice_tm_node_list queue_list; /* node list for all the queues */\n+\tuint32_t nb_tc_node;\n+\tuint32_t nb_queue_node;\n+\tbool committed;\n+};\n+\n struct ice_pf {\n \tstruct ice_adapter *adapter; /* The adapter this PF associate to */\n \tstruct ice_vsi *main_vsi; /* pointer to main VSI structure */\n@@ -497,6 +541,7 @@ struct ice_pf {\n \tuint64_t old_tx_bytes;\n \tuint64_t supported_rxdid; /* bitmap for supported RXDID */\n \tuint64_t rss_hf;\n+\tstruct ice_tm_conf tm_conf;\n };\n \n #define ICE_MAX_QUEUE_NUM  2048\n@@ -620,6 +665,9 @@ int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,\n \t\t\t struct ice_rss_hash_cfg *cfg);\n int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,\n \t\t\t struct ice_rss_hash_cfg *cfg);\n+void ice_tm_conf_init(struct rte_eth_dev *dev);\n+void ice_tm_conf_uninit(struct rte_eth_dev *dev);\n+extern const struct rte_tm_ops ice_tm_ops;\n \n static inline int\n ice_align_floor(int n)\ndiff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c\nnew file mode 100644\nindex 0000000000..383af88981\n--- /dev/null\n+++ b/drivers/net/ice/ice_tm.c\n@@ -0,0 +1,599 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+#include <rte_tm_driver.h>\n+\n+#include \"ice_ethdev.h\"\n+#include \"ice_rxtx.h\"\n+\n+static int ice_hierarchy_commit(struct rte_eth_dev *dev,\n+\t\t\t\t int clear_on_fail,\n+\t\t\t\t __rte_unused struct rte_tm_error *error);\n+static int ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,\n+\t      uint32_t parent_node_id, uint32_t priority,\n+\t      uint32_t weight, uint32_t level_id,\n+\t      struct rte_tm_node_params *params,\n+\t      struct rte_tm_error *error);\n+static int ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t 
node_id,\n+\t\t\t    struct rte_tm_error *error);\n+static int ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,\n+\t\t   int *is_leaf, struct rte_tm_error *error);\n+static int ice_shaper_profile_add(struct rte_eth_dev *dev,\n+\t\t\tuint32_t shaper_profile_id,\n+\t\t\tstruct rte_tm_shaper_params *profile,\n+\t\t\tstruct rte_tm_error *error);\n+static int ice_shaper_profile_del(struct rte_eth_dev *dev,\n+\t\t\t\t   uint32_t shaper_profile_id,\n+\t\t\t\t   struct rte_tm_error *error);\n+\n+const struct rte_tm_ops ice_tm_ops = {\n+\t.shaper_profile_add = ice_shaper_profile_add,\n+\t.shaper_profile_delete = ice_shaper_profile_del,\n+\t.node_add = ice_tm_node_add,\n+\t.node_delete = ice_tm_node_delete,\n+\t.node_type_get = ice_node_type_get,\n+\t.hierarchy_commit = ice_hierarchy_commit,\n+};\n+\n+void\n+ice_tm_conf_init(struct rte_eth_dev *dev)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\n+\t/* initialize node configuration */\n+\tTAILQ_INIT(&pf->tm_conf.shaper_profile_list);\n+\tpf->tm_conf.root = NULL;\n+\tTAILQ_INIT(&pf->tm_conf.tc_list);\n+\tTAILQ_INIT(&pf->tm_conf.queue_list);\n+\tpf->tm_conf.nb_tc_node = 0;\n+\tpf->tm_conf.nb_queue_node = 0;\n+\tpf->tm_conf.committed = false;\n+}\n+\n+void\n+ice_tm_conf_uninit(struct rte_eth_dev *dev)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_tm_node *tm_node;\n+\n+\t/* clear node configuration */\n+\twhile ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {\n+\t\tTAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);\n+\t\trte_free(tm_node);\n+\t}\n+\tpf->tm_conf.nb_queue_node = 0;\n+\twhile ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {\n+\t\tTAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);\n+\t\trte_free(tm_node);\n+\t}\n+\tpf->tm_conf.nb_tc_node = 0;\n+\tif (pf->tm_conf.root) {\n+\t\trte_free(pf->tm_conf.root);\n+\t\tpf->tm_conf.root = NULL;\n+\t}\n+}\n+\n+static inline struct ice_tm_node *\n+ice_tm_node_search(struct rte_eth_dev 
*dev,\n+\t\t    uint32_t node_id, enum ice_tm_node_type *node_type)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_tm_node_list *tc_list = &pf->tm_conf.tc_list;\n+\tstruct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;\n+\tstruct ice_tm_node *tm_node;\n+\n+\tif (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {\n+\t\t*node_type = ICE_TM_NODE_TYPE_PORT;\n+\t\treturn pf->tm_conf.root;\n+\t}\n+\n+\tTAILQ_FOREACH(tm_node, tc_list, node) {\n+\t\tif (tm_node->id == node_id) {\n+\t\t\t*node_type = ICE_TM_NODE_TYPE_TC;\n+\t\t\treturn tm_node;\n+\t\t}\n+\t}\n+\n+\tTAILQ_FOREACH(tm_node, queue_list, node) {\n+\t\tif (tm_node->id == node_id) {\n+\t\t\t*node_type = ICE_TM_NODE_TYPE_QUEUE;\n+\t\t\treturn tm_node;\n+\t\t}\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static int\n+ice_node_param_check(struct ice_pf *pf, uint32_t node_id,\n+\t\t      uint32_t priority, uint32_t weight,\n+\t\t      struct rte_tm_node_params *params,\n+\t\t      struct rte_tm_error *error)\n+{\n+\t/* checked all the unsupported parameter */\n+\tif (node_id == RTE_TM_NODE_ID_NULL) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\terror->message = \"invalid node id\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (priority) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;\n+\t\terror->message = \"priority should be 0\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (weight != 1) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;\n+\t\terror->message = \"weight must be 1\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* not support shared shaper */\n+\tif (params->shared_shaper_id) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;\n+\t\terror->message = \"shared shaper not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\tif (params->n_shared_shapers) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;\n+\t\terror->message = \"shared shaper not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* for non-leaf node */\n+\tif (node_id >= 
pf->dev_data->nb_tx_queues) {\n+\t\tif (params->nonleaf.wfq_weight_mode) {\n+\t\t\terror->type =\n+\t\t\t\tRTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;\n+\t\t\terror->message = \"WFQ not supported\";\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tif (params->nonleaf.n_sp_priorities != 1) {\n+\t\t\terror->type =\n+\t\t\t\tRTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;\n+\t\t\terror->message = \"SP priority not supported\";\n+\t\t\treturn -EINVAL;\n+\t\t} else if (params->nonleaf.wfq_weight_mode &&\n+\t\t\t   !(*params->nonleaf.wfq_weight_mode)) {\n+\t\t\terror->type =\n+\t\t\t\tRTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;\n+\t\t\terror->message = \"WFP should be byte mode\";\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\treturn 0;\n+\t}\n+\n+\t/* for leaf node */\n+\tif (params->leaf.cman) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;\n+\t\terror->message = \"Congestion management not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\tif (params->leaf.wred.wred_profile_id !=\n+\t    RTE_TM_WRED_PROFILE_ID_NONE) {\n+\t\terror->type =\n+\t\t\tRTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;\n+\t\terror->message = \"WRED not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\tif (params->leaf.wred.shared_wred_context_id) {\n+\t\terror->type =\n+\t\t\tRTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;\n+\t\terror->message = \"WRED not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\tif (params->leaf.wred.n_shared_wred_contexts) {\n+\t\terror->type =\n+\t\t\tRTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;\n+\t\terror->message = \"WRED not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,\n+\t\t   int *is_leaf, struct rte_tm_error *error)\n+{\n+\tenum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;\n+\tstruct ice_tm_node *tm_node;\n+\n+\tif (!is_leaf || !error)\n+\t\treturn -EINVAL;\n+\n+\tif (node_id == RTE_TM_NODE_ID_NULL) {\n+\t\terror->type = 
RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\terror->message = \"invalid node id\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* check if the node id exists */\n+\ttm_node = ice_tm_node_search(dev, node_id, &node_type);\n+\tif (!tm_node) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\terror->message = \"no such node\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (node_type == ICE_TM_NODE_TYPE_QUEUE)\n+\t\t*is_leaf = true;\n+\telse\n+\t\t*is_leaf = false;\n+\n+\treturn 0;\n+}\n+\n+static inline struct ice_tm_shaper_profile *\n+ice_shaper_profile_search(struct rte_eth_dev *dev,\n+\t\t\t   uint32_t shaper_profile_id)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_shaper_profile_list *shaper_profile_list =\n+\t\t&pf->tm_conf.shaper_profile_list;\n+\tstruct ice_tm_shaper_profile *shaper_profile;\n+\n+\tTAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {\n+\t\tif (shaper_profile_id == shaper_profile->shaper_profile_id)\n+\t\t\treturn shaper_profile;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static int\n+ice_shaper_profile_param_check(struct rte_tm_shaper_params *profile,\n+\t\t\t\tstruct rte_tm_error *error)\n+{\n+\t/* min bucket size not supported */\n+\tif (profile->committed.size) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;\n+\t\terror->message = \"committed bucket size not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\t/* max bucket size not supported */\n+\tif (profile->peak.size) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;\n+\t\terror->message = \"peak bucket size not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\t/* length adjustment not supported */\n+\tif (profile->pkt_length_adjust) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;\n+\t\terror->message = \"packet length adjustment not supported\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_shaper_profile_add(struct rte_eth_dev *dev,\n+\t\t\tuint32_t shaper_profile_id,\n+\t\t\tstruct 
rte_tm_shaper_params *profile,\n+\t\t\tstruct rte_tm_error *error)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_tm_shaper_profile *shaper_profile;\n+\tint ret;\n+\n+\tif (!profile || !error)\n+\t\treturn -EINVAL;\n+\n+\tret = ice_shaper_profile_param_check(profile, error);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tshaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);\n+\n+\tif (shaper_profile) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;\n+\t\terror->message = \"profile ID exist\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tshaper_profile = rte_zmalloc(\"ice_tm_shaper_profile\",\n+\t\t\t\t     sizeof(struct ice_tm_shaper_profile),\n+\t\t\t\t     0);\n+\tif (!shaper_profile)\n+\t\treturn -ENOMEM;\n+\tshaper_profile->shaper_profile_id = shaper_profile_id;\n+\trte_memcpy(&shaper_profile->profile, profile,\n+\t\t\t sizeof(struct rte_tm_shaper_params));\n+\tTAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,\n+\t\t\t  shaper_profile, node);\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_shaper_profile_del(struct rte_eth_dev *dev,\n+\t\t\tuint32_t shaper_profile_id,\n+\t\t\tstruct rte_tm_error *error)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_tm_shaper_profile *shaper_profile;\n+\n+\tif (!error)\n+\t\treturn -EINVAL;\n+\n+\tshaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);\n+\n+\tif (!shaper_profile) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;\n+\t\terror->message = \"profile ID not exist\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* don't delete a profile if it's used by one or several nodes */\n+\tif (shaper_profile->reference_count) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;\n+\t\terror->message = \"profile in use\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tTAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);\n+\trte_free(shaper_profile);\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_tm_node_add(struct rte_eth_dev 
*dev, uint32_t node_id,\n+\t      uint32_t parent_node_id, uint32_t priority,\n+\t      uint32_t weight, uint32_t level_id,\n+\t      struct rte_tm_node_params *params,\n+\t      struct rte_tm_error *error)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tenum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;\n+\tenum ice_tm_node_type parent_node_type = ICE_TM_NODE_TYPE_MAX;\n+\tstruct ice_tm_shaper_profile *shaper_profile = NULL;\n+\tstruct ice_tm_node *tm_node;\n+\tstruct ice_tm_node *parent_node;\n+\tuint16_t tc_nb = 1;\n+\tint ret;\n+\n+\tif (!params || !error)\n+\t\treturn -EINVAL;\n+\n+\t/* if already committed */\n+\tif (pf->tm_conf.committed) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;\n+\t\terror->message = \"already committed\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tret = ice_node_param_check(pf, node_id, priority, weight,\n+\t\t\t\t    params, error);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* check if the node is already existed */\n+\tif (ice_tm_node_search(dev, node_id, &node_type)) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\terror->message = \"node id already used\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* check the shaper profile id */\n+\tif (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {\n+\t\tshaper_profile = ice_shaper_profile_search(dev,\n+\t\t\tparams->shaper_profile_id);\n+\t\tif (!shaper_profile) {\n+\t\t\terror->type =\n+\t\t\t\tRTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;\n+\t\t\terror->message = \"shaper profile not exist\";\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* root node if not have a parent */\n+\tif (parent_node_id == RTE_TM_NODE_ID_NULL) {\n+\t\t/* check level */\n+\t\tif (level_id != ICE_TM_NODE_TYPE_PORT) {\n+\t\t\terror->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;\n+\t\t\terror->message = \"Wrong level\";\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\t/* obviously no more than one root */\n+\t\tif (pf->tm_conf.root) {\n+\t\t\terror->type = 
RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;\n+\t\t\terror->message = \"already have a root\";\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\t/* add the root node */\n+\t\ttm_node = rte_zmalloc(\"ice_tm_node\",\n+\t\t\t\t      sizeof(struct ice_tm_node),\n+\t\t\t\t      0);\n+\t\tif (!tm_node)\n+\t\t\treturn -ENOMEM;\n+\t\ttm_node->id = node_id;\n+\t\ttm_node->parent = NULL;\n+\t\ttm_node->reference_count = 0;\n+\t\trte_memcpy(&tm_node->params, params,\n+\t\t\t\t sizeof(struct rte_tm_node_params));\n+\t\tpf->tm_conf.root = tm_node;\n+\t\treturn 0;\n+\t}\n+\n+\t/* TC or queue node */\n+\t/* check the parent node */\n+\tparent_node = ice_tm_node_search(dev, parent_node_id,\n+\t\t\t\t\t  &parent_node_type);\n+\tif (!parent_node) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;\n+\t\terror->message = \"parent not exist\";\n+\t\treturn -EINVAL;\n+\t}\n+\tif (parent_node_type != ICE_TM_NODE_TYPE_PORT &&\n+\t    parent_node_type != ICE_TM_NODE_TYPE_TC) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;\n+\t\terror->message = \"parent is not root or TC\";\n+\t\treturn -EINVAL;\n+\t}\n+\t/* check level */\n+\tif (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&\n+\t    level_id != (uint32_t)parent_node_type + 1) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;\n+\t\terror->message = \"Wrong level\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* check the node number */\n+\tif (parent_node_type == ICE_TM_NODE_TYPE_PORT) {\n+\t\t/* check the TC number */\n+\t\tif (pf->tm_conf.nb_tc_node >= tc_nb) {\n+\t\t\terror->type = RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\t\terror->message = \"too many TCs\";\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t} else {\n+\t\t/* check the queue number */\n+\t\tif (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {\n+\t\t\terror->type = RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\t\terror->message = \"too many queues\";\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tif (node_id >= pf->dev_data->nb_tx_queues) {\n+\t\t\terror->type = 
RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\t\terror->message = \"too large queue id\";\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* add the TC or queue node */\n+\ttm_node = rte_zmalloc(\"ice_tm_node\",\n+\t\t\t      sizeof(struct ice_tm_node),\n+\t\t\t      0);\n+\tif (!tm_node)\n+\t\treturn -ENOMEM;\n+\ttm_node->id = node_id;\n+\ttm_node->priority = priority;\n+\ttm_node->weight = weight;\n+\ttm_node->reference_count = 0;\n+\ttm_node->parent = parent_node;\n+\ttm_node->shaper_profile = shaper_profile;\n+\trte_memcpy(&tm_node->params, params,\n+\t\t\t sizeof(struct rte_tm_node_params));\n+\tif (parent_node_type == ICE_TM_NODE_TYPE_PORT) {\n+\t\tTAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,\n+\t\t\t\t  tm_node, node);\n+\t\ttm_node->tc = pf->tm_conf.nb_tc_node;\n+\t\tpf->tm_conf.nb_tc_node++;\n+\t} else {\n+\t\tTAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,\n+\t\t\t\t  tm_node, node);\n+\t\ttm_node->tc = parent_node->tc;\n+\t\tpf->tm_conf.nb_queue_node++;\n+\t}\n+\ttm_node->parent->reference_count++;\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,\n+\t\t struct rte_tm_error *error)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tenum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;\n+\tstruct ice_tm_node *tm_node;\n+\n+\tif (!error)\n+\t\treturn -EINVAL;\n+\n+\t/* if already committed */\n+\tif (pf->tm_conf.committed) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;\n+\t\terror->message = \"already committed\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (node_id == RTE_TM_NODE_ID_NULL) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\terror->message = \"invalid node id\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* check if the node id exists */\n+\ttm_node = ice_tm_node_search(dev, node_id, &node_type);\n+\tif (!tm_node) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\terror->message = \"no such node\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* the node should have no child */\n+\tif 
(tm_node->reference_count) {\n+\t\terror->type = RTE_TM_ERROR_TYPE_NODE_ID;\n+\t\terror->message =\n+\t\t\t\"cannot delete a node which has children\";\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* root node */\n+\tif (node_type == ICE_TM_NODE_TYPE_PORT) {\n+\t\trte_free(tm_node);\n+\t\tpf->tm_conf.root = NULL;\n+\t\treturn 0;\n+\t}\n+\n+\t/* TC or queue node */\n+\ttm_node->parent->reference_count--;\n+\tif (node_type == ICE_TM_NODE_TYPE_TC) {\n+\t\tTAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);\n+\t\tpf->tm_conf.nb_tc_node--;\n+\t} else {\n+\t\tTAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);\n+\t\tpf->tm_conf.nb_queue_node--;\n+\t}\n+\trte_free(tm_node);\n+\n+\treturn 0;\n+}\n+\n+static int ice_hierarchy_commit(struct rte_eth_dev *dev,\n+\t\t\t\t int clear_on_fail,\n+\t\t\t\t __rte_unused struct rte_tm_error *error)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;\n+\tstruct ice_tm_node *tm_node;\n+\tstruct ice_tx_queue *txq;\n+\tstruct ice_vsi *vsi;\n+\tint ret_val = ICE_SUCCESS;\n+\tuint64_t peak = 0;\n+\n+\tTAILQ_FOREACH(tm_node, queue_list, node) {\n+\t\ttxq = dev->data->tx_queues[tm_node->id];\n+\t\tvsi = txq->vsi;\n+\t\tif (tm_node->shaper_profile)\n+\t\t\t/* Transfer from Byte per seconds to Kbps */\n+\t\t\tpeak = tm_node->shaper_profile->profile.peak.rate;\n+\n+\t\tpeak = peak / 1000 * BITS_PER_BYTE;\n+\t\tret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,\n+\t\t\t\t tm_node->tc, tm_node->id, ICE_MAX_BW, (u32)peak);\n+\t\tif (ret_val) {\n+\t\t\terror->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;\n+\t\t\tPMD_DRV_LOG(ERR, \"configure queue %u bandwidth failed\", tm_node->id);\n+\t\t\tgoto fail_clear;\n+\t\t}\n+\t}\n+\n+\treturn ret_val;\n+\n+fail_clear:\n+\t/* clear all the traffic manager configuration */\n+\tif (clear_on_fail) 
{\n+\t\tice_tm_conf_uninit(dev);\n+\t\tice_tm_conf_init(dev);\n+\t}\n+\treturn ret_val;\n+}\ndiff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build\nindex d608da7765..de307c9e71 100644\n--- a/drivers/net/ice/meson.build\n+++ b/drivers/net/ice/meson.build\n@@ -12,6 +12,7 @@ sources = files(\n         'ice_hash.c',\n         'ice_rxtx.c',\n         'ice_switch_filter.c',\n+        'ice_tm.c',\n )\n \n deps += ['hash', 'net', 'common_iavf']\n",
    "prefixes": [
        "v7",
        "4/9"
    ]
}