From patchwork Tue Jan 2 10:28:59 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Qi Zhang
X-Patchwork-Id: 135636
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Qi Zhang
To: qiming.yang@intel.com, wenjun1.wu@intel.com
Cc: dev@dpdk.org, Qi Zhang
Subject: [PATCH v4 1/2] net/ice: reset Tx sched node during commit
Date: Tue, 2 Jan 2024 05:28:59 -0500
Message-Id: <20240102102900.3435496-1-qi.z.zhang@intel.com>
X-Mailer: git-send-email 2.31.1
In-Reply-To: <20231226185428.3158880-1-qi.z.zhang@intel.com>
References: <20231226185428.3158880-1-qi.z.zhang@intel.com>

1. Always reset all Tx scheduler nodes at the beginning of a commit
   action. This prevents unexpected leftovers from a previous commit.

2. Reset all Tx scheduler nodes if a commit fails. For leaf nodes, stop
   the queues, which removes the sched nodes from the scheduler tree,
   then start the queues, which adds the sched nodes back to the default
   topology. For non-leaf nodes, simply reset them to the default
   parameters.

Signed-off-by: Qi Zhang
---
v4:
- show node type in brief mode.
v3:
- fix incorrect parameter when querying RL profile
v2:
- fix CI build issue

 drivers/net/ice/ice_ethdev.h |   1 +
 drivers/net/ice/ice_tm.c     | 134 ++++++++++++++++++++++++++++-------
 2 files changed, 111 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1338c80d14..3b2db6aaa6 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -478,6 +478,7 @@ struct ice_tm_node {
 	struct ice_tm_node **children;
 	struct ice_tm_shaper_profile *shaper_profile;
 	struct rte_tm_node_params params;
+	struct ice_sched_node *sched_node;
 };
 
 /* node type of Traffic Manager */
diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c
index 1a30524b05..2ae55418b0 100644
--- a/drivers/net/ice/ice_tm.c
+++ b/drivers/net/ice/ice_tm.c
@@ -764,16 +764,94 @@ static int ice_cfg_hw_node(struct ice_hw *hw,
 	return 0;
 }
 
+static struct ice_sched_node *ice_get_vsi_node(struct ice_hw *hw)
+{
+	struct ice_sched_node *node = hw->port_info->root;
+	uint32_t vsi_layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+	uint32_t i;
+
+	for (i = 0; i < vsi_layer; i++)
+		node = node->children[0];
+
+	return node;
+}
+
+static int ice_reset_noleaf_nodes(struct rte_eth_dev *dev)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
+	struct ice_sched_node *vsi_node = ice_get_vsi_node(hw);
+	struct ice_tm_node *tm_node;
+	int ret;
+
+	/* reset vsi_node */
+	ret = ice_set_node_rate(hw, NULL, vsi_node->info.node_teid, ICE_AGG_TYPE_VSI);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "reset vsi node failed");
+		return ret;
+	}
+
+	/* reset queue group nodes */
+	TAILQ_FOREACH(tm_node, qgroup_list, node) {
+		if (tm_node->sched_node == NULL)
+			continue;
+
+		ret = ice_cfg_hw_node(hw, NULL,
+				      tm_node->sched_node,
+				      ICE_AGG_TYPE_Q);
+
+		if (ret) {
+			PMD_DRV_LOG(ERR, "reset queue group node %u failed", tm_node->id);
+			return ret;
+		}
+		tm_node->sched_node = NULL;
+	}
+
+	return 0;
+}
+
+static int ice_remove_leaf_nodes(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		ret = ice_tx_queue_stop(dev, i);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "stop queue %u failed", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int ice_add_leaf_nodes(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		ret = ice_tx_queue_start(dev, i);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "start queue %u failed", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
 static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 				int clear_on_fail,
-				__rte_unused struct rte_tm_error *error)
+				struct rte_tm_error *error)
 {
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
 	struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
 	struct ice_tm_node *tm_node;
-	struct ice_sched_node *node;
 	struct ice_sched_node *vsi_node = NULL;
 	struct ice_sched_node *queue_node;
 	struct ice_tx_queue *txq;
@@ -785,23 +863,25 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 	uint32_t nb_qg;
 	uint32_t qid;
 	uint32_t q_teid;
-	uint32_t vsi_layer;
 
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		ret_val = ice_tx_queue_stop(dev, i);
-		if (ret_val) {
-			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
-			PMD_DRV_LOG(ERR, "stop queue %u failed", i);
-			goto fail_clear;
-		}
+	/* remove leaf nodes */
+	ret_val = ice_remove_leaf_nodes(dev);
+	if (ret_val) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		PMD_DRV_LOG(ERR, "reset no-leaf nodes failed");
+		goto fail_clear;
 	}
 
-	node = hw->port_info->root;
-	vsi_layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
-	for (i = 0; i < vsi_layer; i++)
-		node = node->children[0];
-	vsi_node = node;
+	/* reset no-leaf nodes. */
+	ret_val = ice_reset_noleaf_nodes(dev);
+	if (ret_val) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		PMD_DRV_LOG(ERR, "reset leaf nodes failed");
+		goto add_leaf;
+	}
 
+	/* config vsi node */
+	vsi_node = ice_get_vsi_node(hw);
 	tm_node = TAILQ_FIRST(&pf->tm_conf.vsi_list);
 
 	ret_val = ice_set_node_rate(hw, tm_node,
@@ -812,9 +892,10 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 		PMD_DRV_LOG(ERR,
 			    "configure vsi node %u bandwidth failed",
 			    tm_node->id);
-		goto reset_vsi;
+		goto add_leaf;
 	}
 
+	/* config queue group nodes */
 	nb_vsi_child = vsi_node->num_children;
 	nb_qg = vsi_node->children[0]->num_children;
 
@@ -833,7 +914,7 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 			if (ret_val) {
 				error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 				PMD_DRV_LOG(ERR, "start queue %u failed", qid);
-				goto reset_vsi;
+				goto reset_leaf;
 			}
 			txq = dev->data->tx_queues[qid];
 			q_teid = txq->q_teid;
@@ -841,7 +922,7 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 			if (queue_node == NULL) {
 				error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 				PMD_DRV_LOG(ERR, "get queue %u node failed", qid);
-				goto reset_vsi;
+				goto reset_leaf;
 			}
 			if (queue_node->info.parent_teid == qgroup_sched_node->info.node_teid)
 				continue;
@@ -849,19 +930,20 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 			if (ret_val) {
 				error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 				PMD_DRV_LOG(ERR, "move queue %u failed", qid);
-				goto reset_vsi;
+				goto reset_leaf;
 			}
 		}
 
 		ret_val = ice_cfg_hw_node(hw, tm_node,
 					  qgroup_sched_node,
 					  ICE_AGG_TYPE_Q);
+		tm_node->sched_node = qgroup_sched_node;
 		if (ret_val) {
 			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 			PMD_DRV_LOG(ERR,
 				    "configure queue group node %u failed",
 				    tm_node->id);
-			goto reset_vsi;
+			goto reset_leaf;
 		}
 
 		idx_qg++;
@@ -872,10 +954,11 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 		if (idx_vsi_child >= nb_vsi_child) {
 			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 			PMD_DRV_LOG(ERR, "too many queues");
-			goto reset_vsi;
+			goto reset_leaf;
 		}
 	}
 
+	/* config queue nodes */
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		qid = tm_node->id;
 		txq = dev->data->tx_queues[qid];
@@ -890,14 +973,17 @@ static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 			PMD_DRV_LOG(ERR,
 				    "configure queue group node %u failed",
 				    tm_node->id);
-			goto reset_vsi;
+			goto reset_leaf;
 		}
 	}
 
 	return ret_val;
 
-reset_vsi:
-	ice_set_node_rate(hw, NULL, vsi_node->info.node_teid, ICE_AGG_TYPE_VSI);
+reset_leaf:
+	ice_remove_leaf_nodes(dev);
+add_leaf:
+	ice_add_leaf_nodes(dev);
+	ice_reset_noleaf_nodes(dev);
 fail_clear:
 	/* clear all the traffic manager configuration */
 	if (clear_on_fail) {
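
For readers who want the new error-handling flow at a glance, below is a condensed,
compilable sketch of the commit/rollback ordering this patch introduces. It is not
part of the patch: the *_sketch helpers and main() are illustrative stand-ins for the
real driver calls (ice_remove_leaf_nodes, ice_reset_noleaf_nodes, ice_add_leaf_nodes,
and the VSI/queue-group/queue configuration steps); only the labels and their ordering
mirror ice_hierarchy_commit() in the diff above.

/*
 * Sketch of the commit/rollback ordering, not driver code.
 */
#include <stdio.h>

static int remove_leaf_nodes_sketch(void)  { return 0; } /* stop all Tx queues */
static int reset_noleaf_nodes_sketch(void) { return 0; } /* VSI + queue group nodes back to defaults */
static int add_leaf_nodes_sketch(void)     { return 0; } /* start all Tx queues (default topology) */
static int config_vsi_node_sketch(void)    { return 0; } /* apply user VSI node settings */
static int config_queue_nodes_sketch(void) { return 0; } /* apply queue group / queue settings */

static int hierarchy_commit_sketch(int clear_on_fail)
{
	int ret;

	/* 1. always start from a clean scheduler tree */
	ret = remove_leaf_nodes_sketch();
	if (ret)
		goto fail_clear;
	ret = reset_noleaf_nodes_sketch();
	if (ret)
		goto add_leaf;

	/* 2. apply the user-requested hierarchy */
	ret = config_vsi_node_sketch();
	if (ret)
		goto add_leaf;
	ret = config_queue_nodes_sketch();
	if (ret)
		goto reset_leaf;

	return 0;

reset_leaf:
	/* queues may already run under the new tree: stop them first */
	remove_leaf_nodes_sketch();
add_leaf:
	/* restart queues so leaf nodes rejoin the default topology ... */
	add_leaf_nodes_sketch();
	/* ... then put the non-leaf nodes back to default parameters */
	reset_noleaf_nodes_sketch();
fail_clear:
	if (clear_on_fail)
		printf("clearing software TM configuration\n");
	return ret;
}

int main(void)
{
	return hierarchy_commit_sketch(1);
}

The key design point, as stated in the commit message, is that rollback reuses queue
stop/start: stopping a queue drops its leaf scheduler node, and starting it re-creates
the node under the default topology, so a failed commit does not leave stale scheduler
nodes behind.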