[v1] net/ice: fix not clear bandwidth correctly when DCF close
Checks
Commit Message
When closing DCF, the bandwidth limit configured for VFs by DCF is not
cleared correctly. The configuration will still take effect when DCF starts
again, if VFs are not re-allocated. This patch clears the VF bandwidth limits
when DCF closes; DCF then needs to re-configure VF bandwidth the next time it
starts.
Fixes: 3a6bfc37eaf4 ("net/ice: support QoS config VF bandwidth in DCF")
Cc: stable@dpdk.org
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
drivers/net/ice/ice_dcf.c | 10 ++++++---
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_sched.c | 39 ++++++++++++++++++++++++++++++++-
3 files changed, 46 insertions(+), 4 deletions(-)
Comments
> -----Original Message-----
> From: Xu, Ting <ting.xu@intel.com>
> Sent: Thursday, July 15, 2021 10:17 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; Xu, Ting <ting.xu@intel.com>;
> stable@dpdk.org
> Subject: [PATCH v1] net/ice: fix not clear bandwidth correctly when DCF close
>
> When closing DCF, the bandwidth limit configured for VFs by DCF is not cleared
> correctly. The configuration will still take effect when DCF starts again, if VFs
> are not re-allocated. This patch cleared VFs bandwidth limit when DCF closes,
> and DCF needs to re-configure bandwidth for VFs when it starts next time.
>
> Fixes: 3a6bfc37eaf4 ("net/ice: support QoS config VF bandwidth in DCF")
> Cc: stable@dpdk.org
No need to Cc stable, as the fix lands in the same release as the commit it fixes.
>
> Signed-off-by: Ting Xu <ting.xu@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
@@ -706,6 +706,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+ if (hw->tm_conf.committed) {
+ ice_dcf_clear_bw(hw);
+ ice_dcf_tm_conf_uninit(eth_dev);
+ }
+
ice_dcf_disable_irq0(hw);
rte_intr_disable(intr_handle);
rte_intr_callback_unregister(intr_handle,
@@ -714,14 +720,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
ice_dcf_mode_disable(hw);
iavf_shutdown_adminq(&hw->avf);
- if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
- ice_dcf_tm_conf_uninit(eth_dev);
-
rte_free(hw->arq_buf);
rte_free(hw->vf_vsi_map);
rte_free(hw->vf_res);
rte_free(hw->rss_lut);
rte_free(hw->rss_key);
+ rte_free(hw->qos_bw_cfg);
}
static int
@@ -133,5 +133,6 @@ int ice_dcf_link_update(struct rte_eth_dev *dev,
void ice_dcf_tm_conf_init(struct rte_eth_dev *dev);
void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev);
int ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id);
+int ice_dcf_clear_bw(struct ice_dcf_hw *hw);
#endif /* _ICE_DCF_H_ */
@@ -32,6 +32,9 @@ const struct rte_tm_ops ice_dcf_tm_ops = {
.node_delete = ice_dcf_node_delete,
};
+#define ICE_DCF_SCHED_TC_NODE 0xffff
+#define ICE_DCF_VFID 0
+
void
ice_dcf_tm_conf_init(struct rte_eth_dev *dev)
{
@@ -709,6 +712,32 @@ ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id)
return ICE_SUCCESS;
}
+int
+ice_dcf_clear_bw(struct ice_dcf_hw *hw) /* zero every VF's shaper limits and push the cleared config to HW */
+{
+ uint16_t vf_id;
+ uint32_t tc;
+ int ret, size;
+
+ size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+ sizeof(struct virtchnl_dcf_bw_cfg) *
+ (hw->tm_conf.nb_tc_node - 1); /* the list struct already embeds one cfg element, hence nb_tc_node - 1 */
+
+ for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) {
+ for (tc = 0; tc < hw->tm_conf.nb_tc_node; tc++) {
+ hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.peak = 0; /* 0 = limit cleared; NOTE(review): assumes qos_bw_cfg[vf_id] is non-NULL for every VF -- confirm commit path allocates all entries */
+ hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.committed = 0;
+ }
+ ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size); /* apply the zeroed limits for this VF */
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "VF %u BW clear failed", vf_id);
+ return ICE_ERR_CFG;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
int clear_on_fail,
__rte_unused struct rte_tm_error *error)
@@ -748,7 +777,6 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
cir_total = 0;
/* init tc bw configuration */
-#define ICE_DCF_SCHED_TC_NODE 0xffff
tc_bw->vf_id = ICE_DCF_SCHED_TC_NODE;
tc_bw->node_type = VIRTCHNL_DCF_TARGET_TC_BW;
tc_bw->num_elem = hw->tm_conf.nb_tc_node;
@@ -825,6 +853,15 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
if (ret_val)
goto fail_clear;
+ /* store TC node bw configuration */
+ hw->qos_bw_cfg[ICE_DCF_VFID] = rte_zmalloc("tc_bw_cfg", size, 0);
+ if (!hw->qos_bw_cfg[ICE_DCF_VFID]) {
+ ret_val = ICE_ERR_NO_MEMORY;
+ goto fail_clear;
+ }
+ ice_memcpy(hw->qos_bw_cfg[ICE_DCF_VFID], tc_bw, sizeof(*tc_bw),
+ ICE_NONDMA_TO_NONDMA);
+
hw->tm_conf.committed = true;
return ret_val;