From patchwork Thu Jul  8 02:33:46 2021
X-Patchwork-Submitter: "Xu, Ting" <ting.xu@intel.com>
X-Patchwork-Id: 95520
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com,
 Ting Xu <ting.xu@intel.com>
Date: Thu, 8 Jul 2021 10:33:46 +0800
Message-Id: <20210708023346.34436-1-ting.xu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210702150027.26294-1-ting.xu@intel.com>
References: <20210702150027.26294-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v2] net/ice: support QoS BW config after VF reset in DCF

When a VF reset happens, the QoS bandwidth configuration is lost. If the
reset is not caused by a DCB change, the DCF is supposed to replay the
bandwidth configuration to the VF. In this patch, when a VSI update PF
event is received from the PF after a VF reset and it is confirmed that
the DCB configuration has not changed, the stored bandwidth configuration
is replayed.
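For context, the replay decision added below can be pictured with the following rough,
self-contained sketch. Every type and helper in it (port_ets_cfg, vf_bw_cfg,
query_port_ets(), send_vf_bw(), saved_bw_cfg) is a made-up placeholder standing in for
ice_aqc_port_ets_elem, virtchnl_dcf_bw_cfg_list, ice_aq_query_port_ets(),
ice_dcf_set_vf_bw() and hw->qos_bw_cfg; it only illustrates the flow, not the actual
driver code:

#include <stdint.h>
#include <string.h>

/* Placeholder types; layouts are illustrative only. */
struct port_ets_cfg { uint8_t tc_bw_share[8]; };
struct vf_bw_cfg { uint32_t tc_committed_bw[8]; };

/* Placeholder hooks for the port ETS query and the virtchnl BW message. */
static int query_port_ets(struct port_ets_cfg *out)
{
        memset(out, 0, sizeof(*out));
        return 0;
}

static int send_vf_bw(uint16_t vf_id, const struct vf_bw_cfg *cfg)
{
        (void)vf_id;
        (void)cfg;
        return 0;
}

/* Per-VF copy saved at hierarchy commit time (hw->qos_bw_cfg in the patch). */
static struct vf_bw_cfg saved_bw_cfg[16];

/* Replay the saved bandwidth config after a VF reset, but only when the
 * port ETS (DCB) configuration has not changed in the meantime. */
static int replay_vf_bw(uint16_t vf_id, struct port_ets_cfg *ets_cache)
{
        struct port_ets_cfg old = *ets_cache;
        int ret;

        ret = query_port_ets(ets_cache);        /* refresh the cached ETS config */
        if (ret)
                return ret;

        if (memcmp(&old, ets_cache, sizeof(old)) != 0)
                return 0;       /* DCB changed: saved BW config is stale, skip replay */

        return send_vf_bw(vf_id, &saved_bw_cfg[vf_id]);
}

int main(void)
{
        struct port_ets_cfg ets = { .tc_bw_share = {0} };

        return replay_vf_bw(0, &ets);
}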
Signed-off-by: Ting Xu <ting.xu@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
v1->v2: rebase
---
 drivers/net/ice/ice_dcf.c        | 11 +++++--
 drivers/net/ice/ice_dcf.h        |  2 ++
 drivers/net/ice/ice_dcf_ethdev.c |  1 -
 drivers/net/ice/ice_dcf_parent.c |  3 ++
 drivers/net/ice/ice_dcf_sched.c  | 52 +++++++++++++++++++++++++++++++-
 5 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 349d23ee4f..045800a2d9 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -577,7 +577,7 @@ int
 ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	int ret;
+	int ret, size;
 
 	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
 	hw->avf.back = hw;
@@ -669,8 +669,15 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 		}
 	}
 
-	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
 		ice_dcf_tm_conf_init(eth_dev);
+		size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
+		hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
+		if (!hw->qos_bw_cfg) {
+			PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
+			goto err_rss;
+		}
+	}
 
 	hw->eth_dev = eth_dev;
 	rte_intr_callback_register(&pci_dev->intr_handle,
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 1c7653de3d..711c0cf3ad 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -90,6 +90,7 @@ struct ice_dcf_hw {
 	uint16_t pf_vsi_id;
 
 	struct ice_dcf_tm_conf tm_conf;
+	struct virtchnl_dcf_bw_cfg_list **qos_bw_cfg;
 	struct ice_aqc_port_ets_elem *ets_config;
 	struct virtchnl_version_info virtchnl_version;
 	struct virtchnl_vf_resource *vf_res; /* VF resource */
@@ -131,5 +132,6 @@ int ice_dcf_link_update(struct rte_eth_dev *dev,
 		    __rte_unused int wait_to_complete);
 void ice_dcf_tm_conf_init(struct rte_eth_dev *dev);
 void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev);
+int ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 69fe6e63d1..cab7c4da87 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -622,7 +622,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
-	dcf_ad->real_hw.tm_conf.committed = false;
 
 	return 0;
 }
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index c59cd0bef9..03155c9df0 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -143,6 +143,9 @@ ice_dcf_vsi_update_service_handler(void *param)
 		}
 	}
 
+	if (hw->tm_conf.committed)
+		ice_dcf_replay_vf_bw(hw, reset_param->vf_id);
+
 	rte_spinlock_unlock(&vsi_update_lock);
 
 	free(param);
diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c
index 8a0529a3bc..1e16654d90 100644
--- a/drivers/net/ice/ice_dcf_sched.c
+++ b/drivers/net/ice/ice_dcf_sched.c
@@ -668,6 +668,47 @@ static int ice_dcf_commit_check(struct ice_dcf_hw *hw)
 	return ICE_SUCCESS;
 }
 
+int
+ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id)
+{
+	struct ice_aqc_port_ets_elem old_ets_config;
+	struct ice_dcf_adapter *adapter;
+	struct ice_hw *parent_hw;
+	int ret, size;
+
+	adapter = hw->eth_dev->data->dev_private;
+	parent_hw = &adapter->parent.hw;
+
+	/* store the old ets config */
+	old_ets_config = *hw->ets_config;
+
+	ice_memset(hw->ets_config, 0, sizeof(*hw->ets_config), ICE_NONDMA_MEM);
+	ret = ice_aq_query_port_ets(parent_hw->port_info,
+			hw->ets_config, sizeof(*hw->ets_config),
+			NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
+		return ret;
+	}
+
+	if (memcmp(&old_ets_config, hw->ets_config, sizeof(old_ets_config))) {
+		PMD_DRV_LOG(DEBUG, "ETS config changes, do not replay BW");
+		return ICE_SUCCESS;
+	}
+
+	size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+		sizeof(struct virtchnl_dcf_bw_cfg) *
+		(hw->tm_conf.nb_tc_node - 1);
+
+	ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
+	if (ret) {
+		PMD_DRV_LOG(DEBUG, "VF %u BW replay failed", vf_id);
+		return ICE_ERR_CFG;
+	}
+
+	return ICE_SUCCESS;
+}
+
 static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 				 int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error)
@@ -757,7 +798,16 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 		ret_val = ice_dcf_set_vf_bw(hw, vf_bw, size);
 		if (ret_val)
 			goto fail_clear;
-		memset(vf_bw, 0, size);
+
+		hw->qos_bw_cfg[vf_id] = rte_zmalloc("vf_bw_cfg", size, 0);
+		if (!hw->qos_bw_cfg[vf_id]) {
+			ret_val = ICE_ERR_NO_MEMORY;
+			goto fail_clear;
+		}
+		/* store the bandwidth information for replay */
+		ice_memcpy(hw->qos_bw_cfg[vf_id], vf_bw, sizeof(*vf_bw),
+			   ICE_NONDMA_TO_NONDMA);
+		ice_memset(vf_bw, 0, size, ICE_NONDMA_MEM);
 	}
 
 	/* check if total CIR is larger than port bandwidth */
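A note on the per-VF storage used for the replay: hw->qos_bw_cfg is an array of per-VF
pointers, and each entry is sized as the list header plus one element per additional TC
node, which suggests a trailing-array layout. Below is a minimal standalone sketch of
that sizing, using made-up stand-in structs (bw_cfg, bw_cfg_list); their field names and
layout are illustrative only and may differ from the real virtchnl_dcf_bw_cfg /
virtchnl_dcf_bw_cfg_list definitions:

#include <stdint.h>
#include <stdlib.h>

struct bw_cfg {
        uint32_t committed_bw;
        uint32_t peak_bw;
};

struct bw_cfg_list {
        uint16_t vf_id;
        uint16_t num_elem;
        struct bw_cfg cfg[1];   /* first TC; additional TCs follow in memory */
};

int main(void)
{
        uint16_t num_vfs = 4, nb_tc_node = 3, vf_id = 2;
        struct bw_cfg_list **qos_bw_cfg;
        size_t size;

        /* One pointer slot per VF, as allocated in ice_dcf_init_hw(). */
        qos_bw_cfg = calloc(num_vfs, sizeof(*qos_bw_cfg));
        if (qos_bw_cfg == NULL)
                return 1;

        /* The list header already carries one bw_cfg element, so only
         * (nb_tc_node - 1) extra elements are added to the size. */
        size = sizeof(struct bw_cfg_list) +
               sizeof(struct bw_cfg) * (nb_tc_node - 1);

        /* At hierarchy commit the just-sent config is copied into this
         * per-VF slot so it can be replayed after a VF reset. */
        qos_bw_cfg[vf_id] = calloc(1, size);
        if (qos_bw_cfg[vf_id] == NULL) {
                free(qos_bw_cfg);
                return 1;
        }
        qos_bw_cfg[vf_id]->vf_id = vf_id;
        qos_bw_cfg[vf_id]->num_elem = nb_tc_node;

        free(qos_bw_cfg[vf_id]);
        free(qos_bw_cfg);
        return 0;
}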