From patchwork Sat May 27 08:17:55 2017
X-Patchwork-Submitter: Wenzhuo Lu <wenzhuo.lu@intel.com>
X-Patchwork-Id: 24740
From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, cristian.dumitrescu@intel.com,
 jasvinder.singh@intel.com, Wenzhuo Lu <wenzhuo.lu@intel.com>
Date: Sat, 27 May 2017 16:17:55 +0800
Message-Id: <1495873075-49542-21-git-send-email-wenzhuo.lu@intel.com>
In-Reply-To: <1495873075-49542-1-git-send-email-wenzhuo.lu@intel.com>
References: <1495873075-49542-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH 20/20] net/ixgbe: support committing TM hierarchy

Add support for the Traffic Management API rte_tm_hierarchy_commit.
When this API is called, the driver tries to enable the TM configuration
on the hardware.
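For reference, below is a minimal sketch of how an application could trigger
this driver path through the generic rte_tm API. It assumes the hierarchy
(shaper profiles and nodes) has already been staged with
rte_tm_shaper_profile_add()/rte_tm_node_add(), uses the argument types from
the current rte_tm.h, and the helper name and port_id are illustrative; it is
not part of this patch.

    #include <stdio.h>
    #include <rte_tm.h>

    /* Commit a TM hierarchy previously staged on this port.
     * clear_on_fail = 1 asks the driver to drop the staged configuration
     * if the commit is rejected (ixgbe does this via fail_clear below).
     */
    static int
    app_commit_tm(uint16_t port_id)
    {
            struct rte_tm_error error = { 0 };
            int ret = rte_tm_hierarchy_commit(port_id, 1, &error);

            if (ret)
                    printf("TM hierarchy commit failed: %s\n",
                           error.message ? error.message : "(no message)");
            return ret;
    }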
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  8 ++---
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
 drivers/net/ixgbe/ixgbe_tm.c     | 69 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 74 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index d339fc4..e234177 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -302,9 +302,6 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-		uint16_t queue_idx, uint16_t tx_rate);
-
 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
 				struct ether_addr *mac_addr,
 				uint32_t index, uint32_t pool);
@@ -5605,8 +5602,9 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
 }
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-		uint16_t queue_idx, uint16_t tx_rate)
+int
+ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t rf_dec, rf_int;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index ccde335..48cf5b6 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -729,6 +729,8 @@ int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
 void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
+int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+			       uint16_t tx_rate);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index abb4643..c52f591 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -61,6 +61,9 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 				       uint32_t node_id,
 				       struct rte_tm_node_capabilities *cap,
 				       struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+				  int clear_on_fail,
+				  struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
 	.capabilities_get = ixgbe_tm_capabilities_get,
@@ -71,6 +74,7 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 	.node_type_get = ixgbe_node_type_get,
 	.level_capabilities_get = ixgbe_level_capabilities_get,
 	.node_capabilities_get = ixgbe_node_capabilities_get,
+	.hierarchy_commit = ixgbe_hierarchy_commit,
 };
 
 int
@@ -779,3 +783,68 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 
 	return 0;
 }
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+		       int clear_on_fail,
+		       struct rte_tm_error *error)
+{
+	struct ixgbe_tm_conf *tm_conf =
+		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+	struct ixgbe_tm_node *tm_node;
+	uint64_t bw;
+	int ret;
+	int i;
+
+	if (!error)
+		return -EINVAL;
+
+	/* nothing to commit if no hierarchy has been configured */
+	if (!tm_conf->root)
+		return 0;
+
+	/* port max bandwidth not supported yet */
+	if (tm_conf->root->shaper_profile->profile.peak.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "no port max bandwidth";
+		goto fail_clear;
+	}
+
+	/* HW does not support TC max bandwidth */
+	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+		if (tm_node->shaper_profile->profile.peak.rate) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "no TC max bandwidth";
+			goto fail_clear;
+		}
+	}
+
+	/* queue max bandwidth */
+	i = 0;
+	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+		bw = tm_node->shaper_profile->profile.peak.rate;
+		if (bw) {
+			/* interpret Bps to Mbps */
+			bw = bw * 8 / 1000 / 1000;
+			ret = ixgbe_set_queue_rate_limit(dev, i, bw);
+			if (ret) {
+				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+				error->message =
+					"failed to set queue max bandwidth";
+				goto fail_clear;
+			}
+		}
+
+		i++;
+	}
+
+	return 0;
+
+fail_clear:
+	/* clear all the traffic manager configuration */
+	if (clear_on_fail) {
+		ixgbe_tm_conf_uninit(dev);
+		ixgbe_tm_conf_init(dev);
+	}
+	return -EINVAL;
+}
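A note on the unit conversion in the last hunk: the rte_tm shaper profile
expresses peak.rate in bytes per second, while ixgbe_set_queue_rate_limit()
expects a rate in Mbit/s, hence bw * 8 / 1000 / 1000. A standalone sketch of
the same arithmetic, with an illustrative value:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* A queue shaper peak rate of 125,000,000 bytes/s ... */
            uint64_t peak_bps = 125000000ULL;
            /* ... is programmed as 1000 Mbit/s on the queue rate limit. */
            uint64_t mbps = peak_bps * 8 / 1000 / 1000;

            printf("%" PRIu64 " bytes/s -> %" PRIu64 " Mbit/s\n",
                   peak_bps, mbps);
            return 0;
    }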