From patchwork Sat May 27 08:17:53 2017
X-Patchwork-Submitter: Wenzhuo Lu <wenzhuo.lu@intel.com>
X-Patchwork-Id: 24737
From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, cristian.dumitrescu@intel.com,
 jasvinder.singh@intel.com, Wenzhuo Lu <wenzhuo.lu@intel.com>
Date: Sat, 27 May 2017 16:17:53 +0800
Message-Id: <1495873075-49542-19-git-send-email-wenzhuo.lu@intel.com>
In-Reply-To: <1495873075-49542-1-git-send-email-wenzhuo.lu@intel.com>
References: <1495873075-49542-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH 18/20] net/ixgbe: support getting TM level capability

Add support for the Traffic Management API function
rte_tm_level_capabilities_get().

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_tm.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 68b26cc..4a9947d 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -53,6 +53,10 @@ static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 			     struct rte_tm_error *error);
 static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 			       int *is_leaf, struct rte_tm_error *error);
+static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+					uint32_t level_id,
+					struct rte_tm_level_capabilities *cap,
+					struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
 	.capabilities_get = ixgbe_tm_capabilities_get,
@@ -61,6 +65,7 @@ static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 	.node_add = ixgbe_node_add,
 	.node_delete = ixgbe_node_delete,
 	.node_type_get = ixgbe_node_type_get,
+	.level_capabilities_get = ixgbe_level_capabilities_get,
 };
 
 int
@@ -638,3 +643,76 @@ static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 
 	return 0;
 }
+
+static int
+ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+			     uint32_t level_id,
+			     struct rte_tm_level_capabilities *cap,
+			     struct rte_tm_error *error)
+{
+	uint8_t nb_tc = 0;
+	uint8_t nb_queue = 0;
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "too deep level";
+		return -EINVAL;
+	}
+
+	nb_tc = ixgbe_tc_nb_get(dev);
+	nb_queue = dev->data->nb_tx_queues;
+
+	/* root node */
+	if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->n_nodes_leaf_max = 0;
+		cap->non_leaf_nodes_identical = false;
+		cap->leaf_nodes_identical = false;
+		cap->nonleaf.shaper_private_supported = true;
+		cap->nonleaf.shaper_private_dual_rate_supported = false;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		/* 10Gbps -> 1.25GBps */
+		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+		cap->nonleaf.shaper_shared_n_max = 0;
+		cap->nonleaf.sched_n_children_max = nb_tc;
+		cap->nonleaf.sched_sp_n_priorities_max = 0;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 0;
+		cap->nonleaf.stats_mask = 0;
+
+		return 0;
+	}
+
+	/* TC or queue node */
+	if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+		/* TC */
+		cap->n_nodes_max = nb_tc;
+		cap->n_nodes_nonleaf_max = nb_tc;
+		cap->n_nodes_leaf_max = nb_tc;
+		cap->non_leaf_nodes_identical = true;
+	} else {
+		/* queue */
+		cap->n_nodes_max = nb_queue;
+		cap->n_nodes_nonleaf_max = 0;
+		cap->n_nodes_leaf_max = nb_queue;
+		cap->non_leaf_nodes_identical = false;
+	}
+	cap->leaf_nodes_identical = true;
+	cap->leaf.shaper_private_supported = true;
+	cap->leaf.shaper_private_dual_rate_supported = false;
+	cap->leaf.shaper_private_rate_min = 0;
+	/* 10Gbps -> 1.25GBps */
+	cap->leaf.shaper_private_rate_max = 1250000000ull;
+	cap->leaf.shaper_shared_n_max = 0;
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = false;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+	cap->leaf.stats_mask = 0;
+
+	return 0;
+}
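
For context, a minimal sketch of how an application could exercise this new
op through the generic rte_tm API rather than the driver ops directly. This
is not part of the patch: it assumes the rte_tm_capabilities_get() and
rte_tm_level_capabilities_get() prototypes from the proposed Traffic
Management API (port id first, then the level id), and uses the reported
n_levels_max to bound the walk. For ixgbe the levels are port, TC and queue.

#include <stdio.h>
#include <string.h>

#include <rte_tm.h>

/* Sketch only: the port_id width (uint8_t vs uint16_t) depends on the
 * DPDK version this lands in. */
static void
dump_tm_levels(uint16_t port_id)
{
	struct rte_tm_capabilities tm_cap;
	struct rte_tm_level_capabilities lvl_cap;
	struct rte_tm_error err;
	uint32_t lvl;

	memset(&tm_cap, 0, sizeof(tm_cap));
	if (rte_tm_capabilities_get(port_id, &tm_cap, &err) != 0)
		return;

	/* For ixgbe: level 0 = port, 1 = TC, 2 = queue. */
	for (lvl = 0; lvl < tm_cap.n_levels_max; lvl++) {
		memset(&lvl_cap, 0, sizeof(lvl_cap));
		if (rte_tm_level_capabilities_get(port_id, lvl,
						  &lvl_cap, &err)) {
			printf("level %u: %s\n", lvl,
			       err.message ? err.message : "query failed");
			continue;
		}
		printf("level %u: max nodes %u (non-leaf %u, leaf %u)\n",
		       lvl, lvl_cap.n_nodes_max,
		       lvl_cap.n_nodes_nonleaf_max,
		       lvl_cap.n_nodes_leaf_max);
	}
}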