From patchwork Thu Sep 17 08:42:31 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78020 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8D908A04B6; Thu, 17 Sep 2020 10:42:57 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 7A5731D59D; Thu, 17 Sep 2020 10:42:54 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id B111C1D587 for ; Thu, 17 Sep 2020 10:42:48 +0200 (CEST) IronPort-SDR: qrcMGvpEp/bEftDbi2WJPexYL0J4t8rNRNq1vnwXtDNzhqrjQKNpM5YnfxcboNtHkfBI+YNQ03 oie6dtJL0zrQ== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060481" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060481" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:42:47 -0700 IronPort-SDR: y/UmMdJIMOgcWB3mkVZXxrg9qZrAC6e8y6JbsWSr9Wn3gU2ZA10R0u+fSHRJ666x4/y3Ezo8gR 9/X1306bw4aA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385658" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 01:42:46 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:31 +0100 Message-Id: <1600332159-26018-2-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 1/9] sched: add support profile data structure X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add subport profile data structure to support dynamic configuration of subport bandwidth Signed-off-by: Savinay Dharmappa Signed-off-by: Jasvinder Singh --- lib/librte_sched/rte_sched.c | 10 ++++++++++ lib/librte_sched/rte_sched.h | 25 +++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c index 0fa0741..dc5beb2 100644 --- a/lib/librte_sched/rte_sched.c +++ b/lib/librte_sched/rte_sched.c @@ -101,6 +101,16 @@ enum grinder_state { e_GRINDER_READ_MBUF }; +struct rte_sched_subport_profile { + /* Token bucket (TB) */ + uint64_t tb_period; + uint64_t tb_credits_per_period; + uint64_t tb_size; + + uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + uint64_t tc_period; +}; + struct rte_sched_grinder { /* Pipe cache */ uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE]; diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h index 8a5a93c..39339b7 100644 --- a/lib/librte_sched/rte_sched.h +++ b/lib/librte_sched/rte_sched.h @@ -192,6 +192,20 @@ struct rte_sched_subport_params { #endif }; +struct rte_sched_subport_profile_params { + /** Token bucket rate (measured in bytes per 
second) */ + uint64_t tb_rate; + + /** Token bucket size (measured in credits) */ + uint64_t tb_size; + + /** Traffic class rates (measured in bytes per second) */ + uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + + /** Enforcement period for rates (measured in milliseconds) */ + uint64_t tc_period; +}; + /** Subport statistics */ struct rte_sched_subport_stats { /** Number of packets successfully written */ @@ -254,6 +268,17 @@ struct rte_sched_port_params { /** Number of subports */ uint32_t n_subports_per_port; + /** subport profile table. + * Every pipe is configured using one of the profiles from this table. + */ + struct rte_sched_subport_profile_params *subport_profiles; + + /** Profiles in the pipe profile table */ + uint32_t n_subport_profiles; + + /** Max allowed profiles in the pipe profile table */ + uint32_t n_max_subport_profiles; + /** Maximum number of subport pipes. * This parameter is used to reserve a fixed number of bits * in struct rte_mbuf::sched.queue_id for the pipe_id for all From patchwork Thu Sep 17 08:42:32 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78021 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id C8C4CA04B6; Thu, 17 Sep 2020 10:43:06 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id DDA9B1D5A9; Thu, 17 Sep 2020 10:42:55 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id 6B7F01D597 for ; Thu, 17 Sep 2020 10:42:50 +0200 (CEST) IronPort-SDR: 5uKsVMUZeJjPKjfcsWQE0jYtN0jhvpRDGlcG7xYVDAaXqcs1rHJxVdwawcYxa9iEQQdm28L4zU uMJr67YXmu7g== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060482" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060482" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:42:49 -0700 IronPort-SDR: hJeOOK4FDdGeyod94HFWwFCgrX8b3yQymyGnrGPkV3T28RwKsq/+gpE8ppuQrIGphW7BtC8mig 8u3wdwd5KaCg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385665" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 01:42:48 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:32 +0100 Message-Id: <1600332159-26018-3-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 2/9] sched: add subport profile table X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add subport profile table to internal port data structure and update the port config function. 
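To illustrate the intended use of the new structures, here is a minimal sketch of how an application could fill rte_sched_subport_profile_params and hand the profile table to the port through rte_sched_port_params; the numeric values and the profile count are illustrative assumptions, not values mandated by the patches:

#include <rte_sched.h>

/* Illustrative values; 13 tc_rate entries match RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE. */
static struct rte_sched_subport_profile_params subport_profiles[] = {
	{
		.tb_rate = 1250000000,		/* token bucket rate, bytes/sec */
		.tb_size = 1000000,		/* token bucket size, credits */
		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000,
			1250000000},		/* per traffic class, bytes/sec */
		.tc_period = 10,		/* enforcement period, ms */
	},
};

static struct rte_sched_port_params port_params = {
	/* ... existing fields: name, socket, rate, mtu, frame_overhead,
	 * n_subports_per_port, n_pipes_per_subport, ...
	 */
	.subport_profiles = subport_profiles,
	.n_subport_profiles = 1,
	.n_max_subport_profiles = 8,	/* illustrative upper bound */
};
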
Signed-off-by: Savinay Dharmappa Signed-off-by: Jasvinder Singh --- lib/librte_sched/rte_sched.c | 185 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 182 insertions(+), 3 deletions(-) diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c index dc5beb2..5fa7865 100644 --- a/lib/librte_sched/rte_sched.c +++ b/lib/librte_sched/rte_sched.c @@ -222,6 +222,8 @@ struct rte_sched_port { uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE]; uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE]; + uint32_t n_subport_profiles; + uint32_t n_max_subport_profiles; uint64_t rate; uint32_t mtu; uint32_t frame_overhead; @@ -240,6 +242,7 @@ struct rte_sched_port { uint32_t subport_id; /* Large data structures */ + struct rte_sched_subport_profile *subport_profiles; struct rte_sched_subport *subports[0] __rte_cache_aligned; } __rte_cache_aligned; @@ -386,8 +389,60 @@ pipe_profile_check(struct rte_sched_pipe_params *params, } static int +subport_profile_check(struct rte_sched_subport_profile_params *params, + uint64_t rate) +{ + uint32_t i; + + /* Check user parameters */ + if (params == NULL) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for parameter params\n", __func__); + return -EINVAL; + } + + if (params->tb_rate == 0 || params->tb_rate > rate) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for tb rate\n", __func__); + return -EINVAL; + } + + if (params->tb_size == 0) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for tb size\n", __func__); + return -EINVAL; + } + + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { + uint64_t tc_rate = params->tc_rate[i]; + + if (tc_rate == 0 || (tc_rate > params->tb_rate)) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for tc rate\n", __func__); + return -EINVAL; + } + } + + if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect tc rate(best effort)\n", __func__); + return -EINVAL; + } + + if (params->tc_period == 0) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for tc period\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int rte_sched_port_check_params(struct rte_sched_port_params *params) { + uint32_t i; + if (params == NULL) { RTE_LOG(ERR, SCHED, "%s: Incorrect value for parameter params\n", __func__); @@ -424,6 +479,29 @@ rte_sched_port_check_params(struct rte_sched_port_params *params) return -EINVAL; } + if (params->subport_profiles == NULL || + params->n_subport_profiles == 0 || + params->n_max_subport_profiles == 0 || + params->n_subport_profiles > params->n_max_subport_profiles) { + RTE_LOG(ERR, SCHED, + "%s: Incorrect value for subport profiles\n", __func__); + return -EINVAL; + } + + for (i = 0; i < params->n_subport_profiles; i++) { + struct rte_sched_subport_profile_params *p = + params->subport_profiles + i; + int status; + + status = subport_profile_check(p, params->rate); + if (status != 0) { + RTE_LOG(ERR, SCHED, + "%s: subport profile check failed(%d)\n", + __func__, status); + return -EINVAL; + } + } + /* n_pipes_per_subport: non-zero, power of 2 */ if (params->n_pipes_per_subport == 0 || !rte_is_power_of_2(params->n_pipes_per_subport)) { @@ -565,6 +643,42 @@ rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i) p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]); } +static void +rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i) +{ + struct rte_sched_subport_profile *p = port->subport_profiles + i; + + RTE_LOG(DEBUG, SCHED, "Low level 
config for subport profile %u:\n" + "Token bucket: period = %"PRIu64", credits per period = %"PRIu64"," + "size = %"PRIu64"\n" + "Traffic classes: period = %"PRIu64",\n" + "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 + " %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 + " %"PRIu64", %"PRIu64", %"PRIu64"]\n", + i, + + /* Token bucket */ + p->tb_period, + p->tb_credits_per_period, + p->tb_size, + + /* Traffic classes */ + p->tc_period, + p->tc_credits_per_period[0], + p->tc_credits_per_period[1], + p->tc_credits_per_period[2], + p->tc_credits_per_period[3], + p->tc_credits_per_period[4], + p->tc_credits_per_period[5], + p->tc_credits_per_period[6], + p->tc_credits_per_period[7], + p->tc_credits_per_period[8], + p->tc_credits_per_period[9], + p->tc_credits_per_period[10], + p->tc_credits_per_period[11], + p->tc_credits_per_period[12]); +} + static inline uint64_t rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate) { @@ -634,6 +748,37 @@ rte_sched_pipe_profile_convert(struct rte_sched_subport *subport, } static void +rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src, + struct rte_sched_subport_profile *dst, + uint64_t rate) +{ + uint32_t i; + + /* Token Bucket */ + if (src->tb_rate == rate) { + dst->tb_credits_per_period = 1; + dst->tb_period = 1; + } else { + double tb_rate = (double) src->tb_rate + / (double) rate; + double d = RTE_SCHED_TB_RATE_CONFIG_ERR; + + rte_approx_64(tb_rate, d, &dst->tb_credits_per_period, + &dst->tb_period); + } + + dst->tb_size = src->tb_size; + + /* Traffic Classes */ + dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate); + + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) + dst->tc_credits_per_period[i] + = rte_sched_time_ms_to_bytes(src->tc_period, + src->tc_rate[i]); +} + +static void rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport, struct rte_sched_subport_params *params, uint64_t rate) { @@ -657,6 +802,24 @@ rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport, } } +static void +rte_sched_port_config_subport_profile_table(struct rte_sched_port *port, + struct rte_sched_port_params *params, + uint64_t rate) +{ + uint32_t i; + + for (i = 0; i < port->n_subport_profiles; i++) { + struct rte_sched_subport_profile_params *src + = params->subport_profiles + i; + struct rte_sched_subport_profile *dst + = port->subport_profiles + i; + + rte_sched_subport_profile_convert(src, dst, rate); + rte_sched_port_log_subport_profile(port, i); + } +} + static int rte_sched_subport_check_params(struct rte_sched_subport_params *params, uint32_t n_max_pipes_per_subport, @@ -803,7 +966,7 @@ struct rte_sched_port * rte_sched_port_config(struct rte_sched_port_params *params) { struct rte_sched_port *port = NULL; - uint32_t size0, size1; + uint32_t size0, size1, size2; uint32_t cycles_per_byte; uint32_t i, j; int status; @@ -818,10 +981,21 @@ rte_sched_port_config(struct rte_sched_port_params *params) size0 = sizeof(struct rte_sched_port); size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *); + size2 = params->n_max_subport_profiles * + sizeof(struct rte_sched_subport_profile); /* Allocate memory to store the data structures */ - port = rte_zmalloc_socket("qos_params", size0 + size1, RTE_CACHE_LINE_SIZE, - params->socket); + port = rte_zmalloc_socket("qos_params", size0 + size1, + RTE_CACHE_LINE_SIZE, params->socket); + if (port == NULL) { + RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__); + + return 
NULL; + } + + /* Allocate memory to store the subport profile */ + port->subport_profiles = rte_zmalloc_socket("subport_profile", size2, + RTE_CACHE_LINE_SIZE, params->socket); if (port == NULL) { RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__); @@ -830,6 +1004,8 @@ rte_sched_port_config(struct rte_sched_port_params *params) /* User parameters */ port->n_subports_per_port = params->n_subports_per_port; + port->n_subport_profiles = params->n_subport_profiles; + port->n_max_subport_profiles = params->n_max_subport_profiles; port->n_pipes_per_subport = params->n_pipes_per_subport; port->n_pipes_per_subport_log2 = __builtin_ctz(params->n_pipes_per_subport); @@ -860,6 +1036,9 @@ rte_sched_port_config(struct rte_sched_port_params *params) port->time_cpu_bytes = 0; port->time = 0; + /* Subport profile table */ + rte_sched_port_config_subport_profile_table(port, params, port->rate); + cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT) / params->rate; port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte); From patchwork Thu Sep 17 08:42:33 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78022 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id C8A3AA04B6; Thu, 17 Sep 2020 10:43:12 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 446EC1D5B1; Thu, 17 Sep 2020 10:42:57 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id 5EA261D59C for ; Thu, 17 Sep 2020 10:42:51 +0200 (CEST) IronPort-SDR: PPbZMnm8h2WvTx7XyvIW8LvPi+SFc5Tu4f+uktinyFJy0xsXqtXZMPGtaz4SievJUwABLPDfgz liCGflLTBdCw== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060487" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060487" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:42:50 -0700 IronPort-SDR: SlOmL7PKn4eoBJ1aho2OWxmne2aAQSd8DQ7nk2zEOLuvOTduYLKuk/+DXcNwQ4QyT4hNhS9qRW gEEzvappBB4A== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385674" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 01:42:49 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:33 +0100 Message-Id: <1600332159-26018-4-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 3/9] sched: add subport profile add and config api X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add apis to add new subport profile and configure it. 
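A short usage sketch of the two APIs added here, with error handling trimmed; port, subport_id and the contents of params are assumed to be provided by the application:

struct rte_sched_subport_profile_params params;
uint32_t profile_id;
int ret;

/* params filled in by the application: tb_rate, tb_size, tc_rate[], tc_period */
ret = rte_sched_port_subport_profile_add(port, &params, &profile_id);
if (ret == 0)
	/* Apply the newly added bandwidth profile to an already configured subport. */
	ret = rte_sched_subport_profile_config(port, subport_id, profile_id);
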
Signed-off-by: Savinay Dharmappa Signed-off-by: Jasvinder Singh --- lib/librte_sched/rte_sched.c | 118 +++++++++++++++++++++++++++++++++ lib/librte_sched/rte_sched.h | 45 +++++++++++++ lib/librte_sched/rte_sched_version.map | 2 + 3 files changed, 165 insertions(+) diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c index 5fa7865..23aaec4 100644 --- a/lib/librte_sched/rte_sched.c +++ b/lib/librte_sched/rte_sched.c @@ -174,6 +174,8 @@ struct rte_sched_subport { /* Statistics */ struct rte_sched_subport_stats stats __rte_cache_aligned; + /* subport profile */ + uint32_t profile; /* Subport pipes */ uint32_t n_pipes_per_subport_enabled; uint32_t n_pipe_profiles; @@ -1343,6 +1345,56 @@ rte_sched_subport_config(struct rte_sched_port *port, } int +rte_sched_subport_profile_config(struct rte_sched_port *port, + uint32_t subport_id, + uint32_t profile_id) +{ + int i; + struct rte_sched_subport_profile *params; + uint32_t n_subports = subport_id + 1; + struct rte_sched_subport *s; + + if (port == NULL) { + RTE_LOG(ERR, SCHED, + "%s: Incorrect value for parameter port\n", __func__); + return -EINVAL; + } + + if (subport_id >= port->n_subports_per_port) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for parameter subport id\n", __func__); + + rte_sched_free_memory(port, n_subports); + return -EINVAL; + } + + params = port->subport_profiles + profile_id; + + s = port->subports[subport_id]; + + s->tb_credits = params->tb_size / 2; + + s->tc_time = port->time + params->tc_period; + + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) + if (s->qsize[i]) + s->tc_credits[i] = + params->tc_credits_per_period[i]; + else + params->tc_credits_per_period[i] = 0; + +#ifdef RTE_SCHED_SUBPORT_TC_OV + s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period, + s->pipe_tc_be_rate_max); +#endif + s->profile = profile_id; + + rte_sched_port_log_subport_profile(port, profile_id); + + return 0; +} + +int rte_sched_pipe_config(struct rte_sched_port *port, uint32_t subport_id, uint32_t pipe_id, @@ -1526,6 +1578,72 @@ rte_sched_subport_pipe_profile_add(struct rte_sched_port *port, return 0; } +int +rte_sched_port_subport_profile_add(struct rte_sched_port *port, + struct rte_sched_subport_profile_params *params, + uint32_t *subport_profile_id) +{ + int status; + uint32_t i; + struct rte_sched_subport_profile *dst; + + /* Port */ + if (port == NULL) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for parameter port\n", __func__); + return -EINVAL; + } + + if (params == NULL) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for parameter profile\n", __func__); + return -EINVAL; + } + + if (subport_profile_id == NULL) { + RTE_LOG(ERR, SCHED, "%s: " + "Incorrect value for parameter subport_profile_id\n", + __func__); + return -EINVAL; + } + + dst = port->subport_profiles + port->n_subport_profiles; + + /* Subport profiles exceeds the max limit */ + if (port->n_subport_profiles >= port->n_max_subport_profiles) { + RTE_LOG(ERR, SCHED, "%s: " + "Number of subport profiles exceeds the max limit\n", + __func__); + return -EINVAL; + } + + status = subport_profile_check(params, port->rate); + if (status != 0) { + RTE_LOG(ERR, SCHED, + "%s: subport profile check failed(%d)\n", __func__, status); + return -EINVAL; + } + + rte_sched_subport_profile_convert(params, dst, port->rate); + + /* Subport profile should not exists */ + for (i = 0; i < port->n_subport_profiles; i++) + if (memcmp(port->subport_profiles + i, + dst, sizeof(*dst)) == 0) { + RTE_LOG(ERR, SCHED, + "%s: subport profile exists\n", 
__func__); + return -EINVAL; + } + + /* Subport profile commit */ + *subport_profile_id = port->n_subport_profiles; + port->n_subport_profiles++; + + rte_sched_port_log_subport_profile(port, *subport_profile_id); + + return 0; +} + static inline uint32_t rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h index 39339b7..a7c2638 100644 --- a/lib/librte_sched/rte_sched.h +++ b/lib/librte_sched/rte_sched.h @@ -337,6 +337,29 @@ rte_sched_subport_pipe_profile_add(struct rte_sched_port *port, uint32_t *pipe_profile_id); /** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Hierarchical scheduler subport bandwidth profile add + * Note that this function is safe to use in runtime for adding new + * subport bandwidth profile as it doesn't have any impact on hiearchical + * structure of the scheduler. + * @param port + * Handle to port scheduler instance + * @param struct rte_sched_subport_profile + * Subport bandwidth profile + * @param subport_profile_d + * Subport profile id + * @return + * 0 upon success, error code otherwise + */ +__rte_experimental +int +rte_sched_port_subport_profile_add(struct rte_sched_port *port, + struct rte_sched_subport_profile_params *profile, + uint32_t *subport_profile_id); + +/** * Hierarchical scheduler subport configuration * * @param port @@ -354,6 +377,28 @@ rte_sched_subport_config(struct rte_sched_port *port, struct rte_sched_subport_params *params); /** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Hierarchical scheduler subport profile configuration + * Note that this function is safe to use in runtime for applying any specific + * subport bandwidth profile as it doesn't have any impact on hiearchical + * structure of the scheduler. 
+ * @param port + * Handle to port scheduler instance + * @param subport_id + * Subport ID + * @param profile_d + * Subport profile id + * @return + * 0 upon success, error code otherwise + */ +__rte_experimental +int +rte_sched_subport_profile_config(struct rte_sched_port *port, + uint32_t subport_id, + uint32_t profile_id); +/** * Hierarchical scheduler pipe configuration * * @param port diff --git a/lib/librte_sched/rte_sched_version.map b/lib/librte_sched/rte_sched_version.map index 3faef6f..e64335f 100644 --- a/lib/librte_sched/rte_sched_version.map +++ b/lib/librte_sched/rte_sched_version.map @@ -28,4 +28,6 @@ EXPERIMENTAL { global: rte_sched_subport_pipe_profile_add; + rte_sched_port_subport_profile_add; + rte_sched_subport_profile_config; }; From patchwork Thu Sep 17 08:42:34 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78023 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 13D11A04B6; Thu, 17 Sep 2020 10:43:21 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id AC00E1D5B9; Thu, 17 Sep 2020 10:42:58 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id D9FC81D597 for ; Thu, 17 Sep 2020 10:42:52 +0200 (CEST) IronPort-SDR: oHWgeLsunGD84ehD/3z6qOLO3psKx9k6fRe957jyByd4oPwx1uz9uiKI8cWKLQ8MCsI2LRzLx8 zKPyeNZk0qhA== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060489" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060489" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:42:52 -0700 IronPort-SDR: AWWq753bknFizMsHRKFcTVaLucz3qjKAfLjbFt4iQ+GInTZHWh4Kz1FmbpZ3jgTDot+HCnZOx4 7HmbzZDxWMZQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385682" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 01:42:51 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:34 +0100 Message-Id: <1600332159-26018-5-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 4/9] sched: update grinder credit and pipe config function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Credits are updated by fetching subport profile parameters from subport profile table. Similarly subport best effort tc is calculated in pipe config. 
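The subport token-bucket refill now reads its parameters from the subport profile instead of per-subport copies; condensed from grinder_credits_update(), where sp points to the subport's profile:

/* Refill the subport token bucket from the profile parameters. */
n_periods = (port->time - subport->tb_time) / sp->tb_period;
subport->tb_credits += n_periods * sp->tb_credits_per_period;
subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
subport->tb_time += n_periods * sp->tb_period;

For example, with sp->tb_period = 10 and sp->tb_credits_per_period = 8, an elapsed interval of 35 gives n_periods = 3, so 24 credits are added and then clipped to sp->tb_size.
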
Signed-off-by: Savinay Dharmappa Signed-off-by: Jasvinder Singh --- lib/librte_sched/rte_sched.c | 55 ++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c index 23aaec4..3f61afa 100644 --- a/lib/librte_sched/rte_sched.c +++ b/lib/librte_sched/rte_sched.c @@ -123,6 +123,7 @@ struct rte_sched_grinder { uint32_t productive; uint32_t pindex; struct rte_sched_subport *subport; + struct rte_sched_subport_profile *subport_params; struct rte_sched_pipe *pipe; struct rte_sched_pipe_profile *pipe_params; @@ -1401,6 +1402,7 @@ rte_sched_pipe_config(struct rte_sched_port *port, int32_t pipe_profile) { struct rte_sched_subport *s; + struct rte_sched_subport_profile *sp; struct rte_sched_pipe *p; struct rte_sched_pipe_profile *params; uint32_t n_subports = subport_id + 1; @@ -1441,14 +1443,15 @@ rte_sched_pipe_config(struct rte_sched_port *port, return -EINVAL; } + sp = port->subport_profiles + s->profile; /* Handle the case when pipe already has a valid configuration */ p = s->pipe + pipe_id; if (p->tb_time) { params = s->pipe_profiles + p->profile; double subport_tc_be_rate = - (double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] - / (double) s->tc_period; + (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] + / (double) sp->tc_period; double pipe_tc_be_rate = (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] / (double) params->tc_period; @@ -1490,8 +1493,8 @@ rte_sched_pipe_config(struct rte_sched_port *port, { /* Subport best effort tc oversubscription */ double subport_tc_be_rate = - (double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] - / (double) s->tc_period; + (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] + / (double) sp->tc_period; double pipe_tc_be_rate = (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] / (double) params->tc_period; @@ -2279,14 +2282,15 @@ grinder_credits_update(struct rte_sched_port *port, struct rte_sched_grinder *grinder = subport->grinder + pos; struct rte_sched_pipe *pipe = grinder->pipe; struct rte_sched_pipe_profile *params = grinder->pipe_params; + struct rte_sched_subport_profile *sp = grinder->subport_params; uint64_t n_periods; uint32_t i; /* Subport TB */ - n_periods = (port->time - subport->tb_time) / subport->tb_period; - subport->tb_credits += n_periods * subport->tb_credits_per_period; - subport->tb_credits = RTE_MIN(subport->tb_credits, subport->tb_size); - subport->tb_time += n_periods * subport->tb_period; + n_periods = (port->time - subport->tb_time) / sp->tb_period; + subport->tb_credits += n_periods * sp->tb_credits_per_period; + subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size); + subport->tb_time += n_periods * sp->tb_period; /* Pipe TB */ n_periods = (port->time - pipe->tb_time) / params->tb_period; @@ -2297,9 +2301,9 @@ grinder_credits_update(struct rte_sched_port *port, /* Subport TCs */ if (unlikely(port->time >= subport->tc_time)) { for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) - subport->tc_credits[i] = subport->tc_credits_per_period[i]; + subport->tc_credits[i] = sp->tc_credits_per_period[i]; - subport->tc_time = port->time + subport->tc_period; + subport->tc_time = port->time + sp->tc_period; } /* Pipe TCs */ @@ -2315,8 +2319,10 @@ grinder_credits_update(struct rte_sched_port *port, static inline uint64_t grinder_tc_ov_credits_update(struct rte_sched_port *port, - struct rte_sched_subport *subport) + struct rte_sched_subport 
*subport, uint32_t pos) { + struct rte_sched_grinder *grinder = subport->grinder + pos; + struct rte_sched_subport_profile *sp = grinder->subport_params; uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; uint64_t tc_consumption = 0, tc_ov_consumption_max; uint64_t tc_ov_wm = subport->tc_ov_wm; @@ -2326,17 +2332,17 @@ grinder_tc_ov_credits_update(struct rte_sched_port *port, return subport->tc_ov_wm_max; for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) { - tc_ov_consumption[i] = - subport->tc_credits_per_period[i] - subport->tc_credits[i]; + tc_ov_consumption[i] = sp->tc_credits_per_period[i] + - subport->tc_credits[i]; tc_consumption += tc_ov_consumption[i]; } tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] = - subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] - + sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] - subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE]; tc_ov_consumption_max = - subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] - + sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] - tc_consumption; if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] > @@ -2362,14 +2368,15 @@ grinder_credits_update(struct rte_sched_port *port, struct rte_sched_grinder *grinder = subport->grinder + pos; struct rte_sched_pipe *pipe = grinder->pipe; struct rte_sched_pipe_profile *params = grinder->pipe_params; + struct rte_sched_subport_profile *sp = grinder->subport_params; uint64_t n_periods; uint32_t i; /* Subport TB */ - n_periods = (port->time - subport->tb_time) / subport->tb_period; - subport->tb_credits += n_periods * subport->tb_credits_per_period; - subport->tb_credits = RTE_MIN(subport->tb_credits, subport->tb_size); - subport->tb_time += n_periods * subport->tb_period; + n_periods = (port->time - subport->tb_time) / sp->tb_period; + subport->tb_credits += n_periods * sp->tb_credits_per_period; + subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size); + subport->tb_time += n_periods * sp->tb_period; /* Pipe TB */ n_periods = (port->time - pipe->tb_time) / params->tb_period; @@ -2379,12 +2386,13 @@ grinder_credits_update(struct rte_sched_port *port, /* Subport TCs */ if (unlikely(port->time >= subport->tc_time)) { - subport->tc_ov_wm = grinder_tc_ov_credits_update(port, subport); + subport->tc_ov_wm = + grinder_tc_ov_credits_update(port, subport, pos); for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) - subport->tc_credits[i] = subport->tc_credits_per_period[i]; + subport->tc_credits[i] = sp->tc_credits_per_period[i]; - subport->tc_time = port->time + subport->tc_period; + subport->tc_time = port->time + sp->tc_period; subport->tc_ov_period_id++; } @@ -2907,6 +2915,9 @@ grinder_handle(struct rte_sched_port *port, struct rte_sched_pipe *pipe = grinder->pipe; grinder->pipe_params = subport->pipe_profiles + pipe->profile; + grinder->subport_params = port->subport_profiles + + subport->profile; + grinder_prefetch_tc_queue_arrays(subport, pos); grinder_credits_update(port, subport, pos); From patchwork Thu Sep 17 08:42:35 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78024 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id B32D5A04B6; Thu, 17 Sep 2020 10:43:32 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 
ABC1C1D5A8; Thu, 17 Sep 2020 10:43:13 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id 990561D59F for ; Thu, 17 Sep 2020 10:42:54 +0200 (CEST) IronPort-SDR: 71A+PU4Tn4ZW35L+5eXqhThUZ99YvlhwP+dU7JxjDzo52iDiyRWnnTUrAHAl60EG82KP+8ww00 yyuWyxb2V/pw== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060494" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060494" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:42:54 -0700 IronPort-SDR: LZH8H+f67fxWuympClPHqeUgChq3u1KUzVxkuzb4oesBmvogWD3A4U44HzeeJMjGrELnk56MMS f8caEzBTldGA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385691" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 01:42:52 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:35 +0100 Message-Id: <1600332159-26018-6-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 5/9] example/qos_sched: add dynamic config of subport X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Modify the qos_sched application to build the hierarchical scheduler with default subport bandwidth profile. It also allows to configure a subport with different subport bandwidth profile dynamically. 
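With a default profile present, per-subport initialization in the example becomes a two-step sequence, roughly as follows (error handling omitted; port, port_params and subport_params as defined in init.c):

uint32_t subport;

for (subport = 0; subport < port_params.n_subports_per_port; subport++) {
	/* Queue sizes, pipes and pipe profiles, as before. */
	rte_sched_subport_config(port, subport, &subport_params[subport]);
	/* Newly added: attach the default subport bandwidth profile (id 0). */
	rte_sched_subport_profile_config(port, subport, 0);
}
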
Signed-off-by: Savinay Dharmappa --- examples/qos_sched/cfg_file.c | 151 ++++++++++++++++++++++++----------------- examples/qos_sched/cfg_file.h | 4 ++ examples/qos_sched/init.c | 24 +++++-- examples/qos_sched/main.h | 1 + examples/qos_sched/profile.cfg | 3 + 5 files changed, 115 insertions(+), 68 deletions(-) diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c index f078e4f..cd167bd 100644 --- a/examples/qos_sched/cfg_file.c +++ b/examples/qos_sched/cfg_file.c @@ -143,6 +143,93 @@ cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe_params } int +cfg_load_subport_profile(struct rte_cfgfile *cfg, + struct rte_sched_subport_profile_params *subport_profile) +{ + int i; + const char *entry; + int profiles; + + if (!cfg || !subport_profile) + return -1; + + profiles = rte_cfgfile_num_sections(cfg, "subport profile", + sizeof("subport profile") - 1); + subport_params[0].n_pipe_profiles = profiles; + + for (i = 0; i < profiles; i++) { + char sec_name[32]; + snprintf(sec_name, sizeof(sec_name), "subport profile %d", i); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tb rate"); + if (entry) + subport_profile[i].tb_rate = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tb size"); + if (entry) + subport_profile[i].tb_size = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc period"); + if (entry) + subport_profile[i].tc_period = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 0 rate"); + if (entry) + subport_profile[i].tc_rate[0] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 1 rate"); + if (entry) + subport_profile[i].tc_rate[1] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 2 rate"); + if (entry) + subport_profile[i].tc_rate[2] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 3 rate"); + if (entry) + subport_profile[i].tc_rate[3] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 4 rate"); + if (entry) + subport_profile[i].tc_rate[4] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 5 rate"); + if (entry) + subport_profile[i].tc_rate[5] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 6 rate"); + if (entry) + subport_profile[i].tc_rate[6] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 7 rate"); + if (entry) + subport_profile[i].tc_rate[7] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 8 rate"); + if (entry) + subport_profile[i].tc_rate[8] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 9 rate"); + if (entry) + subport_profile[i].tc_rate[9] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 10 rate"); + if (entry) + subport_profile[i].tc_rate[10] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 11 rate"); + if (entry) + subport_profile[i].tc_rate[11] = (uint64_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 12 rate"); + if (entry) + subport_profile[i].tc_rate[12] = (uint64_t)atoi(entry); + } + + return 0; +} + +int cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subport_params) { const char *entry; @@ -267,70 +354,6 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo } } - entry = rte_cfgfile_get_entry(cfg, sec_name, "tb rate"); - if (entry) - subport_params[i].tb_rate = 
(uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tb size"); - if (entry) - subport_params[i].tb_size = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc period"); - if (entry) - subport_params[i].tc_period = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 0 rate"); - if (entry) - subport_params[i].tc_rate[0] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 1 rate"); - if (entry) - subport_params[i].tc_rate[1] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 2 rate"); - if (entry) - subport_params[i].tc_rate[2] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 3 rate"); - if (entry) - subport_params[i].tc_rate[3] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 4 rate"); - if (entry) - subport_params[i].tc_rate[4] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 5 rate"); - if (entry) - subport_params[i].tc_rate[5] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 6 rate"); - if (entry) - subport_params[i].tc_rate[6] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 7 rate"); - if (entry) - subport_params[i].tc_rate[7] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 8 rate"); - if (entry) - subport_params[i].tc_rate[8] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 9 rate"); - if (entry) - subport_params[i].tc_rate[9] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 10 rate"); - if (entry) - subport_params[i].tc_rate[10] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 11 rate"); - if (entry) - subport_params[i].tc_rate[11] = (uint64_t)atoi(entry); - - entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 12 rate"); - if (entry) - subport_params[i].tc_rate[12] = (uint64_t)atoi(entry); - int n_entries = rte_cfgfile_section_num_entries(cfg, sec_name); struct rte_cfgfile_entry entries[n_entries]; diff --git a/examples/qos_sched/cfg_file.h b/examples/qos_sched/cfg_file.h index 2eccf1c..0dc458a 100644 --- a/examples/qos_sched/cfg_file.h +++ b/examples/qos_sched/cfg_file.h @@ -14,4 +14,8 @@ int cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe); int cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subport); +int cfg_load_subport_profile(struct rte_cfgfile *cfg, + struct rte_sched_subport_profile_params + *subport_profile); + #endif diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c index 9626c15..541adb7 100644 --- a/examples/qos_sched/init.c +++ b/examples/qos_sched/init.c @@ -196,15 +196,20 @@ static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = { }, }; -struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = { +static struct rte_sched_subport_profile_params + subport_profile[MAX_SCHED_SUBPORT_PROFILES] = { { .tb_rate = 1250000000, .tb_size = 1000000, - .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000}, .tc_period = 10, + }, +}; + +struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = { + { .n_pipes_per_subport_enabled = 4096, .qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64}, .pipe_profiles = pipe_profiles, @@ -289,6 +294,9 @@ struct rte_sched_port_params 
port_params = { .mtu = 6 + 6 + 4 + 4 + 2 + 1500, .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT, .n_subports_per_port = 1, + .n_subport_profiles = 1, + .subport_profiles = subport_profile, + .n_max_subport_profiles = MAX_SCHED_SUBPORT_PROFILES, .n_pipes_per_subport = MAX_SCHED_PIPES, }; @@ -320,8 +328,15 @@ app_init_sched_port(uint32_t portid, uint32_t socketid) for (subport = 0; subport < port_params.n_subports_per_port; subport ++) { err = rte_sched_subport_config(port, subport, &subport_params[subport]); if (err) { - rte_exit(EXIT_FAILURE, "Unable to config sched subport %u, err=%d\n", - subport, err); + rte_exit(EXIT_FAILURE, "Unable to config schedi " + "subport %u, err=%d\n", subport, err); + } + + err = rte_sched_subport_profile_config(port, subport, 0); + if (err) { + rte_exit(EXIT_FAILURE, "failed to configure " + "profile err=%d\n", err); + } uint32_t n_pipes_per_subport = @@ -354,6 +369,7 @@ app_load_cfg_profile(const char *profile) cfg_load_port(file, &port_params); cfg_load_subport(file, subport_params); + cfg_load_subport_profile(file, subport_profile); cfg_load_pipe(file, pipe_profiles); rte_cfgfile_close(file); diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h index 23bc418..0d6815a 100644 --- a/examples/qos_sched/main.h +++ b/examples/qos_sched/main.h @@ -51,6 +51,7 @@ extern "C" { #define MAX_SCHED_SUBPORTS 8 #define MAX_SCHED_PIPES 4096 #define MAX_SCHED_PIPE_PROFILES 256 +#define MAX_SCHED_SUBPORT_PROFILES 8 #ifndef APP_COLLECT_STAT #define APP_COLLECT_STAT 1 diff --git a/examples/qos_sched/profile.cfg b/examples/qos_sched/profile.cfg index 61b8b70..4486d27 100644 --- a/examples/qos_sched/profile.cfg +++ b/examples/qos_sched/profile.cfg @@ -26,6 +26,9 @@ number of subports per port = 1 number of pipes per subport = 4096 queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 +subport 0-8 = 0 ; These subports are configured with subport profile 0 + +[subport profile 0] tb rate = 1250000000 ; Bytes per second tb size = 1000000 ; Bytes From patchwork Thu Sep 17 08:42:36 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78025 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 91EF3A04B6; Thu, 17 Sep 2020 10:43:42 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 66BA71D5C4; Thu, 17 Sep 2020 10:43:15 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id 0D7D11D5AC for ; Thu, 17 Sep 2020 10:42:55 +0200 (CEST) IronPort-SDR: 4ZVrPsJF3fNBKDuWiYWylPAOXPxO9xsNHKjtrmNwgdqHtlsSROvO0m+QAp82r3D6guSAVd53LY q1AbLurhEXlA== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060497" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060497" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:42:55 -0700 IronPort-SDR: FEEWBFHXto4B10GMuSkQo55eU5qy9+m21GcD5xOWR09ACPKgvce8cVMwVLJQe687I3iVPG6Pj2 ZfInSHuKSHHQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385698" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 
01:42:54 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:36 +0100 Message-Id: <1600332159-26018-7-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 6/9] example/ip_pipeline: add dynamic config of subport X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Modify the ip_pipeline application to build the hierarchical scheduler with default subport bandwidth profile. It also allows to configure a subport with different subport bandwidth profile dynamically Signed-off-by: Savinay Dharmappa --- examples/ip_pipeline/cli.c | 14 ++++++++------ examples/ip_pipeline/tmgr.c | 26 ++++++++++++++++++++++++-- examples/ip_pipeline/tmgr.h | 3 ++- 3 files changed, 34 insertions(+), 9 deletions(-) diff --git a/examples/ip_pipeline/cli.c b/examples/ip_pipeline/cli.c index d79699e..7eccde3 100644 --- a/examples/ip_pipeline/cli.c +++ b/examples/ip_pipeline/cli.c @@ -407,6 +407,7 @@ cmd_tmgr_subport_profile(char **tokens, size_t out_size) { struct rte_sched_subport_params p; + struct rte_sched_subport_profile_params pp; int status, i; if (n_tokens != 35) { @@ -414,23 +415,23 @@ cmd_tmgr_subport_profile(char **tokens, return; } - if (parser_read_uint64(&p.tb_rate, tokens[3]) != 0) { + if (parser_read_uint64(&pp.tb_rate, tokens[3]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate"); return; } - if (parser_read_uint64(&p.tb_size, tokens[4]) != 0) { + if (parser_read_uint64(&pp.tb_size, tokens[4]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "tb_size"); return; } for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) - if (parser_read_uint64(&p.tc_rate[i], tokens[5 + i]) != 0) { + if (parser_read_uint64(&pp.tc_rate[i], tokens[5 + i]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "tc_rate"); return; } - if (parser_read_uint64(&p.tc_period, tokens[18]) != 0) { + if (parser_read_uint64(&pp.tc_period, tokens[18]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "tc_period"); return; } @@ -440,7 +441,8 @@ cmd_tmgr_subport_profile(char **tokens, return; } - if (parser_read_uint32(&p.n_pipes_per_subport_enabled, tokens[20]) != 0) { + if (parser_read_uint32(&p.n_pipes_per_subport_enabled, + tokens[20]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport"); return; } @@ -456,7 +458,7 @@ cmd_tmgr_subport_profile(char **tokens, return; } - status = tmgr_subport_profile_add(&p); + status = tmgr_subport_profile_add(&p, &pp); if (status != 0) { snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); return; diff --git a/examples/ip_pipeline/tmgr.c b/examples/ip_pipeline/tmgr.c index 91ccbf6..5c3df92 100644 --- a/examples/ip_pipeline/tmgr.c +++ b/examples/ip_pipeline/tmgr.c @@ -11,6 +11,9 @@ static struct rte_sched_subport_params subport_profile[TMGR_SUBPORT_PROFILE_MAX]; +static struct rte_sched_subport_profile_params + profile_params[TMGR_SUBPORT_PROFILE_MAX]; + static uint32_t n_subport_profiles; static struct rte_sched_pipe_params @@ -44,10 +47,11 @@ tmgr_port_find(const char *name) } int -tmgr_subport_profile_add(struct 
rte_sched_subport_params *p) +tmgr_subport_profile_add(struct rte_sched_subport_params *p, + struct rte_sched_subport_profile_params *pp) { /* Check input params */ - if (p == NULL || + if (p == NULL || pp == NULL || p->n_pipes_per_subport_enabled == 0) return -1; @@ -56,6 +60,10 @@ tmgr_subport_profile_add(struct rte_sched_subport_params *p) p, sizeof(*p)); + memcpy(&profile_params[n_subport_profiles], + pp, + sizeof(*pp)); + n_subport_profiles++; return 0; @@ -103,6 +111,9 @@ tmgr_port_create(const char *name, struct tmgr_port_params *params) p.mtu = params->mtu; p.frame_overhead = params->frame_overhead; p.n_subports_per_port = params->n_subports_per_port; + p.n_subport_profiles = n_subport_profiles; + p.subport_profiles = profile_params; + p.n_max_subport_profiles = TMGR_SUBPORT_PROFILE_MAX; p.n_pipes_per_subport = TMGR_PIPE_SUBPORT_MAX; s = rte_sched_port_config(&p); @@ -126,6 +137,13 @@ tmgr_port_create(const char *name, struct tmgr_port_params *params) return NULL; } + status = rte_sched_subport_profile_config(s, i, 0); + + if (status) { + rte_sched_port_free(s); + return NULL; + } + for (j = 0; j < subport_profile[0].n_pipes_per_subport_enabled; j++) { status = rte_sched_pipe_config( s, @@ -182,6 +200,10 @@ tmgr_subport_config(const char *port_name, subport_id, &subport_profile[subport_profile_id]); + if (!status) + status = rte_sched_subport_profile_config(port->s, subport_id, + subport_profile_id); + return status; } diff --git a/examples/ip_pipeline/tmgr.h b/examples/ip_pipeline/tmgr.h index ee50cf7..8c14523 100644 --- a/examples/ip_pipeline/tmgr.h +++ b/examples/ip_pipeline/tmgr.h @@ -48,7 +48,8 @@ struct tmgr_port_params { }; int -tmgr_subport_profile_add(struct rte_sched_subport_params *p); +tmgr_subport_profile_add(struct rte_sched_subport_params *p, + struct rte_sched_subport_profile_params *pp); int tmgr_pipe_profile_add(struct rte_sched_pipe_params *p); From patchwork Thu Sep 17 08:42:37 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78026 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 023CEA04B6; Thu, 17 Sep 2020 10:43:52 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 044471D5C7; Thu, 17 Sep 2020 10:43:17 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id E09891D5B5 for ; Thu, 17 Sep 2020 10:42:57 +0200 (CEST) IronPort-SDR: ZrNfKXpLKKO4MZYErTcsIZpgu3nIelhGCoPi5W0Ziee/GR4w4Bo4RzmYj6x4fiD66sTA8I2v3H O/keV4IZ0tNg== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060500" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060500" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:42:57 -0700 IronPort-SDR: TEs+6qBx/4Ahcy1ps2C2VsHsl0ibQYWNLIdpZFIME5NRz9SfLu+TZ7KiyWNoyjhpWUQWuFAghV pDJgcZa7+C+A== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385706" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 01:42:55 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, 
dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:37 +0100 Message-Id: <1600332159-26018-8-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 7/9] drivers/softnic: add dynamic config of subport X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Modify the softnic drivers to build the hierarchical scheduler with default subport bandwidth profile. It also allows to configure a subport with different subport bandwidth profile dynamically. Signed-off-by: Savinay Dharmappa --- drivers/net/softnic/rte_eth_softnic_internals.h | 9 + drivers/net/softnic/rte_eth_softnic_tm.c | 223 +++++++++++++++++++----- 2 files changed, 187 insertions(+), 45 deletions(-) diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h index 6eec43b..cc50037 100644 --- a/drivers/net/softnic/rte_eth_softnic_internals.h +++ b/drivers/net/softnic/rte_eth_softnic_internals.h @@ -164,10 +164,19 @@ TAILQ_HEAD(softnic_link_list, softnic_link); #ifndef TM_MAX_PIPE_PROFILE #define TM_MAX_PIPE_PROFILE 256 #endif + +#ifndef TM_MAX_SUBPORT_PROFILE +#define TM_MAX_SUBPORT_PROFILE 256 +#endif + struct tm_params { struct rte_sched_port_params port_params; struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS]; + struct rte_sched_subport_profile_params + subport_profiles[TM_MAX_SUBPORT_PROFILE]; + uint32_t n_subport_profiles; + uint32_t subport_to_profile[TM_MAX_SUBPORT_PROFILE]; struct rte_sched_pipe_params pipe_profiles[TM_MAX_PIPE_PROFILE]; uint32_t n_pipe_profiles; diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c index 80a470c..a223655 100644 --- a/drivers/net/softnic/rte_eth_softnic_tm.c +++ b/drivers/net/softnic/rte_eth_softnic_tm.c @@ -98,6 +98,13 @@ softnic_tmgr_port_create(struct pmd_internals *p, return NULL; } + status = rte_sched_subport_profile_config(sched, + subport_id, t->subport_to_profile[subport_id]); + if (status) { + rte_sched_port_free(sched); + return NULL; + } + /* Pipe */ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) { int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id; @@ -1043,6 +1050,25 @@ tm_shared_shaper_get_tc(struct rte_eth_dev *dev, } static int +subport_profile_exists(struct rte_eth_dev *dev, + struct rte_sched_subport_profile_params *sp, + uint32_t *subport_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + uint32_t i; + + for (i = 0; i < t->n_subport_profiles; i++) + if (memcmp(&t->subport_profiles[i], sp, sizeof(*sp)) == 0) { + if (subport_profile_id) + *subport_profile_id = i; + return 1; + } + + return 0; +} + +static int update_subport_tc_rate(struct rte_eth_dev *dev, struct tm_node *nt, struct tm_shared_shaper *ss, @@ -1050,26 +1076,27 @@ update_subport_tc_rate(struct rte_eth_dev *dev, { struct pmd_internals *p = dev->data->dev_private; uint32_t tc_id = tm_node_tc_id(dev, nt); - struct tm_node *np = nt->parent_node; - struct tm_node *ns = np->parent_node; uint32_t subport_id = tm_node_subport_id(dev, ns); - - struct rte_sched_subport_params 
subport_params; - + struct rte_sched_subport_profile_params subport_profile; struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev, ss->shaper_profile_id); + uint32_t subport_profile_id; /* Derive new subport configuration. */ - memcpy(&subport_params, - &p->soft.tm.params.subport_params[subport_id], - sizeof(subport_params)); - subport_params.tc_rate[tc_id] = sp_new->params.peak.rate; + memcpy(&subport_profile, + &p->soft.tm.params.subport_profiles[subport_id], + sizeof(subport_profile)); + subport_profile.tc_rate[tc_id] = sp_new->params.peak.rate; + + if (subport_profile_exists(dev, &subport_profile, + &subport_profile_id) == 0) + return -1; /* Update the subport configuration. */ - if (rte_sched_subport_config(SCHED(p), - subport_id, &subport_params)) + if (rte_sched_subport_profile_config(SCHED(p), + subport_id, subport_profile_id)) return -1; /* Commit changes. */ @@ -1078,9 +1105,9 @@ update_subport_tc_rate(struct rte_eth_dev *dev, ss->shaper_profile_id = sp_new->shaper_profile_id; sp_new->n_users++; - memcpy(&p->soft.tm.params.subport_params[subport_id], - &subport_params, - sizeof(subport_params)); + memcpy(&p->soft.tm.params.subport_profiles[subport_id], + &subport_profile, + sizeof(subport_profile)); return 0; } @@ -2190,6 +2217,108 @@ pipe_profiles_generate(struct rte_eth_dev *dev) return 0; } +static struct rte_sched_subport_profile_params * +subport_profile_get(struct rte_eth_dev *dev, struct tm_node *np) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + uint32_t subport_id = tm_node_subport_id(dev, np->parent_node); + + return &t->subport_profiles[subport_id]; +} + +static void +subport_profile_mark(struct rte_eth_dev *dev, + uint32_t subport_id, + uint32_t subport_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + + t->subport_to_profile[subport_id] = subport_profile_id; +} + +static void +subport_profile_install(struct rte_eth_dev *dev, + struct rte_sched_subport_profile_params *sp, + uint32_t subport_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + + memcpy(&t->subport_profiles[subport_profile_id], sp, sizeof(*sp)); + t->n_subport_profiles++; +} + +static int +subport_profile_free_exists(struct rte_eth_dev *dev, + uint32_t *subport_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + + if (t->n_subport_profiles < TM_MAX_SUBPORT_PROFILE) { + *subport_profile_id = t->n_subport_profiles; + return 1; + } + + return 0; +} + +static void +subport_profile_build(struct tm_node *np, + struct rte_sched_subport_profile_params *sp) +{ + memset(sp, 0, sizeof(*sp)); + + /* Pipe */ + sp->tb_rate = np->shaper_profile->params.peak.rate; + sp->tb_size = np->shaper_profile->params.peak.size; + + /* Traffic Class (TC) */ + sp->tc_period = SUBPORT_TC_PERIOD; +} + +static int +subport_profiles_generate(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + struct tm_node_list *nl = &h->nodes; + struct tm_node *ns; + uint32_t subport_id; + + /* Objective: Fill in the following fields in struct tm_params: + * - subport_profiles + * - n_subport_profiles + * - subport_to_profile + */ + + subport_id = 0; + TAILQ_FOREACH(ns, nl, node) { + if (ns->level != TM_NODE_LEVEL_SUBPORT) + continue; + + struct rte_sched_subport_profile_params sp; + uint32_t pos; + + subport_profile_build(ns, &sp); + 
+ if (!subport_profile_exists(dev, &sp, &pos)) { + if (!subport_profile_free_exists(dev, &pos)) + return -1; + + subport_profile_install(dev, &sp, pos); + } + + subport_profile_mark(dev, subport_id, pos); + + subport_id++; + } + + return 0; +} + static struct tm_wred_profile * tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id) { @@ -2447,6 +2576,15 @@ hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error) rte_strerror(EINVAL)); } + /* Not too many subport profiles. */ + if (subport_profiles_generate(dev)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + + /* Not too many pipe profiles. */ if (pipe_profiles_generate(dev)) return -rte_tm_error_set(error, @@ -2528,6 +2666,9 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev) .frame_overhead = root->shaper_profile->params.pkt_length_adjust, .n_subports_per_port = root->n_children, + .n_subport_profiles = t->n_subport_profiles, + .subport_profiles = t->subport_profiles, + .n_max_subport_profiles = TM_MAX_SUBPORT_PROFILE, .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT, }; @@ -2548,28 +2689,11 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev) ss->shaper_profile_id) : n->shaper_profile; tc_rate[i] = sp->params.peak.rate; + t->subport_profiles[subport_id].tc_rate[i] = tc_rate[i]; } t->subport_params[subport_id] = (struct rte_sched_subport_params) { - .tb_rate = n->shaper_profile->params.peak.rate, - .tb_size = n->shaper_profile->params.peak.size, - - .tc_rate = {tc_rate[0], - tc_rate[1], - tc_rate[2], - tc_rate[3], - tc_rate[4], - tc_rate[5], - tc_rate[6], - tc_rate[7], - tc_rate[8], - tc_rate[9], - tc_rate[10], - tc_rate[11], - tc_rate[12], - }, - .tc_period = SUBPORT_TC_PERIOD, .n_pipes_per_subport_enabled = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT], @@ -2829,30 +2953,39 @@ update_subport_rate(struct rte_eth_dev *dev, struct pmd_internals *p = dev->data->dev_private; uint32_t subport_id = tm_node_subport_id(dev, ns); - struct rte_sched_subport_params subport_params; + struct rte_sched_subport_profile_params *profile0 = + subport_profile_get(dev, ns); + struct rte_sched_subport_profile_params profile1; + uint32_t subport_profile_id; - /* Derive new subport configuration. */ - memcpy(&subport_params, - &p->soft.tm.params.subport_params[subport_id], - sizeof(subport_params)); - subport_params.tb_rate = sp->params.peak.rate; - subport_params.tb_size = sp->params.peak.size; + /* Derive new pipe profile. */ + memcpy(&profile1, profile0, sizeof(profile1)); + profile1.tb_rate = sp->params.peak.rate; + profile1.tb_size = sp->params.peak.size; + + /* Since implementation does not allow adding more subport profiles + * after port configuration, the pipe configuration can be successfully + * updated only if the new profile is also part of the existing set of + * pipe profiles. + */ + if (subport_profile_exists(dev, &profile1, &subport_profile_id) == 0) + return -1; /* Update the subport configuration. */ - if (rte_sched_subport_config(SCHED(p), subport_id, - &subport_params)) + if (rte_sched_subport_profile_config(SCHED(p), subport_id, + subport_profile_id)) return -1; + subport_profile_mark(dev, subport_id, subport_profile_id); /* Commit changes. 
*/ ns->shaper_profile->n_users--; - ns->shaper_profile = sp; ns->params.shaper_profile_id = sp->shaper_profile_id; sp->n_users++; - memcpy(&p->soft.tm.params.subport_params[subport_id], - &subport_params, - sizeof(subport_params)); + memcpy(&p->soft.tm.params.subport_profiles[subport_id], + &profile1, + sizeof(profile1)); return 0; } From patchwork Thu Sep 17 08:42:38 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78027 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9D0C8A04B6; Thu, 17 Sep 2020 10:43:59 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 43F6E1D5CB; Thu, 17 Sep 2020 10:43:18 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id 630081D5B5 for ; Thu, 17 Sep 2020 10:42:59 +0200 (CEST) IronPort-SDR: TS5MCqA/KG6UKKSaoT3p5jWuIl7H6dyOj6j9TFdukeXs0Clba71O3PXhrkEl3yCouYYcsu3Etu UtLEF6b1Nx2w== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060502" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060502" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:42:58 -0700 IronPort-SDR: 4IWQdINc42ZLhMl0D4kIz+DpKqKg1KQqWnWWM7jlbkLmebbBITFPB6S0qn2W2BOCBb+6LNAXWy zZM3jMh1ZzGA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385717" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 01:42:57 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:38 +0100 Message-Id: <1600332159-26018-9-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 8/9] app/test_sched: add dynamic config of subport X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Modify the test_sched application to build the hierarchical scheduler with default subport bandwidth profile. 
It also allows a subport to be configured dynamically with a different subport bandwidth profile. Signed-off-by: Savinay Dharmappa --- app/test/test_sched.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/app/test/test_sched.c b/app/test/test_sched.c index fc31080..8d1eb25 100644 --- a/app/test/test_sched.c +++ b/app/test/test_sched.c @@ -21,6 +21,7 @@ #define PIPE 1 #define TC 2 #define QUEUE 0 +#define MAX_SCHED_SUBPORT_PROFILES 8 static struct rte_sched_pipe_params pipe_profile[] = { { /* Profile #0 */ @@ -36,15 +37,20 @@ static struct rte_sched_pipe_params pipe_profile[] = { }, }; -static struct rte_sched_subport_params subport_param[] = { +static struct rte_sched_subport_profile_params + subport_profile[] = { { .tb_rate = 1250000000, .tb_size = 1000000, - .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 1250000000}, .tc_period = 10, + }, +}; + +static struct rte_sched_subport_params subport_param[] = { + { .n_pipes_per_subport_enabled = 1024, .qsize = {32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32}, .pipe_profiles = pipe_profile, @@ -59,6 +65,9 @@ static struct rte_sched_port_params port_param = { .mtu = 1522, .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT, .n_subports_per_port = 1, + .n_subport_profiles = 1, + .subport_profiles = subport_profile, + .n_max_subport_profiles = MAX_SCHED_SUBPORT_PROFILES, .n_pipes_per_subport = 1024, }; @@ -66,6 +75,7 @@ static struct rte_sched_port_params port_param = { #define MBUF_DATA_SZ (2048 + RTE_PKTMBUF_HEADROOM) #define MEMPOOL_CACHE_SZ 0 #define SOCKET 0 +#define DEFAULT_PROFILE 0 static struct rte_mempool * @@ -141,6 +151,10 @@ test_sched(void) err = rte_sched_subport_config(port, SUBPORT, subport_param); TEST_ASSERT_SUCCESS(err, "Error config sched, err=%d\n", err); + err = rte_sched_subport_profile_config(port, SUBPORT, + DEFAULT_PROFILE); + TEST_ASSERT_SUCCESS(err, "Error config sched, err=%d\n", err); + for (pipe = 0; pipe < subport_param[0].n_pipes_per_subport_enabled; pipe++) { err = rte_sched_pipe_config(port, SUBPORT, pipe, 0); TEST_ASSERT_SUCCESS(err, "Error config sched pipe %u, err=%d\n", pipe, err); From patchwork Thu Sep 17 08:42:39 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Savinay Dharmappa X-Patchwork-Id: 78028 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id E3A1CA04B6; Thu, 17 Sep 2020 10:44:09 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 7AF291D5D2; Thu, 17 Sep 2020 10:43:19 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id B81321D5C3 for ; Thu, 17 Sep 2020 10:43:00 +0200 (CEST) IronPort-SDR: ckn/OuJ0rZhqGKcr8wPZ0Mt1Qa480eLvPtKYYrS+kImDpFg9EOIX9K4PR2Vtp+Me1mt0AjuTG9 EnjEkSAfuPFw== X-IronPort-AV: E=McAfee;i="6000,8403,9746"; a="157060505" X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="157060505" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2020 01:43:00 -0700 IronPort-SDR:
pG547GrFb7cKuSmToOQW5obLz/Y1fJdE0mb0uzfwlvKA79ppBJNRLrRRd4VH6iej3pG/jLCTj8 OdzgAV/PP0wA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,436,1592895600"; d="scan'208";a="307385727" Received: from silpixa00400629.ir.intel.com ([10.237.214.135]) by orsmga006.jf.intel.com with ESMTP; 17 Sep 2020 01:42:58 -0700 From: Savinay Dharmappa To: jasvinder.singh@intel.com, cristian.dumitrescu@intel.com, dev@dpdk.org Cc: savinay.dharmappa@intel.com Date: Thu, 17 Sep 2020 09:42:39 +0100 Message-Id: <1600332159-26018-10-git-send-email-savinay.dharmappa@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> References: <1600274633-371993-1-git-send-email-savinay.dharmappa@intel.com> <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com> Subject: [dpdk-dev] [PATCH v4 9/9] sched: remove the redundant code X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Remove redundant data structure field references from functions and from the subport-level data structures. It also updates the release notes and the deprecation notice. Signed-off-by: Savinay Dharmappa --- doc/guides/rel_notes/deprecation.rst | 6 -- doc/guides/rel_notes/release_20_11.rst | 4 ++ lib/librte_sched/rte_sched.c | 115 +-------------------------------- lib/librte_sched/rte_sched.h | 12 ---- 4 files changed, 6 insertions(+), 131 deletions(-) diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst index 1f888fa..72eb190 100644 --- a/doc/guides/rel_notes/deprecation.rst +++ b/doc/guides/rel_notes/deprecation.rst @@ -294,12 +294,6 @@ Deprecation Notices in "rte_sched.h". These changes are aligned to improvements suggested in the RFC https://mails.dpdk.org/archives/dev/2018-November/120035.html. -* sched: To allow dynamic configuration of the subport bandwidth profile, - changes will be made to data structures ``rte_sched_subport_params``, - ``rte_sched_port_params`` and new data structure, API functions will be - defined in ``rte_sched.h``. These changes are aligned as suggested in the - RFC https://mails.dpdk.org/archives/dev/2020-July/175161.html - * metrics: The function ``rte_metrics_init`` will have a non-void return in order to notify errors instead of calling ``rte_exit``. diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst index cc72609..9b05913 100644 --- a/doc/guides/rel_notes/release_20_11.rst +++ b/doc/guides/rel_notes/release_20_11.rst @@ -68,6 +68,10 @@ Removed Items Also, make sure to start the actual text at the margin. ======================================================= +* sched: The subport bandwidth configuration parameters such as tb_rate, + tc_rate, tc_period etc., are moved from the subport-level data structure + to a new data structure. This allows a subport to be configured + dynamically with a different subport bandwidth configuration.
API Changes ----------- diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c index 3f61afa..6b6892d 100644 --- a/lib/librte_sched/rte_sched.c +++ b/lib/librte_sched/rte_sched.c @@ -152,16 +152,11 @@ struct rte_sched_grinder { struct rte_sched_subport { /* Token bucket (TB) */ uint64_t tb_time; /* time of last update */ - uint64_t tb_period; - uint64_t tb_credits_per_period; - uint64_t tb_size; uint64_t tb_credits; /* Traffic classes (TCs) */ uint64_t tc_time; /* time of next update */ - uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - uint64_t tc_period; /* TC oversubscription */ uint64_t tc_ov_wm; @@ -837,18 +832,6 @@ rte_sched_subport_check_params(struct rte_sched_subport_params *params, return -EINVAL; } - if (params->tb_rate == 0 || params->tb_rate > rate) { - RTE_LOG(ERR, SCHED, - "%s: Incorrect value for tb rate\n", __func__); - return -EINVAL; - } - - if (params->tb_size == 0) { - RTE_LOG(ERR, SCHED, - "%s: Incorrect value for tb size\n", __func__); - return -EINVAL; - } - /* qsize: if non-zero, power of 2, * no bigger than 32K (due to 16-bit read/write pointers) */ @@ -862,29 +845,8 @@ rte_sched_subport_check_params(struct rte_sched_subport_params *params, } } - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { - uint64_t tc_rate = params->tc_rate[i]; - uint16_t qsize = params->qsize[i]; - - if ((qsize == 0 && tc_rate != 0) || - (qsize != 0 && tc_rate == 0) || - (tc_rate > params->tb_rate)) { - RTE_LOG(ERR, SCHED, - "%s: Incorrect value for tc rate\n", __func__); - return -EINVAL; - } - } - - if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 || - params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { - RTE_LOG(ERR, SCHED, - "%s: Incorrect qsize or tc rate(best effort)\n", __func__); - return -EINVAL; - } - - if (params->tc_period == 0) { - RTE_LOG(ERR, SCHED, - "%s: Incorrect value for tc period\n", __func__); + if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) { + RTE_LOG(ERR, SCHED, "%s: Incorrect qsize\n", __func__); return -EINVAL; } @@ -1101,48 +1063,6 @@ rte_sched_port_free(struct rte_sched_port *port) } static void -rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i) -{ - struct rte_sched_subport *s = port->subports[i]; - - RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n" - " Token bucket: period = %"PRIu64", credits per period = %"PRIu64 - ", size = %"PRIu64"\n" - " Traffic classes: period = %"PRIu64"\n" - " credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 - ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64 - ", %"PRIu64", %"PRIu64", %"PRIu64"]\n" - " Best effort traffic class oversubscription: wm min = %"PRIu64 - ", wm max = %"PRIu64"\n", - i, - - /* Token bucket */ - s->tb_period, - s->tb_credits_per_period, - s->tb_size, - - /* Traffic classes */ - s->tc_period, - s->tc_credits_per_period[0], - s->tc_credits_per_period[1], - s->tc_credits_per_period[2], - s->tc_credits_per_period[3], - s->tc_credits_per_period[4], - s->tc_credits_per_period[5], - s->tc_credits_per_period[6], - s->tc_credits_per_period[7], - s->tc_credits_per_period[8], - s->tc_credits_per_period[9], - s->tc_credits_per_period[10], - s->tc_credits_per_period[11], - s->tc_credits_per_period[12], - - /* Best effort traffic class oversubscription */ - s->tc_ov_wm_min, - s->tc_ov_wm_max); -} - -static void rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports) { uint32_t i; @@ -1215,33 +1135,7 @@ 
rte_sched_subport_config(struct rte_sched_port *port, /* Port */ port->subports[subport_id] = s; - /* Token Bucket (TB) */ - if (params->tb_rate == port->rate) { - s->tb_credits_per_period = 1; - s->tb_period = 1; - } else { - double tb_rate = ((double) params->tb_rate) / ((double) port->rate); - double d = RTE_SCHED_TB_RATE_CONFIG_ERR; - - rte_approx_64(tb_rate, d, &s->tb_credits_per_period, &s->tb_period); - } - - s->tb_size = params->tb_size; s->tb_time = port->time; - s->tb_credits = s->tb_size / 2; - - /* Traffic Classes (TCs) */ - s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate); - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { - if (params->qsize[i]) - s->tc_credits_per_period[i] - = rte_sched_time_ms_to_bytes(params->tc_period, - params->tc_rate[i]); - } - s->tc_time = port->time + s->tc_period; - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) - if (params->qsize[i]) - s->tc_credits[i] = s->tc_credits_per_period[i]; /* compile time checks */ RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0); @@ -1331,17 +1225,12 @@ rte_sched_subport_config(struct rte_sched_port *port, #ifdef RTE_SCHED_SUBPORT_TC_OV /* TC oversubscription */ s->tc_ov_wm_min = port->mtu; - s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period, - s->pipe_tc_be_rate_max); s->tc_ov_wm = s->tc_ov_wm_max; s->tc_ov_period_id = 0; s->tc_ov = 0; s->tc_ov_n = 0; s->tc_ov_rate = 0; #endif - - rte_sched_port_log_subport_config(port, subport_id); - return 0; } diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h index a7c2638..7623919 100644 --- a/lib/librte_sched/rte_sched.h +++ b/lib/librte_sched/rte_sched.h @@ -149,18 +149,6 @@ struct rte_sched_pipe_params { * byte. */ struct rte_sched_subport_params { - /** Token bucket rate (measured in bytes per second) */ - uint64_t tb_rate; - - /** Token bucket size (measured in credits) */ - uint64_t tb_size; - - /** Traffic class rates (measured in bytes per second) */ - uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - - /** Enforcement period for rates (measured in milliseconds) */ - uint64_t tc_period; - /** Number of subport pipes. * The subport can enable/allocate fewer pipes than the maximum * number set through struct port_params::n_max_pipes_per_subport,