From patchwork Tue May 24 13:43:32 2022
X-Patchwork-Submitter: "Danilewicz, MarcinX"
X-Patchwork-Id: 111737
X-Patchwork-Delegate: thomas@monjalon.net
From: Marcin Danilewicz
To: dev@dpdk.org, jasvinder.singh@intel.com, cristian.dumitrescu@intel.com
Cc: megha.ajmera@intel.com
Subject: [PATCH v4] sched: enable traffic class oversubscription conditionally
Date: Tue, 24 May 2022 13:43:32 +0000
Message-Id: <20220524134332.644318-1-marcinx.danilewicz@intel.com>
In-Reply-To: <20220427092357.491720-1-marcinx.danilewicz@intel.com>
References: <20220427092357.491720-1-marcinx.danilewicz@intel.com>

Added new API to enable or disable TC oversubscription for the
best-effort traffic class at subport level. Applied review comments
and improved throughput. TC oversubscription is disabled by default.
History:
- v1 - TC OV disabled by default
- v2 - throughput improvements
- v3, v4 - changes from comments

Signed-off-by: Marcin Danilewicz
---
 lib/sched/rte_sched.c | 189 +++++++++++++++++++++++++++++++++++-------
 lib/sched/rte_sched.h | 18 ++++
 lib/sched/version.map | 3 +
 3 files changed, 178 insertions(+), 32 deletions(-)

diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index ec74bee939..6e7d81df46 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -213,6 +213,9 @@ struct rte_sched_subport {
 	uint8_t *bmp_array;
 	struct rte_mbuf **queue_array;
 	uint8_t memory[0] __rte_cache_aligned;
+
+	/* TC oversubscription activation */
+	int is_tc_ov_enabled;
 } __rte_cache_aligned;
 
 struct rte_sched_port {
@@ -1165,6 +1168,45 @@ rte_sched_cman_config(struct rte_sched_port *port,
 }
 #endif
 
+int
+rte_sched_subport_tc_ov_config(struct rte_sched_port *port,
+	uint32_t subport_id,
+	bool tc_ov_enable)
+{
+	struct rte_sched_subport *s;
+	struct rte_sched_subport_profile *profile;
+
+	if (port == NULL) {
+		RTE_LOG(ERR, SCHED,
+			"%s: Incorrect value for parameter port\n", __func__);
+		return -EINVAL;
+	}
+
+	if (subport_id >= port->n_subports_per_port) {
+		RTE_LOG(ERR, SCHED,
+			"%s: Incorrect value for parameter subport id\n", __func__);
+		return -EINVAL;
+	}
+
+	s = port->subports[subport_id];
+	s->is_tc_ov_enabled = tc_ov_enable ? 1 : 0;
+
+	if (s->is_tc_ov_enabled) {
+		/* TC oversubscription */
+		s->tc_ov_wm_min = port->mtu;
+		s->tc_ov_period_id = 0;
+		s->tc_ov = 0;
+		s->tc_ov_n = 0;
+		s->tc_ov_rate = 0;
+
+		profile = port->subport_profiles + s->profile;
+		s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
+			s->pipe_tc_be_rate_max);
+		s->tc_ov_wm = s->tc_ov_wm_max;
+	}
+	return 0;
+}
+
 int
 rte_sched_subport_config(struct rte_sched_port *port,
 	uint32_t subport_id,
@@ -1254,6 +1296,9 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	s->n_pipe_profiles = params->n_pipe_profiles;
 	s->n_max_pipe_profiles = params->n_max_pipe_profiles;
 
+	/* TC over-subscription is disabled by default */
+	s->is_tc_ov_enabled = 0;
+
 #ifdef RTE_SCHED_CMAN
 	if (params->cman_params != NULL) {
 		s->cman_enabled = true;
@@ -1316,13 +1361,6 @@ rte_sched_subport_config(struct rte_sched_port *port,
 
 		for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
 			s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
-
-		/* TC oversubscription */
-		s->tc_ov_wm_min = port->mtu;
-		s->tc_ov_period_id = 0;
-		s->tc_ov = 0;
-		s->tc_ov_n = 0;
-		s->tc_ov_rate = 0;
 	}
 
 	{
@@ -1342,9 +1380,6 @@ rte_sched_subport_config(struct rte_sched_port *port,
 			else
 				profile->tc_credits_per_period[i] = 0;
 
-		s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
-					s->pipe_tc_be_rate_max);
-		s->tc_ov_wm = s->tc_ov_wm_max;
 		s->profile = subport_profile_id;
 	}
 
@@ -1417,17 +1452,20 @@ rte_sched_pipe_config(struct rte_sched_port *port,
 		double pipe_tc_be_rate =
 			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
 			/ (double) params->tc_period;
-		uint32_t tc_be_ov = s->tc_ov;
 
-		/* Unplug pipe from its subport */
-		s->tc_ov_n -= params->tc_ov_weight;
-		s->tc_ov_rate -= pipe_tc_be_rate;
-		s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
+		if (s->is_tc_ov_enabled) {
+			uint32_t tc_be_ov = s->tc_ov;
 
-		if (s->tc_ov != tc_be_ov) {
-			RTE_LOG(DEBUG, SCHED,
-				"Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
-				subport_id, subport_tc_be_rate, s->tc_ov_rate);
+			/* Unplug pipe from its subport */
+			s->tc_ov_n -= params->tc_ov_weight;
+			s->tc_ov_rate -= pipe_tc_be_rate;
+			s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
+
+			if (s->tc_ov != tc_be_ov) {
+				RTE_LOG(DEBUG, SCHED,
+					"Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
+					subport_id, subport_tc_be_rate, s->tc_ov_rate);
+			}
 		}
 
 		/* Reset the pipe */
@@ -1460,19 +1498,22 @@ rte_sched_pipe_config(struct rte_sched_port *port,
 		double pipe_tc_be_rate =
 			(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
 			/ (double) params->tc_period;
-		uint32_t tc_be_ov = s->tc_ov;
 
-		s->tc_ov_n += params->tc_ov_weight;
-		s->tc_ov_rate += pipe_tc_be_rate;
-		s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
+		if (s->is_tc_ov_enabled) {
+			uint32_t tc_be_ov = s->tc_ov;
+
+			s->tc_ov_n += params->tc_ov_weight;
+			s->tc_ov_rate += pipe_tc_be_rate;
+			s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
 
-		if (s->tc_ov != tc_be_ov) {
-			RTE_LOG(DEBUG, SCHED,
-				"Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
-				subport_id, subport_tc_be_rate, s->tc_ov_rate);
+			if (s->tc_ov != tc_be_ov) {
+				RTE_LOG(DEBUG, SCHED,
+					"Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
+					subport_id, subport_tc_be_rate, s->tc_ov_rate);
+			}
+			p->tc_ov_period_id = s->tc_ov_period_id;
+			p->tc_ov_credits = s->tc_ov_wm;
 		}
-		p->tc_ov_period_id = s->tc_ov_period_id;
-		p->tc_ov_credits = s->tc_ov_wm;
 	}
 
 	return 0;
@@ -2318,6 +2359,45 @@ grinder_credits_update(struct rte_sched_port *port,
 	pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
 	pipe->tb_time += n_periods * params->tb_period;
 
+	/* Subport TCs */
+	if (unlikely(port->time >= subport->tc_time)) {
+		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+			subport->tc_credits[i] = sp->tc_credits_per_period[i];
+
+		subport->tc_time = port->time + sp->tc_period;
+	}
+
+	/* Pipe TCs */
+	if (unlikely(port->time >= pipe->tc_time)) {
+		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+			pipe->tc_credits[i] = params->tc_credits_per_period[i];
+		pipe->tc_time = port->time + params->tc_period;
+	}
+}
+
+static inline void
+grinder_credits_update_with_tc_ov(struct rte_sched_port *port,
+	struct rte_sched_subport *subport, uint32_t pos)
+{
+	struct rte_sched_grinder *grinder = subport->grinder + pos;
+	struct rte_sched_pipe *pipe = grinder->pipe;
+	struct rte_sched_pipe_profile *params = grinder->pipe_params;
+	struct rte_sched_subport_profile *sp = grinder->subport_params;
+	uint64_t n_periods;
+	uint32_t i;
+
+	/* Subport TB */
+	n_periods = (port->time - subport->tb_time) / sp->tb_period;
+	subport->tb_credits += n_periods * sp->tb_credits_per_period;
+	subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
+	subport->tb_time += n_periods * sp->tb_period;
+
+	/* Pipe TB */
+	n_periods = (port->time - pipe->tb_time) / params->tb_period;
+	pipe->tb_credits += n_periods * params->tb_credits_per_period;
+	pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
+	pipe->tb_time += n_periods * params->tb_period;
+
 	/* Subport TCs */
 	if (unlikely(port->time >= subport->tc_time)) {
 		subport->tc_ov_wm =
@@ -2348,6 +2428,39 @@ grinder_credits_update(struct rte_sched_port *port,
 static inline int
 grinder_credits_check(struct rte_sched_port *port,
 	struct rte_sched_subport *subport, uint32_t pos)
+{
+	struct rte_sched_grinder *grinder = subport->grinder + pos;
+	struct rte_sched_pipe *pipe = grinder->pipe;
+	struct rte_mbuf *pkt = grinder->pkt;
+	uint32_t tc_index = grinder->tc_index;
+	uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
+	uint64_t subport_tb_credits = subport->tb_credits;
+	uint64_t subport_tc_credits = subport->tc_credits[tc_index];
+	uint64_t pipe_tb_credits = pipe->tb_credits;
+	uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
+	int enough_credits;
+
+	/* Check pipe and subport credits */
+	enough_credits = (pkt_len <= subport_tb_credits) &&
+		(pkt_len <= subport_tc_credits) &&
+		(pkt_len <= pipe_tb_credits) &&
+		(pkt_len <= pipe_tc_credits);
+
+	if (!enough_credits)
+		return 0;
+
+	/* Update pipe and subport credits */
+	subport->tb_credits -= pkt_len;
+	subport->tc_credits[tc_index] -= pkt_len;
+	pipe->tb_credits -= pkt_len;
+	pipe->tc_credits[tc_index] -= pkt_len;
+
+	return 1;
+}
+
+static inline int
+grinder_credits_check_with_tc_ov(struct rte_sched_port *port,
+	struct rte_sched_subport *subport, uint32_t pos)
 {
 	struct rte_sched_grinder *grinder = subport->grinder + pos;
 	struct rte_sched_pipe *pipe = grinder->pipe;
@@ -2403,8 +2516,16 @@ grinder_schedule(struct rte_sched_port *port,
 	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
 	uint32_t be_tc_active;
 
-	if (!grinder_credits_check(port, subport, pos))
-		return 0;
+	switch (subport->is_tc_ov_enabled) {
+	case 1:
+		if (!grinder_credits_check_with_tc_ov(port, subport, pos))
+			return 0;
+		break;
+	case 0:
+		if (!grinder_credits_check(port, subport, pos))
+			return 0;
+		break;
+	}
 
 	/* Advance port time */
 	port->time += pkt_len;
@@ -2770,7 +2891,11 @@ grinder_handle(struct rte_sched_port *port,
 					subport->profile;
 
 		grinder_prefetch_tc_queue_arrays(subport, pos);
-		grinder_credits_update(port, subport, pos);
+
+		if (unlikely(subport->is_tc_ov_enabled))
+			grinder_credits_update_with_tc_ov(port, subport, pos);
+		else
+			grinder_credits_update(port, subport, pos);
 
 		grinder->state = e_GRINDER_PREFETCH_MBUF;
 		return 0;
diff --git a/lib/sched/rte_sched.h b/lib/sched/rte_sched.h
index 5ece64e527..94febe1d94 100644
--- a/lib/sched/rte_sched.h
+++ b/lib/sched/rte_sched.h
@@ -579,6 +579,24 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint
 int
 rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
 
+/**
+ * Hierarchical scheduler subport TC OV enable/disable config.
+ * Note that this function is safe to use at runtime
+ * to enable/disable TC OV for subport.
+ *
+ * @param port
+ *   Handle to port scheduler instance
+ * @param subport_id
+ *   Subport ID
+ * @param tc_ov_enable
+ *   Boolean flag to enable/disable TC OV
+ * @return
+ *   0 upon success, error code otherwise
+ */
+__rte_experimental
+int
+rte_sched_subport_tc_ov_config(struct rte_sched_port *port, uint32_t subport_id, bool tc_ov_enable);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/sched/version.map b/lib/sched/version.map
index d22c07fc9f..c6e994d8df 100644
--- a/lib/sched/version.map
+++ b/lib/sched/version.map
@@ -34,4 +34,7 @@ EXPERIMENTAL {
 	# added in 21.11
 	rte_pie_rt_data_init;
 	rte_pie_config_init;
+
+	# added in 22.03
+	rte_sched_subport_tc_ov_config;
 };
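
For reference, a minimal usage sketch of the new API (not part of the patch).
It assumes the scheduler port and subport have already been set up with
rte_sched_port_config() and rte_sched_subport_config(), and that the
application builds with ALLOW_EXPERIMENTAL_API since the function is marked
__rte_experimental. The helper name app_set_tc_ov() is illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#include <rte_sched.h>

/* Toggle best-effort TC oversubscription on one subport of an already
 * configured scheduler port. TC OV is disabled by default after
 * rte_sched_subport_config(), and the new API is safe to call at runtime.
 */
static int
app_set_tc_ov(struct rte_sched_port *port, uint32_t subport_id, bool enable)
{
	int ret = rte_sched_subport_tc_ov_config(port, subport_id, enable);

	if (ret != 0)
		printf("subport %" PRIu32 ": TC OV config failed (%d)\n",
			subport_id, ret);

	return ret;
}

For example, app_set_tc_ov(port, 0, true) could be called after subport and
pipe configuration when the best-effort pipes of subport 0 are allowed to
oversubscribe the subport best-effort rate.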