From patchwork Thu Jul 11 10:26:58 2019
X-Patchwork-Submitter: Jasvinder Singh
X-Patchwork-Id: 56335
From: Jasvinder Singh
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com, Abraham Tovar, Lukasz Krakowiak
Date: Thu, 11 Jul 2019 11:26:58 +0100
Message-Id: <20190711102659.59001-11-jasvinder.singh@intel.com>
X-Mailer: git-send-email 2.21.0
In-Reply-To: <20190711102659.59001-1-jasvinder.singh@intel.com>
References: <20190625153217.24301-2-jasvinder.singh@intel.com> <20190711102659.59001-1-jasvinder.singh@intel.com>
Subject: [dpdk-dev] [PATCH v3 10/11] examples/qos_sched: add tc and queue config flexibility

Update the qos_sched sample app to allow flexible configuration of pipe traffic classes and queues.
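The packet-to-queue mapping in app_thread.c changes accordingly. The following is a minimal sketch of that mapping, not the exact code from the diff below: the helper names are illustrative, and the 13-traffic-class / 16-queue pipe layout is assumed from the reworked librte_sched API.

/*
 * Sketch of the new mapping in get_pkt_sched(): a packet byte picks one
 * of the queues that were given a non-zero size in the config file, and
 * the traffic class follows from the queue index.  Queues 12..15 all
 * belong to the lowest priority (best-effort) traffic class 12.
 */
#include <stdint.h>

#define N_TC_PER_PIPE      13	/* RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE */
#define N_QUEUES_PER_PIPE  16	/* RTE_SCHED_QUEUES_PER_PIPE */

static uint32_t active_queues[N_QUEUES_PER_PIPE];	/* non-zero-size queues */
static uint32_t n_active_queues;			/* must be > 0 before use */

static uint32_t
map_pkt_to_queue(uint8_t pkt_byte)	/* e.g. low byte of destination IP */
{
	return active_queues[pkt_byte % n_active_queues];
}

static uint32_t
map_queue_to_tc(uint32_t queue)
{
	/* Queues 0..11 map 1:1 to TC 0..11; any higher queue is TC 12. */
	return queue > (N_TC_PER_PIPE - 1) ? (N_TC_PER_PIPE - 1) : queue;
}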
Signed-off-by: Jasvinder Singh Signed-off-by: Abraham Tovar Signed-off-by: Lukasz Krakowiak --- examples/qos_sched/app_thread.c | 9 +- examples/qos_sched/cfg_file.c | 119 +++++--- examples/qos_sched/init.c | 63 +++- examples/qos_sched/main.h | 4 + examples/qos_sched/profile.cfg | 66 +++- examples/qos_sched/profile_ov.cfg | 54 +++- examples/qos_sched/stats.c | 483 +++++++++++++++++------------- 7 files changed, 517 insertions(+), 281 deletions(-) diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c index e14b275e3..1ce3639ee 100644 --- a/examples/qos_sched/app_thread.c +++ b/examples/qos_sched/app_thread.c @@ -20,13 +20,11 @@ * QoS parameters are encoded as follows: * Outer VLAN ID defines subport * Inner VLAN ID defines pipe - * Destination IP 0.0.XXX.0 defines traffic class * Destination IP host (0.0.0.XXX) defines queue * Values below define offset to each field from start of frame */ #define SUBPORT_OFFSET 7 #define PIPE_OFFSET 9 -#define TC_OFFSET 20 #define QUEUE_OFFSET 20 #define COLOR_OFFSET 19 @@ -40,10 +38,9 @@ get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe, (port_params.n_subports_per_port - 1); /* Outer VLAN ID*/ *pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) & (port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */ - *traffic_class = (pdata[QUEUE_OFFSET] & 0x0F) & - (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1); /* Destination IP */ - *queue = ((pdata[QUEUE_OFFSET] >> 8) & 0x0F) & - (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1) ; /* Destination IP */ + *queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues]; + *traffic_class = (*queue > (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) ? + (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) : *queue); /* Destination IP */ *color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */ return 0; diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c index 76ffffc4b..522de1aea 100644 --- a/examples/qos_sched/cfg_file.c +++ b/examples/qos_sched/cfg_file.c @@ -29,6 +29,9 @@ cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params if (!cfg || !port_params) return -1; + memset(active_queues, 0, sizeof(active_queues)); + n_active_queues = 0; + entry = rte_cfgfile_get_entry(cfg, "port", "frame overhead"); if (entry) port_params->frame_overhead = (uint32_t)atoi(entry); @@ -45,8 +48,12 @@ cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params if (entry) { char *next; - for(j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) { + for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) { port_params->qsize[j] = (uint16_t)strtol(entry, &next, 10); + if (port_params->qsize[j] != 0) { + active_queues[n_active_queues] = j; + n_active_queues++; + } if (next == NULL) break; entry = next; @@ -173,46 +180,52 @@ cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe_params if (entry) pipe_params[j].tc_rate[3] = (uint32_t)atoi(entry); + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 4 rate"); + if (entry) + pipe_params[j].tc_rate[4] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 5 rate"); + if (entry) + pipe_params[j].tc_rate[5] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 6 rate"); + if (entry) + pipe_params[j].tc_rate[6] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 7 rate"); + if (entry) + pipe_params[j].tc_rate[7] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 8 rate"); + if (entry) + 
pipe_params[j].tc_rate[8] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 9 rate"); + if (entry) + pipe_params[j].tc_rate[9] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 10 rate"); + if (entry) + pipe_params[j].tc_rate[10] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 11 rate"); + if (entry) + pipe_params[j].tc_rate[11] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 12 rate"); + if (entry) + pipe_params[j].tc_rate[12] = (uint32_t)atoi(entry); + #ifdef RTE_SCHED_SUBPORT_TC_OV - entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 oversubscription weight"); + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 12 oversubscription weight"); if (entry) pipe_params[j].tc_ov_weight = (uint8_t)atoi(entry); #endif - entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 0 wrr weights"); + entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 12 wrr weights"); if (entry) { - for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) { - pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*0 + i] = - (uint8_t)strtol(entry, &next, 10); - if (next == NULL) - break; - entry = next; - } - } - entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 1 wrr weights"); - if (entry) { - for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) { - pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*1 + i] = - (uint8_t)strtol(entry, &next, 10); - if (next == NULL) - break; - entry = next; - } - } - entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 2 wrr weights"); - if (entry) { - for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) { - pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*2 + i] = - (uint8_t)strtol(entry, &next, 10); - if (next == NULL) - break; - entry = next; - } - } - entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 wrr weights"); - if (entry) { - for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) { - pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*3 + i] = + for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) { + pipe_params[j].wrr_weights[i] = (uint8_t)strtol(entry, &next, 10); if (next == NULL) break; @@ -267,6 +280,42 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo if (entry) subport_params[i].tc_rate[3] = (uint32_t)atoi(entry); + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 4 rate"); + if (entry) + subport_params[i].tc_rate[4] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 5 rate"); + if (entry) + subport_params[i].tc_rate[5] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 6 rate"); + if (entry) + subport_params[i].tc_rate[6] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 7 rate"); + if (entry) + subport_params[i].tc_rate[7] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 8 rate"); + if (entry) + subport_params[i].tc_rate[8] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 9 rate"); + if (entry) + subport_params[i].tc_rate[9] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 10 rate"); + if (entry) + subport_params[i].tc_rate[10] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 11 rate"); + if (entry) + subport_params[i].tc_rate[11] = (uint32_t)atoi(entry); + + entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 12 rate"); + if (entry) + subport_params[i].tc_rate[12] = 
(uint32_t)atoi(entry); + int n_entries = rte_cfgfile_section_num_entries(cfg, sec_name); struct rte_cfgfile_entry entries[n_entries]; diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c index 6b63d4e0e..5fd2a38e4 100644 --- a/examples/qos_sched/init.c +++ b/examples/qos_sched/init.c @@ -170,17 +170,20 @@ static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = { .tb_rate = 1250000000, .tb_size = 1000000, - .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000}, + .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000, + 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, + 1250000000, 1250000000, 1250000000, 1250000000}, .tc_period = 10, }, }; -static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = { +static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = { { /* Profile #0 */ .tb_rate = 305175, .tb_size = 1000000, - .tc_rate = {305175, 305175, 305175, 305175}, + .tc_rate = {305175, 305175, 305175, 305175, 305175, 305175, + 305175, 305175, 305175, 305175, 305175, 305175, 305175}, .tc_period = 40, #ifdef RTE_SCHED_SUBPORT_TC_OV .tc_ov_weight = 1, @@ -198,9 +201,10 @@ struct rte_sched_port_params port_params = { .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT, .n_subports_per_port = 1, .n_pipes_per_subport = 4096, - .qsize = {64, 64, 64, 64}, + .qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64}, .pipe_profiles = pipe_profiles, .n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params), + .n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES, #ifdef RTE_SCHED_RED .red_params = { @@ -222,8 +226,53 @@ struct rte_sched_port_params port_params = { /* Traffic Class 3 - Colors Green / Yellow / Red */ [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, - [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9} - } + [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 4 - Colors Green / Yellow / Red */ + [4][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [4][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [4][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 5 - Colors Green / Yellow / Red */ + [5][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [5][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [5][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 6 - Colors Green / Yellow / Red */ + [6][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [6][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [6][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 7 - Colors Green / Yellow / Red */ + [7][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [7][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [7][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 8 - Colors Green / Yellow / Red */ + [8][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [8][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [8][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 9 - Colors Green / Yellow / Red */ + [9][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [9][1] = {.min_th = 40, .max_th = 
64, .maxp_inv = 10, .wq_log2 = 9}, + [9][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 10 - Colors Green / Yellow / Red */ + [10][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [10][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [10][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 11 - Colors Green / Yellow / Red */ + [11][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [11][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [11][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + + /* Traffic Class 12 - Colors Green / Yellow / Red */ + [12][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [12][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + [12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, + }, #endif /* RTE_SCHED_RED */ }; @@ -255,7 +304,7 @@ app_init_sched_port(uint32_t portid, uint32_t socketid) subport, err); } - for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe ++) { + for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe++) { if (app_pipe_to_profile[subport][pipe] != -1) { err = rte_sched_pipe_config(port, subport, pipe, app_pipe_to_profile[subport][pipe]); diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h index 8a2741c58..d8f890b64 100644 --- a/examples/qos_sched/main.h +++ b/examples/qos_sched/main.h @@ -50,6 +50,7 @@ extern "C" { #define MAX_DATA_STREAMS (APP_MAX_LCORE/2) #define MAX_SCHED_SUBPORTS 8 #define MAX_SCHED_PIPES 4096 +#define MAX_SCHED_PIPE_PROFILES 256 #ifndef APP_COLLECT_STAT #define APP_COLLECT_STAT 1 @@ -147,6 +148,9 @@ extern struct burst_conf burst_conf; extern struct ring_thresh rx_thresh; extern struct ring_thresh tx_thresh; +uint32_t active_queues[RTE_SCHED_QUEUES_PER_PIPE]; +uint32_t n_active_queues; + extern struct rte_sched_port_params port_params; int app_parse_args(int argc, char **argv); diff --git a/examples/qos_sched/profile.cfg b/examples/qos_sched/profile.cfg index f5b704cc6..55fd7d1e0 100644 --- a/examples/qos_sched/profile.cfg +++ b/examples/qos_sched/profile.cfg @@ -1,6 +1,6 @@ ; BSD LICENSE ; -; Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +; Copyright(c) 2010-2019 Intel Corporation. All rights reserved. ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without @@ -33,12 +33,12 @@ ; 10GbE output port: ; * Single subport (subport 0): ; - Subport rate set to 100% of port rate -; - Each of the 4 traffic classes has rate set to 100% of port rate +; - Each of the 9 traffic classes has rate set to 100% of port rate ; * 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration: ; - Pipe rate set to 1/4K of port rate -; - Each of the 4 traffic classes has rate set to 100% of pipe rate -; - Within each traffic class, the byte-level WRR weights for the 4 queues -; are set to 1:1:1:1 +; - Each of the 9 traffic classes has rate set to 100% of pipe rate +; - Within lowest priority traffic class (best-effort), the byte-level +; WRR weights for the 8 queues are set to 1:1:1:1:1:1:1:1 ; ; For more details, please refer to chapter "Quality of Service (QoS) Framework" ; of Data Plane Development Kit (DPDK) Programmer's Guide. 
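As a quick sanity check on the rates used in the hunks below (an illustrative calculation, not part of the patch): a 10GbE port carries 1,250,000,000 bytes/s, so with 4096 pipes per subport each pipe, and each of its traffic classes set to 100% of the pipe rate, gets roughly 1,250,000,000 / 4096 = ~305,175 bytes/s, which is the "tc N rate" value used in the pipe profile further down.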
@@ -48,7 +48,7 @@ frame overhead = 24 number of subports per port = 1 number of pipes per subport = 4096 -queue sizes = 64 64 64 64 +queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 ; Subport configuration [subport 0] @@ -59,6 +59,16 @@ tc 0 rate = 1250000000 ; Bytes per second tc 1 rate = 1250000000 ; Bytes per second tc 2 rate = 1250000000 ; Bytes per second tc 3 rate = 1250000000 ; Bytes per second +tc 4 rate = 1250000000 ; Bytes per second +tc 5 rate = 1250000000 ; Bytes per second +tc 6 rate = 1250000000 ; Bytes per second +tc 7 rate = 1250000000 ; Bytes per second +tc 8 rate = 1250000000 ; Bytes per second +tc 9 rate = 1250000000 ; Bytes per second +tc 10 rate = 1250000000 ; Bytes per second +tc 11 rate = 1250000000 ; Bytes per second +tc 12 rate = 1250000000 ; Bytes per second + tc period = 10 ; Milliseconds pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 @@ -72,14 +82,21 @@ tc 0 rate = 305175 ; Bytes per second tc 1 rate = 305175 ; Bytes per second tc 2 rate = 305175 ; Bytes per second tc 3 rate = 305175 ; Bytes per second -tc period = 40 ; Milliseconds +tc 4 rate = 305175 ; Bytes per second +tc 5 rate = 305175 ; Bytes per second +tc 6 rate = 305175 ; Bytes per second +tc 7 rate = 305175 ; Bytes per second +tc 8 rate = 305175 ; Bytes per second +tc 9 rate = 305175 ; Bytes per second +tc 10 rate = 305175 ; Bytes per second +tc 11 rate = 305175 ; Bytes per second +tc 12 rate = 305175 ; Bytes per second + +tc period = 40 ; Milliseconds -tc 3 oversubscription weight = 1 +tc 12 oversubscription weight = 1 -tc 0 wrr weights = 1 1 1 1 -tc 1 wrr weights = 1 1 1 1 -tc 2 wrr weights = 1 1 1 1 -tc 3 wrr weights = 1 1 1 1 +tc 12 wrr weights = 1 1 1 1 ; RED params per traffic class and color (Green / Yellow / Red) [red] @@ -102,3 +119,28 @@ tc 3 wred min = 48 40 32 tc 3 wred max = 64 64 64 tc 3 wred inv prob = 10 10 10 tc 3 wred weight = 9 9 9 + +tc 4 wred min = 48 40 32 +tc 4 wred max = 64 64 64 +tc 4 wred inv prob = 10 10 10 +tc 4 wred weight = 9 9 9 + +tc 5 wred min = 48 40 32 +tc 5 wred max = 64 64 64 +tc 5 wred inv prob = 10 10 10 +tc 5 wred weight = 9 9 9 + +tc 6 wred min = 48 40 32 +tc 6 wred max = 64 64 64 +tc 6 wred inv prob = 10 10 10 +tc 6 wred weight = 9 9 9 + +tc 7 wred min = 48 40 32 +tc 7 wred max = 64 64 64 +tc 7 wred inv prob = 10 10 10 +tc 7 wred weight = 9 9 9 + +tc 8 wred min = 48 40 32 +tc 8 wred max = 64 64 64 +tc 8 wred inv prob = 10 10 10 +tc 8 wred weight = 9 9 9 diff --git a/examples/qos_sched/profile_ov.cfg b/examples/qos_sched/profile_ov.cfg index 33000df9e..d5d9b321e 100644 --- a/examples/qos_sched/profile_ov.cfg +++ b/examples/qos_sched/profile_ov.cfg @@ -1,6 +1,6 @@ ; BSD LICENSE ; -; Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +; Copyright(c) 2010-2019 Intel Corporation. All rights reserved. ; All rights reserved. 
; ; Redistribution and use in source and binary forms, with or without @@ -34,7 +34,7 @@ frame overhead = 24 number of subports per port = 1 number of pipes per subport = 32 -queue sizes = 64 64 64 64 +queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 ; Subport configuration [subport 0] @@ -45,6 +45,15 @@ tc 0 rate = 8400000 ; Bytes per second tc 1 rate = 8400000 ; Bytes per second tc 2 rate = 8400000 ; Bytes per second tc 3 rate = 8400000 ; Bytes per second +tc 4 rate = 8400000 ; Bytes per second +tc 5 rate = 8400000 ; Bytes per second +tc 6 rate = 8400000 ; Bytes per second +tc 7 rate = 8400000 ; Bytes per second +tc 8 rate = 8400000 ; Bytes per second +tc 9 rate = 8400000 ; Bytes per second +tc 10 rate = 8400000 ; Bytes per second +tc 11 rate = 8400000 ; Bytes per second +tc 12 rate = 8400000 ; Bytes per second tc period = 10 ; Milliseconds pipe 0-31 = 0 ; These pipes are configured with pipe profile 0 @@ -58,14 +67,20 @@ tc 0 rate = 16800000 ; Bytes per second tc 1 rate = 16800000 ; Bytes per second tc 2 rate = 16800000 ; Bytes per second tc 3 rate = 16800000 ; Bytes per second +tc 4 rate = 16800000 ; Bytes per second +tc 5 rate = 16800000 ; Bytes per second +tc 6 rate = 16800000 ; Bytes per second +tc 7 rate = 16800000 ; Bytes per second +tc 8 rate = 16800000 ; Bytes per second +tc 9 rate = 16800000 ; Bytes per second +tc 10 rate = 16800000 ; Bytes per second +tc 11 rate = 16800000 ; Bytes per second +tc 12 rate = 16800000 ; Bytes per second tc period = 28 ; Milliseconds -tc 3 oversubscription weight = 1 +tc 12 oversubscription weight = 1 -tc 0 wrr weights = 1 1 1 1 -tc 1 wrr weights = 1 1 1 1 -tc 2 wrr weights = 1 1 1 1 -tc 3 wrr weights = 1 1 1 1 +tc 12 wrr weights = 1 1 1 1 ; RED params per traffic class and color (Green / Yellow / Red) [red] @@ -88,3 +103,28 @@ tc 3 wred min = 48 40 32 tc 3 wred max = 64 64 64 tc 3 wred inv prob = 10 10 10 tc 3 wred weight = 9 9 9 + +tc 4 wred min = 48 40 32 +tc 4 wred max = 64 64 64 +tc 4 wred inv prob = 10 10 10 +tc 4 wred weight = 9 9 9 + +tc 5 wred min = 48 40 32 +tc 5 wred max = 64 64 64 +tc 5 wred inv prob = 10 10 10 +tc 5 wred weight = 9 9 9 + +tc 6 wred min = 48 40 32 +tc 6 wred max = 64 64 64 +tc 6 wred inv prob = 10 10 10 +tc 6 wred weight = 9 9 9 + +tc 7 wred min = 48 40 32 +tc 7 wred max = 64 64 64 +tc 7 wred inv prob = 10 10 10 +tc 7 wred weight = 9 9 9 + +tc 8 wred min = 48 40 32 +tc 8 wred max = 64 64 64 +tc 8 wred inv prob = 10 10 10 +tc 8 wred weight = 9 9 9 diff --git a/examples/qos_sched/stats.c b/examples/qos_sched/stats.c index 8193d964c..4f5fdda47 100644 --- a/examples/qos_sched/stats.c +++ b/examples/qos_sched/stats.c @@ -11,278 +11,333 @@ int qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q) { - struct rte_sched_queue_stats stats; - struct rte_sched_port *port; - uint16_t qlen; - uint32_t queue_id, count, i; - uint32_t average; - - for (i = 0; i < nb_pfc; i++) { - if (qos_conf[i].tx_port == port_id) - break; - } - if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport - || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE || q >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS) - return -1; - - port = qos_conf[i].sched_port; - - queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id); - queue_id = queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + q); - - average = 0; - - for (count = 0; count < qavg_ntimes; count++) { - rte_sched_queue_read_stats(port, 
queue_id, &stats, &qlen); - average += qlen; - usleep(qavg_period); - } - - average /= qavg_ntimes; - - printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average); - - return 0; + struct rte_sched_queue_stats stats; + struct rte_sched_port *port; + uint16_t qlen; + uint32_t count, i, queue_id = 0; + uint32_t average; + + for (i = 0; i < nb_pfc; i++) { + if (qos_conf[i].tx_port == port_id) + break; + } + + if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || + pipe_id >= port_params.n_pipes_per_subport || + tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE || + q >= RTE_SCHED_BE_QUEUES_PER_PIPE || + (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1 && q > 0)) + return -1; + + port = qos_conf[i].sched_port; + for (i = 0; i < subport_id; i++) + queue_id += port_params.n_pipes_per_subport * + RTE_SCHED_QUEUES_PER_PIPE; + if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) + queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc; + else + queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc + q; + + average = 0; + for (count = 0; count < qavg_ntimes; count++) { + rte_sched_queue_read_stats(port, queue_id, &stats, &qlen); + average += qlen; + usleep(qavg_period); + } + + average /= qavg_ntimes; + + printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average); + + return 0; } int qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, - uint8_t tc) + uint8_t tc) { - struct rte_sched_queue_stats stats; - struct rte_sched_port *port; - uint16_t qlen; - uint32_t queue_id, count, i; - uint32_t average, part_average; + struct rte_sched_queue_stats stats; + struct rte_sched_port *port; + uint16_t qlen; + uint32_t count, i, queue_id = 0; + uint32_t average, part_average; + + for (i = 0; i < nb_pfc; i++) { + if (qos_conf[i].tx_port == port_id) + break; + } + + if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || + pipe_id >= port_params.n_pipes_per_subport || + tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + return -1; + + port = qos_conf[i].sched_port; - for (i = 0; i < nb_pfc; i++) { - if (qos_conf[i].tx_port == port_id) - break; - } - if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport - || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) - return -1; + for (i = 0; i < subport_id; i++) + queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE; - port = qos_conf[i].sched_port; + queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc; - queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id); + average = 0; - average = 0; + for (count = 0; count < qavg_ntimes; count++) { + part_average = 0; - for (count = 0; count < qavg_ntimes; count++) { - part_average = 0; - for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) { - rte_sched_queue_read_stats(port, queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + i), &stats, &qlen); - part_average += qlen; - } - average += part_average / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; - usleep(qavg_period); - } + if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) { + rte_sched_queue_read_stats(port, queue_id, &stats, &qlen); + part_average += qlen; + } else { + for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) { + rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen); + part_average += qlen; + } + average += part_average / RTE_SCHED_BE_QUEUES_PER_PIPE; + } + usleep(qavg_period); + } - average /= qavg_ntimes; + average /= qavg_ntimes; - printf("\nAverage queue size: %" PRIu32 " 
bytes.\n\n", average); + printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average); - return 0; + return 0; } int qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id) { - struct rte_sched_queue_stats stats; - struct rte_sched_port *port; - uint16_t qlen; - uint32_t queue_id, count, i; - uint32_t average, part_average; + struct rte_sched_queue_stats stats; + struct rte_sched_port *port; + uint16_t qlen; + uint32_t count, i, queue_id = 0; + uint32_t average, part_average; - for (i = 0; i < nb_pfc; i++) { - if (qos_conf[i].tx_port == port_id) - break; - } - if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport) - return -1; + for (i = 0; i < nb_pfc; i++) { + if (qos_conf[i].tx_port == port_id) + break; + } - port = qos_conf[i].sched_port; + if (i == nb_pfc || + subport_id >= port_params.n_subports_per_port || + pipe_id >= port_params.n_pipes_per_subport) + return -1; - queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id); + port = qos_conf[i].sched_port; - average = 0; + for (i = 0; i < subport_id; i++) + queue_id += port_params.n_pipes_per_subport * + RTE_SCHED_QUEUES_PER_PIPE; - for (count = 0; count < qavg_ntimes; count++) { - part_average = 0; - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) { - rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen); - part_average += qlen; - } - average += part_average / (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS); - usleep(qavg_period); - } + queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE; - average /= qavg_ntimes; + average = 0; - printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average); + for (count = 0; count < qavg_ntimes; count++) { + part_average = 0; + for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) { + rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen); + part_average += qlen; + } + average += part_average / RTE_SCHED_QUEUES_PER_PIPE; + usleep(qavg_period); + } - return 0; + average /= qavg_ntimes; + + printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average); + + return 0; } int qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc) { - struct rte_sched_queue_stats stats; - struct rte_sched_port *port; - uint16_t qlen; - uint32_t queue_id, count, i, j; - uint32_t average, part_average; - - for (i = 0; i < nb_pfc; i++) { - if (qos_conf[i].tx_port == port_id) - break; - } - if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) - return -1; - - port = qos_conf[i].sched_port; - - average = 0; - - for (count = 0; count < qavg_ntimes; count++) { - part_average = 0; - for (i = 0; i < port_params.n_pipes_per_subport; i++) { - queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + i); - - for (j = 0; j < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) { - rte_sched_queue_read_stats(port, queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + j), &stats, &qlen); - part_average += qlen; - } - } - - average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS); - usleep(qavg_period); - } - - average /= qavg_ntimes; - - printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average); - - return 0; + struct rte_sched_queue_stats stats; + struct rte_sched_port *port; + uint16_t qlen; + uint32_t queue_id, count, i, j, 
subport_queue_id = 0; + uint32_t average, part_average; + + for (i = 0; i < nb_pfc; i++) { + if (qos_conf[i].tx_port == port_id) + break; + } + + if (i == nb_pfc || + subport_id >= port_params.n_subports_per_port || + tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + return -1; + + port = qos_conf[i].sched_port; + + for (i = 0; i < subport_id; i++) + subport_queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE; + + average = 0; + + for (count = 0; count < qavg_ntimes; count++) { + part_average = 0; + for (i = 0; i < port_params.n_pipes_per_subport; i++) { + if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) { + queue_id = subport_queue_id + i * RTE_SCHED_QUEUES_PER_PIPE + tc; + rte_sched_queue_read_stats(port, queue_id, &stats, &qlen); + part_average += qlen; + } else { + for (j = 0; j < RTE_SCHED_BE_QUEUES_PER_PIPE; j++) { + queue_id = subport_queue_id + + i * RTE_SCHED_QUEUES_PER_PIPE + tc + j; + rte_sched_queue_read_stats(port, queue_id, &stats, &qlen); + part_average += qlen; + } + } + } + + if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) + average += part_average / (port_params.n_pipes_per_subport); + else + average += part_average / (port_params.n_pipes_per_subport) * RTE_SCHED_BE_QUEUES_PER_PIPE; + + usleep(qavg_period); + } + + average /= qavg_ntimes; + + printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average); + + return 0; } int qavg_subport(uint16_t port_id, uint32_t subport_id) { - struct rte_sched_queue_stats stats; - struct rte_sched_port *port; - uint16_t qlen; - uint32_t queue_id, count, i, j; - uint32_t average, part_average; + struct rte_sched_queue_stats stats; + struct rte_sched_port *port; + uint16_t qlen; + uint32_t queue_id, count, i, j, subport_queue_id = 0; + uint32_t average, part_average; + + for (i = 0; i < nb_pfc; i++) { + if (qos_conf[i].tx_port == port_id) + break; + } + + if (i == nb_pfc || + subport_id >= port_params.n_subports_per_port) + return -1; - for (i = 0; i < nb_pfc; i++) { - if (qos_conf[i].tx_port == port_id) - break; - } - if (i == nb_pfc || subport_id >= port_params.n_subports_per_port) - return -1; + port = qos_conf[i].sched_port; - port = qos_conf[i].sched_port; + for (i = 0; i < subport_id; i++) + subport_queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE; - average = 0; + average = 0; - for (count = 0; count < qavg_ntimes; count++) { - part_average = 0; - for (i = 0; i < port_params.n_pipes_per_subport; i++) { - queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + i); + for (count = 0; count < qavg_ntimes; count++) { + part_average = 0; + for (i = 0; i < port_params.n_pipes_per_subport; i++) { + queue_id = subport_queue_id + i * RTE_SCHED_QUEUES_PER_PIPE; - for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) { - rte_sched_queue_read_stats(port, queue_id + j, &stats, &qlen); - part_average += qlen; - } - } + for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) { + rte_sched_queue_read_stats(port, queue_id + j, &stats, &qlen); + part_average += qlen; + } + } - average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS); - usleep(qavg_period); - } + average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE); + usleep(qavg_period); + } - average /= qavg_ntimes; + average /= qavg_ntimes; - printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average); + printf("\nAverage queue size: %" 
PRIu32 " bytes.\n\n", average); - return 0; + return 0; } int subport_stat(uint16_t port_id, uint32_t subport_id) { - struct rte_sched_subport_stats stats; - struct rte_sched_port *port; - uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - uint8_t i; - - for (i = 0; i < nb_pfc; i++) { - if (qos_conf[i].tx_port == port_id) - break; - } - if (i == nb_pfc || subport_id >= port_params.n_subports_per_port) - return -1; - - port = qos_conf[i].sched_port; + struct rte_sched_subport_stats stats; + struct rte_sched_port *port; + uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + uint8_t i; + + for (i = 0; i < nb_pfc; i++) { + if (qos_conf[i].tx_port == port_id) + break; + } + + if (i == nb_pfc || subport_id >= port_params.n_subports_per_port) + return -1; + + port = qos_conf[i].sched_port; memset (tc_ov, 0, sizeof(tc_ov)); - rte_sched_subport_read_stats(port, subport_id, &stats, tc_ov); + rte_sched_subport_read_stats(port, subport_id, &stats, tc_ov); - printf("\n"); - printf("+----+-------------+-------------+-------------+-------------+-------------+\n"); - printf("| TC | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| OV Status |\n"); - printf("+----+-------------+-------------+-------------+-------------+-------------+\n"); + printf("\n"); + printf("+----+-------------+-------------+-------------+-------------+-------------+\n"); + printf("| TC | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| OV Status |\n"); + printf("+----+-------------+-------------+-------------+-------------+-------------+\n"); - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { - printf("| %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " |\n", i, - stats.n_pkts_tc[i], stats.n_pkts_tc_dropped[i], - stats.n_bytes_tc[i], stats.n_bytes_tc_dropped[i], tc_ov[i]); - printf("+----+-------------+-------------+-------------+-------------+-------------+\n"); - } - printf("\n"); + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { + printf("| %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " |\n", i, + stats.n_pkts_tc[i], stats.n_pkts_tc_dropped[i], + stats.n_bytes_tc[i], stats.n_bytes_tc_dropped[i], tc_ov[i]); + printf("+----+-------------+-------------+-------------+-------------+-------------+\n"); + } + printf("\n"); - return 0; + return 0; } int pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id) { - struct rte_sched_queue_stats stats; - struct rte_sched_port *port; - uint16_t qlen; - uint8_t i, j; - uint32_t queue_id; - - for (i = 0; i < nb_pfc; i++) { - if (qos_conf[i].tx_port == port_id) - break; - } - if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport) - return -1; - - port = qos_conf[i].sched_port; - - queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id); - - printf("\n"); - printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n"); - printf("| TC | Queue | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| Length |\n"); - printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n"); - - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { - for (j = 0; j < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) { - - rte_sched_queue_read_stats(port, queue_id + (i * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + j), &stats, &qlen); - - printf("| %d | %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | 
%11i |\n", i, j, - stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen); - printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n"); - } - if (i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) - printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n"); - } - printf("\n"); - - return 0; + struct rte_sched_queue_stats stats; + struct rte_sched_port *port; + uint16_t qlen; + uint8_t i, j; + uint32_t queue_id = 0; + + for (i = 0; i < nb_pfc; i++) { + if (qos_conf[i].tx_port == port_id) + break; + } + + if (i == nb_pfc || + subport_id >= port_params.n_subports_per_port || + pipe_id >= port_params.n_pipes_per_subport) + return -1; + + port = qos_conf[i].sched_port; + for (i = 0; i < subport_id; i++) + queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE; + + queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE; + + printf("\n"); + printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n"); + printf("| TC | Queue | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| Length |\n"); + printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n"); + + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { + if (i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) { + rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen); + printf("| %d | %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, 0, + stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen); + printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n"); + } else { + for (j = 0; j < RTE_SCHED_BE_QUEUES_PER_PIPE; j++) { + rte_sched_queue_read_stats(port, queue_id + i + j, &stats, &qlen); + printf("| %d | %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, j, + stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen); + printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n"); + } + } + } + printf("\n"); + + return 0; }