[dpdk-dev,3/8] net/mrvl: add egress scheduler/rate limiter support

Message ID 1519222460-14605-4-git-send-email-tdu@semihalf.com
State Superseded, archived
Delegated to: Ferruh Yigit
Headers show

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Tomasz Duszynski Feb. 21, 2018, 2:14 p.m.
Add egress scheduler and egress rate limiter support.

Signed-off-by: Natalie Samsonov <nsamsono@marvell.com>
Signed-off-by: Tomasz Duszynski <tdu@semihalf.com>
---
 drivers/net/mrvl/mrvl_ethdev.c |   6 +-
 drivers/net/mrvl/mrvl_qos.c    | 141 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/mrvl/mrvl_qos.h    |  19 ++++++
 3 files changed, 161 insertions(+), 5 deletions(-)

Patch

diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
index 2d59fce..e42b787 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mrvl/mrvl_ethdev.c
@@ -348,6 +348,11 @@  mrvl_dev_configure(struct rte_eth_dev *dev)
 	if (ret < 0)
 		return ret;
 
+	ret = mrvl_configure_txqs(priv, dev->data->port_id,
+				  dev->data->nb_tx_queues);
+	if (ret < 0)
+		return ret;
+
 	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
 	priv->ppio_params.maintain_stats = 1;
 	priv->nb_rx_queues = dev->data->nb_rx_queues;
@@ -1565,7 +1570,6 @@  mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	dev->data->tx_queues[idx] = txq;
 
 	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
-	priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
 
 	return 0;
 }
diff --git a/drivers/net/mrvl/mrvl_qos.c b/drivers/net/mrvl/mrvl_qos.c
index 854eb4d..e6d204a 100644
--- a/drivers/net/mrvl/mrvl_qos.c
+++ b/drivers/net/mrvl/mrvl_qos.c
@@ -64,12 +64,19 @@ 
 #define MRVL_TOK_PCP "pcp"
 #define MRVL_TOK_PORT "port"
 #define MRVL_TOK_RXQ "rxq"
-#define MRVL_TOK_SP "SP"
 #define MRVL_TOK_TC "tc"
 #define MRVL_TOK_TXQ "txq"
 #define MRVL_TOK_VLAN "vlan"
 #define MRVL_TOK_VLAN_IP "vlan/ip"
-#define MRVL_TOK_WEIGHT "weight"
+
+/* egress specific configuration tokens */
+#define MRVL_TOK_BURST_SIZE "burst_size"
+#define MRVL_TOK_RATE_LIMIT "rate_limit"
+#define MRVL_TOK_RATE_LIMIT_ENABLE "rate_limit_enable"
+#define MRVL_TOK_SCHED_MODE "sched_mode"
+#define MRVL_TOK_SCHED_MODE_SP "sp"
+#define MRVL_TOK_SCHED_MODE_WRR "wrr"
+#define MRVL_TOK_WRR_WEIGHT "wrr_weight"
 
 /* policer specific configuration tokens */
 #define MRVL_TOK_PLCR_ENABLE "policer_enable"
@@ -147,12 +154,69 @@  get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
 	if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
 		return 0;
 
+	/* Read scheduling mode */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_SCHED_MODE);
+	if (entry) {
+		if (!strncmp(entry, MRVL_TOK_SCHED_MODE_SP,
+					strlen(MRVL_TOK_SCHED_MODE_SP))) {
+			cfg->port[port].outq[outq].sched_mode =
+				PP2_PPIO_SCHED_M_SP;
+		} else if (!strncmp(entry, MRVL_TOK_SCHED_MODE_WRR,
+					strlen(MRVL_TOK_SCHED_MODE_WRR))) {
+			cfg->port[port].outq[outq].sched_mode =
+				PP2_PPIO_SCHED_M_WRR;
+		} else {
+			RTE_LOG(ERR, PMD, "Unknown token: %s\n", entry);
+			return -1;
+		}
+	}
+
+	/* Read wrr weight */
+	if (cfg->port[port].outq[outq].sched_mode == PP2_PPIO_SCHED_M_WRR) {
+		entry = rte_cfgfile_get_entry(file, sec_name,
+				MRVL_TOK_WRR_WEIGHT);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0)
+				return -1;
+			cfg->port[port].outq[outq].weight = val;
+		}
+	}
+
+	/*
+	 * There's no point in setting rate limiting for specific outq as
+	 * global port rate limiting has priority.
+	 */
+	if (cfg->port[port].rate_limit_enable) {
+		RTE_LOG(WARNING, PMD, "Port %d rate limiting already enabled\n",
+			port);
+		return 0;
+	}
+
 	entry = rte_cfgfile_get_entry(file, sec_name,
-			MRVL_TOK_WEIGHT);
+			MRVL_TOK_RATE_LIMIT_ENABLE);
 	if (entry) {
 		if (get_val_securely(entry, &val) < 0)
 			return -1;
-		cfg->port[port].outq[outq].weight = (uint8_t)val;
+		cfg->port[port].outq[outq].rate_limit_enable = val;
+	}
+
+	if (!cfg->port[port].outq[outq].rate_limit_enable)
+		return 0;
+
+	/* Read CBS (in kB) */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_BURST_SIZE);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		cfg->port[port].outq[outq].rate_limit_params.cbs = val;
+	}
+
+	/* Read CIR (in kbps) */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RATE_LIMIT);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		cfg->port[port].outq[outq].rate_limit_params.cir = val;
 	}
 
 	return 0;
@@ -512,6 +576,36 @@  mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
 			}
 		}
 
+		/*
+		 * Read per-port rate limiting. Setting that will
+		 * disable per-queue rate limiting.
+		 */
+		entry = rte_cfgfile_get_entry(file, sec_name,
+				MRVL_TOK_RATE_LIMIT_ENABLE);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0)
+				return -1;
+			(*cfg)->port[n].rate_limit_enable = val;
+		}
+
+		if ((*cfg)->port[n].rate_limit_enable) {
+			entry = rte_cfgfile_get_entry(file, sec_name,
+					MRVL_TOK_BURST_SIZE);
+			if (entry) {
+				if (get_val_securely(entry, &val) < 0)
+					return -1;
+				(*cfg)->port[n].rate_limit_params.cbs = val;
+			}
+
+			entry = rte_cfgfile_get_entry(file, sec_name,
+					MRVL_TOK_RATE_LIMIT);
+			if (entry) {
+				if (get_val_securely(entry, &val) < 0)
+					return -1;
+				(*cfg)->port[n].rate_limit_params.cir = val;
+			}
+		}
+
 		entry = rte_cfgfile_get_entry(file, sec_name,
 				MRVL_TOK_MAPPING_PRIORITY);
 		if (entry) {
@@ -758,6 +852,45 @@  mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
 }
 
 /**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up the egress scheduler and rate limiter for the port's TX queues.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+		uint16_t max_queues)
+{
+	struct port_cfg *port_cfg;
+	int i;
+
+	if (mrvl_qos_cfg == NULL)
+		return 0;
+
+	/* We need only a subset of configuration. */
+	port_cfg = &mrvl_qos_cfg->port[portid];
+	priv->ppio_params.rate_limit_enable = port_cfg->rate_limit_enable;
+	if (port_cfg->rate_limit_enable)
+		priv->ppio_params.rate_limit_params =
+			port_cfg->rate_limit_params;
+	for (i = 0; i < max_queues; i++) {
+		struct pp2_ppio_outq_params *params =
+			&priv->ppio_params.outqs_params.outqs_params[i];
+
+		params->sched_mode = port_cfg->outq[i].sched_mode;
+		params->weight = port_cfg->outq[i].weight;
+		params->rate_limit_enable = port_cfg->outq[i].rate_limit_enable;
+		params->rate_limit_params = port_cfg->outq[i].rate_limit_params;
+	}
+
+	return 0;
+}
+
+/**
  * Start QoS mapping.
  *
  * Finalize QoS table configuration and initialize it in SDK. It can be done
diff --git a/drivers/net/mrvl/mrvl_qos.h b/drivers/net/mrvl/mrvl_qos.h
index 2ff50c1..48ded5f 100644
--- a/drivers/net/mrvl/mrvl_qos.h
+++ b/drivers/net/mrvl/mrvl_qos.h
@@ -48,6 +48,8 @@ 
 /* QoS config. */
 struct mrvl_qos_cfg {
 	struct port_cfg {
+		int rate_limit_enable;
+		struct pp2_ppio_rate_limit_params rate_limit_params;
 		struct {
 			uint8_t inq[MRVL_PP2_RXQ_MAX];
 			uint8_t dscp[MRVL_CP_PER_TC];
@@ -58,7 +60,10 @@  struct mrvl_qos_cfg {
 			enum pp2_ppio_color color;
 		} tc[MRVL_PP2_TC_MAX];
 		struct {
+			enum pp2_ppio_outq_sched_mode sched_mode;
 			uint8_t weight;
+			int rate_limit_enable;
+			struct pp2_ppio_rate_limit_params rate_limit_params;
 		} outq[MRVL_PP2_RXQ_MAX];
 		enum pp2_cls_qos_tbl_type mapping_priority;
 		uint16_t inqs;
@@ -102,6 +107,20 @@  mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
 		    uint16_t max_queues);
 
 /**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up the egress scheduler and rate limiter for the port's TX queues.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+		    uint16_t max_queues);
+
+/**
  * Start QoS mapping.
  *
  * Finalize QoS table configuration and initialize it in SDK. It can be done