diff mbox series

[06/11] net/octeontx2: add tm hierarchy commit callback

Message ID 20200312111907.31555-7-ndabilpuram@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Headers show
Series net/octeontx2: add traffic manager support | expand

Checks

Context Check Description
ci/Intel-compilation fail Compilation issues
ci/checkpatch success coding style OK

Commit Message

Nithin Dabilpuram March 12, 2020, 11:19 a.m. UTC
Add a TM hierarchy commit callback to support enabling
the newly created topology.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Krzysztof Kanas <kkanas@marvell.com>
---
 drivers/net/octeontx2/otx2_tm.c | 170 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 170 insertions(+)
diff mbox series

Patch

diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index 175d1d5..ae779a5 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -1668,6 +1668,101 @@  validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
 }
 
 static int
+nix_xmit_disable(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
+	uint16_t sqb_cnt, head_off, tail_off;
+	struct otx2_nix_tm_node *tm_node;
+	struct otx2_eth_txq *txq;
+	uint64_t wdata, val;
+	int i, rc = 0; /* init: every loop below may be empty */
+
+	otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
+
+	/* Enable CGX RXTX to drain pkts */
+	if (!eth_dev->data->dev_started) {
+		rc = otx2_cgx_rxtx_start(dev);
+		if (rc)
+			return rc;
+	}
+
+	/* XON all SMQ's */
+	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+		if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+			continue;
+		if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+			continue;
+
+		rc = nix_smq_xoff(dev, tm_node, false);
+		if (rc) {
+			otx2_err("Failed to enable smq %u, rc=%d",
+				 tm_node->hw_id, rc);
+			goto cleanup;
+		}
+	}
+
+	/* Flush all tx queues */
+	for (i = 0; i < sq_cnt; i++) {
+		txq = eth_dev->data->tx_queues[i];
+
+		rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+		if (rc) {
+			otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
+			goto cleanup;
+		}
+
+		/* Wait for sq entries to be flushed */
+		rc = nix_txq_flush_sq_spin(txq);
+		if (rc) {
+			otx2_err("Failed to drain sq, rc=%d", rc);
+			goto cleanup;
+		}
+	}
+
+	/* XOFF & Flush all SMQ's. HRM mandates
+	 * all SQ's empty before SMQ flush is issued.
+	 */
+	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+		if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+			continue;
+		if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+			continue;
+
+		rc = nix_smq_xoff(dev, tm_node, true);
+		if (rc) {
+			otx2_err("Failed to disable smq %u, rc=%d",
+				 tm_node->hw_id, rc);
+			goto cleanup;
+		}
+	}
+
+	/* Verify sanity of all tx queues */
+	for (i = 0; i < sq_cnt; i++) {
+		txq = eth_dev->data->tx_queues[i];
+
+		wdata = ((uint64_t)txq->sq << 32);
+		val = otx2_atomic64_add_nosync(wdata,
+			       (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
+
+		sqb_cnt = val & 0xFFFF;
+		head_off = (val >> 20) & 0x3F;
+		tail_off = (val >> 28) & 0x3F;
+
+		if (sqb_cnt > 1 || head_off != tail_off ||
+		    (*txq->fc_mem != txq->nb_sqb_bufs))
+			otx2_err("Failed to gracefully flush sq %u", txq->sq);
+	}
+
+cleanup:
+	/* restore cgx state */
+	if (!eth_dev->data->dev_started)
+		rc |= otx2_cgx_rxtx_stop(dev); /* don't mask an earlier failure */
+
+	return rc;
+}
+
+static int
 nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
 		uint32_t parent_node_id, uint32_t priority,
 		uint32_t weight, uint32_t lvl,
@@ -1879,11 +1974,86 @@  nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
 	return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
 }
 
+static int
+nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
+			int clear_on_fail,
+			struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_node *tm_node;
+	uint32_t leaf_cnt = 0;
+	int rc;
+
+	if (dev->tm_flags & NIX_TM_COMMITTED) { /* only one committed hierarchy at a time */
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "hierarchy exists";
+		return -EINVAL;
+	}
+
+	/* Check if we have all the leaf nodes */
+	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+		if (tm_node->flags & NIX_TM_NODE_USER &&
+		    tm_node->id < dev->tm_leaf_cnt) /* leaf ids are [0, tm_leaf_cnt) */
+			leaf_cnt++;
+	}
+
+	if (leaf_cnt != dev->tm_leaf_cnt) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "incomplete hierarchy";
+		return -EINVAL;
+	}
+
+	/*
+	 * Disable xmit will be enabled when
+	 * new topology is available.
+	 */
+	rc = nix_xmit_disable(eth_dev); /* quiesce TX before tearing down old tree */
+	if (rc) {
+		otx2_err("failed to disable TX, rc=%d", rc);
+		return -EIO;
+	}
+
+	/* Delete default/ratelimit tree */
+	if (dev->tm_flags & (NIX_TM_DEFAULT_TREE)) { /* NOTE(review): only DEFAULT_TREE tested despite "ratelimit" in comment -- confirm */
+		rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
+		if (rc) {
+			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+			error->message = "failed to free default resources";
+			return rc;
+		}
+		dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE);
+	}
+
+	/* Free up user alloc'ed resources */
+	rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
+				   NIX_TM_NODE_USER, true); /* HW resources only; keep SW nodes */
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "failed to free user resources";
+		return rc;
+	}
+
+	rc = nix_tm_alloc_resources(eth_dev, true); /* program HW for the new user topology */
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "alloc resources failed";
+		/* TODO should we restore default config ? */
+		if (clear_on_fail) /* per rte_tm contract: drop all nodes on failure */
+			nix_tm_free_resources(dev, 0, 0, false);
+		return rc;
+	}
+
+	error->type = RTE_TM_ERROR_TYPE_NONE;
+	dev->tm_flags |= NIX_TM_COMMITTED;
+	return 0;
+}
+
 const struct rte_tm_ops otx2_tm_ops = {
 	.node_add = nix_tm_node_add,
 	.node_delete = nix_tm_node_delete,
 	.node_suspend = nix_tm_node_suspend,
 	.node_resume = nix_tm_node_resume,
+	.hierarchy_commit = nix_tm_hierarchy_commit,
 };
 
 static int