diff mbox series

[v3,06/62] common/cnxk: add provision to enable RED on RQ

Message ID 20210618103741.26526-7-ndabilpuram@marvell.com (mailing list archive)
State Changes Requested, archived
Delegated to: Jerin Jacob
Headers show
Series Marvell CNXK Ethdev Driver | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Nithin Dabilpuram June 18, 2021, 10:36 a.m. UTC
From: Satha Rao <skoteshwar@marvell.com>

Send the RED pass/drop levels derived from the RQ configuration to the
kernel (AF). Also fix the aura and pool shift value calculation.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       |  8 ++++++
 drivers/common/cnxk/roc_nix_queue.c | 50 +++++++++++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_npa.c       |  8 ++++--
 drivers/common/cnxk/roc_npa.h       |  5 ++++
 4 files changed, 69 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 6d9ac10..bb69027 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -161,6 +161,14 @@  struct roc_nix_rq {
 	uint32_t vwqe_max_sz_exp;
 	uint64_t vwqe_wait_tmo;
 	uint64_t vwqe_aura_handle;
+	/* Average LPB aura level drop threshold for RED */
+	uint8_t red_drop;
+	/* Average LPB aura level pass threshold for RED */
+	uint8_t red_pass;
+	/* Average SPB aura level drop threshold for RED */
+	uint8_t spb_red_drop;
+	/* Average SPB aura level pass threshold for RED */
+	uint8_t spb_red_pass;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
 };
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 1c62aa2..0604e7a 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -119,6 +119,15 @@  rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	aq->rq.qint_idx = rq->qid % nix->qints;
 	aq->rq.xqe_drop_ena = 1;
 
+	/* If RED enabled, then fill enable for all cases */
+	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+		aq->rq.spb_aura_pass = rq->spb_red_pass;
+		aq->rq.lpb_aura_pass = rq->red_pass;
+
+		aq->rq.spb_aura_drop = rq->spb_red_drop;
+		aq->rq.lpb_aura_drop = rq->red_drop;
+	}
+
 	if (cfg) {
 		if (rq->sso_ena) {
 			/* SSO mode */
@@ -155,6 +164,14 @@  rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
 		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
 		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
+
+		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+			aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
+			aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
+
+			aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
+			aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
+		}
 	}
 
 	return 0;
@@ -244,6 +261,23 @@  rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	aq->rq.qint_idx = rq->qid % nix->qints;
 	aq->rq.xqe_drop_ena = 1;
 
+	/* If RED enabled, then fill enable for all cases */
+	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+		aq->rq.spb_pool_pass = rq->spb_red_pass;
+		aq->rq.spb_aura_pass = rq->spb_red_pass;
+		aq->rq.lpb_pool_pass = rq->red_pass;
+		aq->rq.lpb_aura_pass = rq->red_pass;
+		aq->rq.wqe_pool_pass = rq->red_pass;
+		aq->rq.xqe_pass = rq->red_pass;
+
+		aq->rq.spb_pool_drop = rq->spb_red_drop;
+		aq->rq.spb_aura_drop = rq->spb_red_drop;
+		aq->rq.lpb_pool_drop = rq->red_drop;
+		aq->rq.lpb_aura_drop = rq->red_drop;
+		aq->rq.wqe_pool_drop = rq->red_drop;
+		aq->rq.xqe_drop = rq->red_drop;
+	}
+
 	if (cfg) {
 		if (rq->sso_ena) {
 			/* SSO mode */
@@ -296,6 +330,22 @@  rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
 		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
 		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
+
+		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
+			aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
+			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
+			aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
+			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
+			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;
+
+			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
+			aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
+			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
+			aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
+			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
+			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
+		}
 	}
 
 	return 0;
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 5ba6e81..d064d12 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -278,13 +278,15 @@  npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 	/* Update aura fields */
 	aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
 	aura->ena = 1;
-	aura->shift = __builtin_clz(block_count) - 8;
+	aura->shift = plt_log2_u32(block_count);
+	aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
 	aura->limit = block_count;
 	aura->pool_caching = 1;
 	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
 	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
 	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
 	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+	aura->avg_con = ROC_NPA_AVG_CONT;
 	/* Many to one reduction */
 	aura->err_qint_idx = aura_id % lf->qints;
 
@@ -293,13 +295,15 @@  npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 	pool->ena = 1;
 	pool->buf_size = block_size / ROC_ALIGN;
 	pool->stack_max_pages = stack_size;
-	pool->shift = __builtin_clz(block_count) - 8;
+	pool->shift = plt_log2_u32(block_count);
+	pool->shift = pool->shift < 8 ? 0 : pool->shift - 8;
 	pool->ptr_start = 0;
 	pool->ptr_end = ~0;
 	pool->stack_caching = 1;
 	pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
 	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
 	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
+	pool->avg_con = ROC_NPA_AVG_CONT;
 
 	/* Many to one reduction */
 	pool->err_qint_idx = pool_id % lf->qints;
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 59d6223..3fc6192 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -12,6 +12,11 @@ 
 #define ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS 512
 #define ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS  15
 
+/* This value controls how much of the present average resource level is used to
+ * calculate the new resource level.
+ */
+#define ROC_NPA_AVG_CONT 0xE0
+
 /* 16 CASP instructions can be outstanding in CN9k, but we use only 15
  * outstanding CASPs as we run out of registers.
  */