[dpdk-dev,11/28] bnxt: set the VMDQ pool size correctly

Message ID 20170328034903.41482-12-ajit.khaparde@broadcom.com (mailing list archive)
State Changes Requested, archived
Delegated to: Ferruh Yigit

Checks

Context               Check     Description
ci/Intel-compilation  fail      Compilation issues
ci/checkpatch         success   coding style OK

Commit Message

Ajit Khaparde March 28, 2017, 3:48 a.m. UTC
  Calculate the VMDq pool size correctly:
For PFs, the VMDq pool count must be the minimum of the available VNIC,
L2 context, and RSS context resources, capped at 64 (ETH_64_POOLS).
For VFs, restrict the number of VMDq pools to 1.
If VFs are enabled, allocate only a single VNIC to each function.
This allows a VF to be targeted with a single L2 context.
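
That clamp, as the bnxt_rxq.c hunk below applies it (RTE_MIN and
ETH_64_POOLS are existing DPDK macros; the max_* fields come from the
driver's struct bnxt):

	pools = RTE_MIN(bp->max_vnics,
			RTE_MIN(bp->max_l2_ctx,
				RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));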

In the future, we will enable VMDq on VFs.

We are essentially mapping VMDq pools to VNICs now.  If there are
no VFs, the PF can use all VNICs as VMDq pools.  If there are VFs,
each function (including the PF) will get a single VNIC.
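
In code terms, the bnxt_hwrm_func_qcaps() hunk below boils down to this
decision (BNXT_PF(), resp, and the bp fields are existing driver symbols;
this is a condensed restatement, not the literal diff):

	if (BNXT_PF(bp) && bp->pf.max_vfs == 0)
		/* PF with no VFs enabled: all VNICs can back VMDq pools */
		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	else
		/* VFs enabled, or this function is a VF: one VNIC each */
		bp->max_vnics = 1;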

Signed-off-by: Stephen Hurd <stephen.hurd@broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt_hwrm.c | 23 +++++++++++++++++++----
 drivers/net/bnxt/bnxt_rxq.c  |  8 +++++---
 2 files changed, 24 insertions(+), 7 deletions(-)
  

Patch

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 0aa2234..6f76748 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -312,7 +312,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (BNXT_PF(bp)) {
 		bp->pf.port_id = resp->port_id;
 		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
-		new_max_vfs = rte_le_to_cpu_16(resp->max_vfs);
+		new_max_vfs = bp->pdev->max_vfs;
 		if (new_max_vfs != bp->pf.max_vfs) {
 			if (bp->pf.vf_info)
 				rte_free(bp->pf.vf_info);
@@ -331,7 +331,15 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+	/* TODO: For now, do not support VMDq/RFS on VFs. */
+	if (BNXT_PF(bp)) {
+		if (bp->pf.max_vfs)
+			bp->max_vnics = 1;
+		else
+			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+	} else {
+		bp->max_vnics = 1;
+	}
 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
 	if (BNXT_PF(bp))
 		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
@@ -1842,6 +1850,8 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
 	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
 	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
 	req->num_vnics = rte_cpu_to_le_16(bp->max_vnics / (num_vfs + 1));
+	/* TODO: For now, do not support VMDq/RFS on VFs. */
+	req->num_vnics = rte_cpu_to_le_16(1);
 	req->num_hw_ring_grps =
 		rte_cpu_to_le_16(bp->max_ring_grps / (num_vfs + 1));
 }
@@ -1904,7 +1914,11 @@ static void reserve_resources_from_vf(struct bnxt *bp,
 	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
 	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
 	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
-	bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
+	/* TODO:
+	 * While not supporting VMDq with VFs, max_vnics is always
+	 * forced to 1 in this case
+	 */
+	/* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
 }
 
@@ -2025,7 +2039,8 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 			RTE_LOG(ERR, PMD,
 				"Failed to initizlie VF %d.\n", i);
 			RTE_LOG(ERR, PMD,
-				"Not all VFs available.\n");
+				"Not all VFs available. (%d, %d)\n",
+				rc, resp->error_code);
 			break;
 		}
 
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index cddf17d..6f72a1b 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -121,9 +121,11 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 		}
 		/* For each pool, allocate MACVLAN CFA rule & VNIC */
 		if (!pools) {
+			pools = RTE_MIN(bp->max_vnics,
+			    RTE_MIN(bp->max_l2_ctx,
+			     RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
 			RTE_LOG(ERR, PMD,
-				"VMDq pool not set, defaulted to 64\n");
-			pools = ETH_64_POOLS;
+			    "VMDq pool not set, defaulted to %d\n", pools);
 		}
 		nb_q_per_grp = bp->rx_cp_nr_rings / pools;
 		start_grp_id = 1;
@@ -134,7 +136,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 			vnic = bnxt_alloc_vnic(bp);
 			if (!vnic) {
 				RTE_LOG(ERR, PMD,
-					"VNIC alloc failed\n");
+					"VNIC %d alloc failed\n", i);
 				rc = -ENOMEM;
 				goto err_out;
 			}