[dpdk-dev,2/3] net/bnxt: fix number of mac addresses for VMDQ support

Message ID 20171005142803.2143-3-ajit.khaparde@broadcom.com (mailing list archive)
State Superseded, archived
Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  fail      apply patch file failure

Commit Message

Ajit Khaparde Oct. 5, 2017, 2:28 p.m. UTC
  The maximum number of MAC addresses was hardcoded to 32, while the HW
can support more than that. This restricted the number of VMDq pools
we could support. Use the value reported by the FW instead.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt_ethdev.c |  6 +++---
 drivers/net/bnxt/bnxt_hwrm.c   |  2 +-
 drivers/net/bnxt/bnxt_rxq.c    | 23 +++++++++++++++++++----
 3 files changed, 23 insertions(+), 8 deletions(-)
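
For context, below is a minimal application-side sketch of the configuration this
patch enables. It is an illustration, not part of the patch: the helper name
setup_vmdq_pools() is hypothetical, it assumes the DPDK 17.x ethdev API
(struct ether_addr, ETH_MQ_RX_VMDQ_ONLY, one RX queue per pool; later releases
renamed some of these symbols), and error handling is reduced to early returns.
It queries the now FW-derived max_mac_addrs limit, maps one VLAN to each VMDq
pool (the mapping the new loop in bnxt_mq_rx_configure() programs into the L2
filter), and adds one unicast MAC per pool.

#include <string.h>

#include <rte_ethdev.h>

/*
 * Hypothetical helper, not part of the patch: configure 'nb_pools' VMDq
 * pools on 'port_id', steer one VLAN to each pool and give each pool its
 * own unicast MAC address. Assumes nb_pools <= ETH_VMDQ_MAX_VLAN_FILTERS.
 */
static int
setup_vmdq_pools(uint16_t port_id, uint16_t nb_pools)
{
	struct rte_eth_conf port_conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_ONLY },
	};
	struct rte_eth_vmdq_rx_conf *vmdq = &port_conf.rx_adv_conf.vmdq_rx_conf;
	struct rte_eth_dev_info dev_info;
	struct ether_addr mac;
	uint16_t i;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* With this patch max_mac_addrs reflects the FW-reported limit
	 * (bp->max_l2_ctx) instead of the old hardcoded 32. */
	if (nb_pools > dev_info.max_mac_addrs)
		return -EINVAL;

	vmdq->nb_queue_pools = (enum rte_eth_nb_pools)nb_pools;
	vmdq->nb_pool_maps = (uint8_t)nb_pools;
	for (i = 0; i < nb_pools; i++) {
		/* VLAN i + 1 steers traffic to pool i; the new loop in
		 * bnxt_mq_rx_configure() programs this into the L2 filter. */
		vmdq->pool_map[i].vlan_id = i + 1;
		vmdq->pool_map[i].pools = 1ULL << i;
	}

	/* One RX queue per pool keeps the sketch simple. */
	ret = rte_eth_dev_configure(port_id, nb_pools, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* One extra unicast MAC per pool; this is where the old 32-entry
	 * cap used to bite. */
	for (i = 0; i < nb_pools; i++) {
		memset(&mac, 0, sizeof(mac));
		mac.addr_bytes[0] = 0x02;	/* locally administered */
		mac.addr_bytes[5] = (uint8_t)i;
		ret = rte_eth_dev_mac_addr_add(port_id, &mac, i);
		if (ret != 0)
			return ret;
	}

	return 0;
}

With the old hardcoded MAX_NUM_MAC_ADDR of 32, the per-pool MAC additions hit
the software cap well before the HW did; sizing the table from bp->max_l2_ctx
lets the pool count scale with what the FW actually reports.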
  

Patch

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index ef5c8cc99..aae032707 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -416,7 +416,7 @@  static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
 	/* MAC Specifics */
-	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
+	dev_info->max_mac_addrs = bp->max_l2_ctx;
 	dev_info->max_hash_mac_addrs = 0;
 
 	/* PF/VF specifics */
@@ -2525,11 +2525,11 @@  bnxt_dev_init(struct rte_eth_dev *eth_dev)
 		goto error_free;
 	}
 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
-					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
+					ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		RTE_LOG(ERR, PMD,
 			"Failed to alloc %u bytes needed to store MAC addr tbl",
-			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
+			ETHER_ADDR_LEN * bp->max_l2_ctx);
 		rc = -ENOMEM;
 		goto error_free;
 	}
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d379850bb..e66994a76 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -259,7 +259,7 @@  int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	 * by ethtool.
 	 */
 	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
-		mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
 	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
 	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 0c4e0f6ec..bfa5308ad 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -60,6 +60,8 @@  void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
 int bnxt_mq_rx_configure(struct bnxt *bp)
 {
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+	const struct rte_eth_vmdq_rx_conf *conf =
+		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
 	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
 	int start_grp_id, end_grp_id = 1, rc = 0;
 	struct bnxt_vnic_info *vnic;
@@ -102,9 +104,6 @@  int bnxt_mq_rx_configure(struct bnxt *bp)
 	/* Multi-queue mode */
 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
 		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
-		const struct rte_eth_vmdq_rx_conf *conf =
-		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
-
 
 		switch (dev_conf->rxmode.mq_mode) {
 		case ETH_MQ_RX_VMDQ_RSS:
@@ -156,8 +155,13 @@  int bnxt_mq_rx_configure(struct bnxt *bp)
 			rxq = bp->eth_dev->data->rx_queues[ring_idx];
 			rxq->vnic = vnic;
 		}
-		if (i == 0)
+		if (i == 0) {
+			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+				bp->eth_dev->data->promiscuous = 1;
+				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
+			}
 			vnic->func_default = true;
+		}
 		vnic->ff_pool_idx = i;
 		vnic->start_grp_id = start_grp_id;
 		vnic->end_grp_id = end_grp_id;
@@ -172,6 +176,17 @@  int bnxt_mq_rx_configure(struct bnxt *bp)
 			rc = -ENOMEM;
 			goto err_out;
 		}
+		for (j = 0; j < conf->nb_pool_maps; j++) {
+			if (conf->pool_map[j].pools & (1UL << i)) {
+				RTE_LOG(ERR, PMD,
+					"Add vlan %u to vmdq pool %u\n",
+					conf->pool_map[j].vlan_id, i);
+
+				filter->l2_ivlan = conf->pool_map[j].vlan_id;
+				filter->enables |=
+				HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+			}
+		}
 		/*
 		 * TODO: Configure & associate CFA rule for
 		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC