[14/18] net/bnxt: add tunnel TPA support

Message ID: 20231221180529.18687-15-ajit.khaparde@broadcom.com
State: Superseded, archived
Delegated to: Ajit Khaparde
Series: bnxt patchset

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Ajit Khaparde Dec. 21, 2023, 6:05 p.m. UTC
  From: Damodharam Ammepalli <damodharam.ammepalli@broadcom.com>

This patch adds support for the tunnel TPA type.
Tunnel TPA support is negotiated with the firmware: the firmware
advertises the capability of the underlying hardware through the
HW_TUNNEL_TPA_CAP flag in the HWRM_VNIC_QCAPS response, and the
driver then sets the new tnl_tpa_en bit (bit 4) in
hwrm_vnic_tpa_cfg_input->enables when it configures TPA.
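
In short, the handshake added by this patch works as follows (a
condensed sketch assembled from the hunks below; all identifiers are
taken from the diff):

    /* 1. Discovery: bnxt_hwrm_vnic_qcaps() caches the firmware
     * capability at init time.
     */
    if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_HW_TUNNEL_TPA_CAP)
            bp->vnic_cap_flags |= BNXT_VNIC_CAP_VNIC_TUNNEL_TPA;

    /* 2. Configuration: the tunnel bitmap is handed to the firmware
     * only if the capability was advertised; otherwise the
     * HWRM_VNIC_TPA_CFG request is sent without TNL_TPA_EN set.
     */
    if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VNIC_TUNNEL_TPA) {
            req->enables |= rte_cpu_to_le_32(
                    HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN);
            req->tnl_tpa_en_bitmap = rte_cpu_to_le_32(tunl_tpa_bmap);
    }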

The HWRM_VNIC_TPA_CFG request is updated to set the VXLAN, Geneve
and default (GRE, IPv4, IPv6) tunnel-type bits in the
tnl_tpa_en_bitmap field.
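
For example, with a VXLAN destination port programmed and no Geneve
port, bnxt_vnic_update_tunl_tpa_bmap() (added below) composes the
bitmap as:

    tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP   /* GRE | IPV4 | IPV6 */
                  | HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN
                  | HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE;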

The patch also handles the TPA v3 completion type, which the P7
devices support.
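
On the receive side, bnxt_rx_pkt() now also accepts
RX_TPA_START_V3_CMPL_TYPE_RX_TPA_START_V3 as a TPA start completion.
For the P7 max_agg_segs computation added below, a worked example at
the default 1500-byte MTU (the 40 bytes subtracted are presumably the
IPv4 plus TCP header overhead; the macro values follow from bnxt.h):

    mss   = 1500 - 40 = 1460
    n     = PAGE_SIZE / mss = 4096 / 1460 = 2        /* mss <= PAGE_SIZE */
    nsegs = (MAX_MBUF_FRAGS - 1) * n = (17 - 1) * 2 = 32
    segs  = rte_log2_u32(32) = 5                     /* log2 units */

with MAX_MBUF_FRAGS = MAX_PAGES / PAGE_SIZE + 1 = 65536 / 4096 + 1 = 17.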

Signed-off-by: Damodharam Ammepalli <damodharam.ammepalli@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt.h      | 13 +++++++
 drivers/net/bnxt/bnxt_hwrm.c | 71 ++++++++++++++++++++++++++++++++++++
 drivers/net/bnxt/bnxt_rxr.c  |  9 +++--
 drivers/net/bnxt/bnxt_vnic.c | 16 ++++++++
 4 files changed, 106 insertions(+), 3 deletions(-)
  

Patch

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 576688bbff..eb3142cf09 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -119,6 +119,18 @@ 
 	(BNXT_CHIP_P5_P7(bp) ? TPA_MAX_SEGS_TH : \
 			      TPA_MAX_SEGS)
 
+/* Size TPA aggs and segs dynamically per the spec and DPDK mbuf frag limits */
+#define MAX_PAGES		65536
+#define PAGE_SHIFT              12
+/* pagesize is 1UL << PAGE_SHIFT */
+#define PAGE_SIZE               BIT(12)
+#define PAGE_MASK               (~(PAGE_SIZE - 1))
+#if ((MAX_PAGES / PAGE_SIZE + 1) < 16)
+#define MAX_MBUF_FRAGS 16UL
+#else
+#define MAX_MBUF_FRAGS (MAX_PAGES / PAGE_SIZE + 1)
+#endif
+
 /*
  * Define the number of async completion rings to be used. Set to zero for
  * configurations in which the maximum number of packet completion rings
@@ -815,6 +827,7 @@  struct bnxt {
 #define BNXT_VNIC_CAP_ESP_SPI6_CAP	BIT(12)
 #define BNXT_VNIC_CAP_AH_SPI_CAP	(BNXT_VNIC_CAP_AH_SPI4_CAP | BNXT_VNIC_CAP_AH_SPI6_CAP)
 #define BNXT_VNIC_CAP_ESP_SPI_CAP	(BNXT_VNIC_CAP_ESP_SPI4_CAP | BNXT_VNIC_CAP_ESP_SPI6_CAP)
+#define BNXT_VNIC_CAP_VNIC_TUNNEL_TPA	BIT(13)
 
 	unsigned int		rx_nr_rings;
 	unsigned int		rx_cp_nr_rings;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 3c16abea69..0165a534af 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1046,6 +1046,9 @@  int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_ESP_SPI6_CAP;
 
+	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_HW_TUNNEL_TPA_CAP)
+		bp->vnic_cap_flags |= BNXT_VNIC_CAP_VNIC_TUNNEL_TPA;
+
 	bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
 
 	HWRM_UNLOCK();
@@ -2666,6 +2669,30 @@  int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 	return rc;
 }
 
+#define BNXT_DFLT_TUNL_TPA_BMAP					\
+	(HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE |	\
+	 HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV4 |	\
+	 HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnxt_vnic_update_tunl_tpa_bmap(struct bnxt *bp,
+					   struct hwrm_vnic_tpa_cfg_input *req)
+{
+	uint32_t tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
+
+	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_VNIC_TUNNEL_TPA))
+		return;
+
+	if (bp->vxlan_port_cnt)
+		tunl_tpa_bmap |= HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN |
+			HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+
+	if (bp->geneve_port_cnt)
+		tunl_tpa_bmap |= HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE;
+
+	req->enables |= rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN);
+	req->tnl_tpa_en_bitmap = rte_cpu_to_le_32(tunl_tpa_bmap);
+}
+
 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
 			struct bnxt_vnic_info *vnic, bool enable)
 {
@@ -2714,6 +2741,26 @@  int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
 
 		if (BNXT_CHIP_P5_P7(bp))
 			req.max_aggs = rte_cpu_to_le_16(bp->max_tpa_v2);
+
+		/* For TPA v2, the spec expects max_agg_segs derived from the MSS, in log2 units */
+		if (BNXT_CHIP_P7(bp)) {
+			uint32_t nsegs, n, segs = 0;
+			uint16_t mss = bp->eth_dev->data->mtu - 40;
+
+			/* Calculate the number of segs based on mss */
+			if (mss <= PAGE_SIZE) {
+				n = PAGE_SIZE / mss;
+				nsegs = (MAX_MBUF_FRAGS - 1) * n;
+			} else {
+				n = mss / PAGE_SIZE;
+				if (mss & (PAGE_SIZE - 1))
+					n++;
+				nsegs = (MAX_MBUF_FRAGS - n) / n;
+			}
+			segs = rte_log2_u32(nsegs);
+			req.max_agg_segs = rte_cpu_to_le_16(segs);
+		}
+		bnxt_vnic_update_tunl_tpa_bmap(bp, &req);
 	}
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
@@ -4242,6 +4289,27 @@  int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
 	return rc;
 }
 
+static int bnxt_hwrm_set_tpa(struct bnxt *bp)
+{
+	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+	uint64_t rx_offloads = dev_conf->rxmode.offloads;
+	bool tpa_flags = 0;
+	int rc, i;
+
+	tpa_flags = (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? true : false;
+	for (i = 0; i < bp->max_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+			continue;
+
+		rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, tpa_flags);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
 				uint8_t tunnel_type)
 {
@@ -4278,6 +4346,8 @@  int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
 
 	HWRM_UNLOCK();
 
+	bnxt_hwrm_set_tpa(bp);
+
 	return rc;
 }
 
@@ -4346,6 +4416,7 @@  int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
 		bp->ecpri_port_cnt = 0;
 	}
 
+	bnxt_hwrm_set_tpa(bp);
 	return rc;
 }
 
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index d0706874a6..3542975600 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -153,7 +153,8 @@  static void bnxt_rx_ring_reset(void *arg)
 		rxr = rxq->rx_ring;
 		/* Disable and flush TPA before resetting the RX ring */
 		if (rxr->tpa_info)
-			bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, false);
+			bnxt_vnic_tpa_cfg(bp, rxq->queue_id, false);
+
 		rc = bnxt_hwrm_rx_ring_reset(bp, i);
 		if (rc) {
 			PMD_DRV_LOG(ERR, "Rx ring%d reset failed\n", i);
@@ -163,12 +164,13 @@  static void bnxt_rx_ring_reset(void *arg)
 		bnxt_rx_queue_release_mbufs(rxq);
 		rxr->rx_raw_prod = 0;
 		rxr->ag_raw_prod = 0;
+		rxr->ag_cons = 0;
 		rxr->rx_next_cons = 0;
 		bnxt_init_one_rx_ring(rxq);
 		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
 		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
 		if (rxr->tpa_info)
-			bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, true);
+			bnxt_vnic_tpa_cfg(bp, rxq->queue_id, true);
 
 		rxq->in_reset = 0;
 	}
@@ -1151,7 +1153,8 @@  static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 		return -EBUSY;
 
 	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
-	    cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2) {
+	    cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2 ||
+	    cmp_type == RX_TPA_START_V3_CMPL_TYPE_RX_TPA_START_V3) {
 		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
 			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
 		rc = -EINVAL; /* Continue w/o new mbuf */
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 5ea34f7cb6..5092a7d774 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -464,7 +464,9 @@  bnxt_vnic_queue_delete(struct bnxt *bp, uint16_t vnic_idx)
 static struct bnxt_vnic_info*
 bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
 {
+	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+	uint64_t rx_offloads = dev_conf->rxmode.offloads;
 	struct bnxt_vnic_info *vnic;
 	struct bnxt_rx_queue *rxq = NULL;
 	int32_t rc = -EINVAL;
@@ -523,6 +525,12 @@  bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
 		goto cleanup;
 	}
 
+	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
+				   (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
+				    true : false);
+	if (rc)
+		PMD_DRV_LOG(DEBUG, "Failed to configure TPA on this vnic %d\n", q_index);
+
 	rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 	if (rc) {
 		PMD_DRV_LOG(DEBUG, "Failed to configure vnic plcmode %d\n",
@@ -658,7 +666,9 @@  bnxt_vnic_rss_create(struct bnxt *bp,
 		     struct bnxt_vnic_rss_info *rss_info,
 		     uint16_t vnic_id)
 {
+	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+	uint64_t rx_offloads = dev_conf->rxmode.offloads;
 	struct bnxt_vnic_info *vnic;
 	struct bnxt_rx_queue *rxq = NULL;
 	uint32_t idx, nr_ctxs, config_rss = 0;
@@ -741,6 +751,12 @@  bnxt_vnic_rss_create(struct bnxt *bp,
 		goto fail_cleanup;
 	}
 
+	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
+				   (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
+				    true : false);
+	if (rc)
+		PMD_DRV_LOG(DEBUG, "Failed to configure TPA on this vnic %d\n", idx);
+
 	rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 	if (rc) {
 		PMD_DRV_LOG(ERR, "Failed to configure vnic plcmode %d\n",