[2/7] net/bnxt: modify locking for representor Tx

Message ID 20240208171330.31139-3-ajit.khaparde@broadcom.com (mailing list archive)
State Accepted, archived
Delegated to: Ajit Khaparde
Series: bnxt bug fixes

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Ajit Khaparde Feb. 8, 2024, 5:13 p.m. UTC
  Currently the representor Tx function is synchronized using a per-device
lock. But that is not sufficient when there is simultaneous traffic on
the parent Tx ring and the representor rings.
Moreover, the representor Tx is not protected from incursions by parent
transmits, so parent Tx threads can cross over into the representor Tx
contexts. Prevent this by using per-TxQ locking, and protect not just
the representor Tx but also the parent Tx with the lock.
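
As a reader's aid, here is a minimal, self-contained sketch of the
per-TxQ locking pattern the patch introduces. It is condensed from the
diff below: the queue struct is abbreviated, struct rte_mbuf is left
opaque, and _bnxt_xmit_pkts is stubbed out, so it should not be read as
the full driver code.

/* Sketch of the per-TxQ locking added by this patch; lock-relevant
 * parts only.
 */
#include <pthread.h>
#include <stdint.h>

struct rte_mbuf;			/* opaque here; defined by DPDK */

struct bnxt_tx_queue {
	pthread_mutex_t txq_lock;	/* serializes parent and representor Tx */
	/* ... rings, stats, etc. elided ... */
};

/* Stand-in for the real unlocked worker, which fills Tx descriptors;
 * it must only run with txq_lock held.
 */
static uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts)
{
	(void)tx_queue;
	(void)tx_pkts;
	return nb_pkts;
}

/* Parent Tx burst: lock this queue only, not the whole device, so
 * separate Tx rings can transmit concurrently.
 */
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	uint16_t rc;

	pthread_mutex_lock(&txq->txq_lock);
	rc = _bnxt_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
	pthread_mutex_unlock(&txq->txq_lock);

	return rc;
}

The representor side does the same: bnxt_rep_tx_burst now takes the
parent TxQ's txq_lock (instead of the old per-device vfr_lock) before
injecting packets through _bnxt_xmit_pkts, so a parent thread and a
representor thread aimed at the same ring serialize on one mutex while
traffic on other rings proceeds in parallel.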

Fixes: 6dc83230b43b ("net/bnxt: support port representor data path")

Cc: stable@dpdk.org
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Pete Spreadborough <peter.spreadborough@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |  1 -
 drivers/net/bnxt/bnxt_ethdev.c | 11 +----------
 drivers/net/bnxt/bnxt_reps.c   |  6 +++---
 drivers/net/bnxt/bnxt_txq.c    |  6 ++++++
 drivers/net/bnxt/bnxt_txq.h    |  1 +
 drivers/net/bnxt/bnxt_txr.c    | 13 +++++++++++++
 drivers/net/bnxt/bnxt_txr.h    |  4 +++-
 7 files changed, 27 insertions(+), 15 deletions(-)

Patch

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 42ecca35d7..3b3df6ba28 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -623,7 +623,6 @@  struct bnxt_mark_info {
 
 struct bnxt_rep_info {
 	struct rte_eth_dev	*vfr_eth_dev;
-	pthread_mutex_t		vfr_lock;
 	pthread_mutex_t		vfr_start_lock;
 	bool			conduit_valid;
 };
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index abe46e8004..fecc8666e8 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1803,10 +1803,8 @@  bnxt_uninit_locks(struct bnxt *bp)
 	pthread_mutex_destroy(&bp->def_cp_lock);
 	pthread_mutex_destroy(&bp->health_check_lock);
 	pthread_mutex_destroy(&bp->err_recovery_lock);
-	if (bp->rep_info) {
-		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
+	if (bp->rep_info)
 		pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
-	}
 }
 
 static void bnxt_drv_uninit(struct bnxt *bp)
@@ -6534,13 +6532,6 @@  static int bnxt_init_rep_info(struct bnxt *bp)
 	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
 		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
 
-	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
-	if (rc) {
-		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
-		bnxt_free_rep_info(bp);
-		return rc;
-	}
-
 	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
 	if (rc) {
 		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index d96d972904..3a4720bc3c 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -124,8 +124,8 @@  bnxt_rep_tx_burst(void *tx_queue,
 	qid = vfr_txq->txq->queue_id;
 	vf_rep_bp = vfr_txq->bp;
 	parent = vf_rep_bp->parent_dev->data->dev_private;
-	pthread_mutex_lock(&parent->rep_info->vfr_lock);
 	ptxq = parent->tx_queues[qid];
+	pthread_mutex_lock(&ptxq->txq_lock);
 
 	ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
 
@@ -134,9 +134,9 @@  bnxt_rep_tx_burst(void *tx_queue,
 		vf_rep_bp->tx_pkts[qid]++;
 	}
 
-	rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
+	rc = _bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
 	ptxq->vfr_tx_cfa_action = 0;
-	pthread_mutex_unlock(&parent->rep_info->vfr_lock);
+	pthread_mutex_unlock(&ptxq->txq_lock);
 
 	return rc;
 }
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 7d91e88c9d..05032f7807 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -114,6 +114,7 @@  void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 		txq->mz = NULL;
 
 		rte_free(txq->free);
+		pthread_mutex_destroy(&txq->txq_lock);
 		rte_free(txq);
 		dev->data->tx_queues[queue_idx] = NULL;
 	}
@@ -197,6 +198,11 @@  int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 		goto err;
 	}
 
+	rc = pthread_mutex_init(&txq->txq_lock, NULL);
+	if (rc != 0) {
+		PMD_DRV_LOG(ERR, "TxQ mutex init failed!");
+		goto err;
+	}
 	return 0;
 err:
 	bnxt_tx_queue_release_op(eth_dev, queue_idx);
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index 3a483ad5c3..9e54985c4c 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -26,6 +26,7 @@  struct bnxt_tx_queue {
 	int			index;
 	int			tx_wake_thresh;
 	uint32_t		vfr_tx_cfa_action;
+	pthread_mutex_t		txq_lock;
 	struct bnxt_tx_ring_info	*tx_ring;
 
 	unsigned int		cp_nr_rings;
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index d74d271d91..7fc44e989d 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -567,6 +567,19 @@  static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
 
 uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			       uint16_t nb_pkts)
+{
+	struct bnxt_tx_queue *txq = tx_queue;
+	uint16_t rc;
+
+	pthread_mutex_lock(&txq->txq_lock);
+	rc = _bnxt_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+	pthread_mutex_unlock(&txq->txq_lock);
+
+	return rc;
+}
+
+uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			 uint16_t nb_pkts)
 {
 	int rc;
 	uint16_t nb_tx_pkts = 0;
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index e64ea2c7d1..09078d545d 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -47,7 +47,9 @@  void bnxt_free_tx_rings(struct bnxt *bp);
 int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
 int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
 uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-			       uint16_t nb_pkts);
+			uint16_t nb_pkts);
+uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			 uint16_t nb_pkts);
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
 uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			    uint16_t nb_pkts);