[2/4] net/mlx5: fix Tx queue release

Message ID 1602743893-345348-2-git-send-email-matan@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series [1/4] net/mlx5: fix Rx queue release

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Matan Azrad Oct. 15, 2020, 6:38 a.m. UTC
The HW objects of the Tx queue are created/destroyed in the device
start/stop stage, while the ethdev configurations for the Tx queue
start from the tx_queue_setup stage.
The PMD should save the last configurations it got from the ethdev
and apply them to the device in the dev_start operation.

Wrongly, the last code added to mitigate the reference counters did
not take the above rule into account and coupled the configurations
and HW objects so that they are created/destroyed together.

This causes a memory leak and other memory issues.

Make sure the HW object is released in the stop operation when there
is no reference to it, while the configurations stay saved.
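
To make the intended lifetime split concrete, here is a minimal sketch
of the release flow. The names (txq_ctrl, hw_obj, txq_release,
hw_obj_destroy) are illustrative stand-ins, not the real mlx5
identifiers; the actual logic lives in mlx5_txq_release() in the diff
below.

#include <stdint.h>
#include <stdlib.h>

struct hw_obj { int dummy; };                 /* Stand-in for the Tx queue HW object. */
static void hw_obj_destroy(struct hw_obj *o) { free(o); }

struct txq_ctrl {
	uint32_t refcnt;        /* Plain integer updated with __atomic builtins. */
	struct hw_obj *obj;     /* NULL while the device is stopped. */
	/* ... the saved ethdev configuration lives here ... */
};

/* Drop one reference. The HW object is destroyed as soon as no in-use
 * reference remains (device stop), but the control structure, which
 * holds the saved configuration, is freed only when the reference
 * count reaches zero (final queue release). */
static int
txq_release(struct txq_ctrl *ctrl)
{
	if (__atomic_sub_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
		return 1;                      /* Other users remain. */
	if (ctrl->obj) {
		hw_obj_destroy(ctrl->obj);     /* Stop: release HW state only. */
		ctrl->obj = NULL;
	}
	if (!__atomic_load_n(&ctrl->refcnt, __ATOMIC_RELAXED))
		free(ctrl);                    /* No reference left: free it all. */
	return 0;
}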

Fixes: 17a57183c0eb ("net/mlx5: mitigate Tx queue reference counters")

Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx.h |  2 +-
 drivers/net/mlx5/mlx5_txq.c  | 21 ++++++++++++---------
 2 files changed, 13 insertions(+), 10 deletions(-)
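
As a side change, the diff also converts the reference counter from the
legacy rte_atomic32 API to a plain uint32_t manipulated with GCC
__atomic builtins and relaxed ordering. A rough sketch of the mapping
(refcnt, ref_get, ref_put, ref_read are illustrative names, not driver
identifiers):

#include <stdint.h>

static uint32_t refcnt;

/* rte_atomic32_inc(&refcnt) becomes: */
static void ref_get(void)
{
	__atomic_add_fetch(&refcnt, 1, __ATOMIC_RELAXED);
}

/* rte_atomic32_dec_and_test(&refcnt) roughly becomes: */
static int ref_put(void)
{
	return __atomic_sub_fetch(&refcnt, 1, __ATOMIC_RELAXED) == 0;
}

/* rte_atomic32_read(&refcnt) becomes: */
static uint32_t ref_read(void)
{
	return __atomic_load_n(&refcnt, __ATOMIC_RELAXED);
}

Note that the release path in the patch checks the decremented value
against > 1 rather than == 0, precisely so the control structure can
outlive the HW object while a setup reference is still held.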

Patch

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index c3734e3..b243b6f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -269,7 +269,7 @@  enum mlx5_txq_type {
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	enum mlx5_txq_type type; /* The txq ctrl type. */
 	unsigned int max_inline_data; /* Max inline data. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index af84f5f..9c2dd2a 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1121,7 +1121,7 @@  struct mlx5_txq_ctrl *
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
@@ -1165,7 +1165,7 @@  struct mlx5_txq_ctrl *
 	tmpl->txq.idx = idx;
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1190,7 +1190,7 @@  struct mlx5_txq_ctrl *
 
 	if (txq_data) {
 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-		rte_atomic32_inc(&ctrl->refcnt);
+		__atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
 	}
 	return ctrl;
 }
@@ -1215,7 +1215,7 @@  struct mlx5_txq_ctrl *
 	if (!(*priv->txqs)[idx])
 		return 0;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (!rte_atomic32_dec_and_test(&txq_ctrl->refcnt))
+	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
 		return 1;
 	if (txq_ctrl->obj) {
 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1229,12 +1229,15 @@  struct mlx5_txq_ctrl *
 			txq_ctrl->txq.fcqs = NULL;
 		}
 		txq_free_elts(txq_ctrl);
-		mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
 	}
-	LIST_REMOVE(txq_ctrl, next);
-	mlx5_free(txq_ctrl);
-	(*priv->txqs)[idx] = NULL;
 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
+		LIST_REMOVE(txq_ctrl, next);
+		mlx5_free(txq_ctrl);
+		(*priv->txqs)[idx] = NULL;
+	}
 	return 0;
 }
 
@@ -1258,7 +1261,7 @@  struct mlx5_txq_ctrl *
 	if (!(*priv->txqs)[idx])
 		return -1;
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	return (rte_atomic32_read(&txq->refcnt) == 1);
+	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
 }
 
 /**