diff mbox series

[04/11] net/mlx5: split multiple packet Rq memory pool

Message ID 20210926111904.237736-5-xuemingl@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Headers show
Series net/mlx5: support shared Rx queue | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Xueming Li Sept. 26, 2021, 11:18 a.m. UTC
Port info is invisible from a shared Rx queue, so split the MPRQ mempool
from per-device to per-Rx-queue scope; also change the pool flag to MP_SC.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
 drivers/net/mlx5/mlx5.c         |   1 -
 drivers/net/mlx5/mlx5_rx.h      |   4 +-
 drivers/net/mlx5/mlx5_rxq.c     | 109 ++++++++++++--------------------
 drivers/net/mlx5/mlx5_trigger.c |  10 ++-
 4 files changed, 47 insertions(+), 77 deletions(-)
diff mbox series

Patch

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index f84e061fe71..3abb8c97e76 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1602,7 +1602,6 @@  mlx5_dev_close(struct rte_eth_dev *dev)
 		mlx5_drop_action_destroy(dev);
 	if (priv->mreg_cp_tbl)
 		mlx5_hlist_destroy(priv->mreg_cp_tbl);
-	mlx5_mprq_free_mp(dev);
 	if (priv->sh->ct_mng)
 		mlx5_flow_aso_ct_mng_close(priv->sh);
 	mlx5_os_free_shared_dr(priv);
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index d44c8078dea..a8e0c3162b0 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -179,8 +179,8 @@  struct mlx5_rxq_ctrl {
 extern uint8_t rss_hash_default_key[];
 
 unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
-int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
-int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
+int mlx5_mprq_free_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl);
+int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl);
 int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
 int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
 int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 7e97cdd4bc0..14de8d0e6a4 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1087,7 +1087,7 @@  mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
 }
 
 /**
- * Free mempool of Multi-Packet RQ.
+ * Free RXQ mempool of Multi-Packet RQ.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -1096,16 +1096,15 @@  mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
  *   0 on success, negative errno value on failure.
  */
 int
-mlx5_mprq_free_mp(struct rte_eth_dev *dev)
+mlx5_mprq_free_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_mempool *mp = priv->mprq_mp;
-	unsigned int i;
+	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+	struct rte_mempool *mp = rxq->mprq_mp;
 
 	if (mp == NULL)
 		return 0;
-	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
-		dev->data->port_id, mp->name);
+	DRV_LOG(DEBUG, "port %u queue %hu freeing mempool (%s) for Multi-Packet RQ",
+		dev->data->port_id, rxq->idx, mp->name);
 	/*
 	 * If a buffer in the pool has been externally attached to a mbuf and it
 	 * is still in use by application, destroying the Rx queue can spoil
@@ -1123,34 +1122,28 @@  mlx5_mprq_free_mp(struct rte_eth_dev *dev)
 		return -rte_errno;
 	}
 	rte_mempool_free(mp);
-	/* Unset mempool for each Rx queue. */
-	for (i = 0; i != priv->rxqs_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-
-		if (rxq == NULL)
-			continue;
-		rxq->mprq_mp = NULL;
-	}
-	priv->mprq_mp = NULL;
+	rxq->mprq_mp = NULL;
 	return 0;
 }
 
 /**
- * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
- * mempool. If already allocated, reuse it if there're enough elements.
+ * Allocate RXQ a mempool for Multi-Packet RQ.
+ * If already allocated, reuse it if there're enough elements.
  * Otherwise, resize it.
  *
  * @param dev
  *   Pointer to Ethernet device.
+ * @param rxq_ctrl
+ *   Pointer to RXQ.
  *
  * @return
  *   0 on success, negative errno value on failure.
  */
 int
-mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+mlx5_mprq_alloc_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_mempool *mp = priv->mprq_mp;
+	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+	struct rte_mempool *mp = rxq->mprq_mp;
 	char name[RTE_MEMPOOL_NAMESIZE];
 	unsigned int desc = 0;
 	unsigned int buf_len;
@@ -1158,28 +1151,15 @@  mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 	unsigned int obj_size;
 	unsigned int strd_num_n = 0;
 	unsigned int strd_sz_n = 0;
-	unsigned int i;
-	unsigned int n_ibv = 0;
 
-	if (!mlx5_mprq_enabled(dev))
+	if (rxq_ctrl == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
 		return 0;
-	/* Count the total number of descriptors configured. */
-	for (i = 0; i != priv->rxqs_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
-			(rxq, struct mlx5_rxq_ctrl, rxq);
-
-		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
-			continue;
-		n_ibv++;
-		desc += 1 << rxq->elts_n;
-		/* Get the max number of strides. */
-		if (strd_num_n < rxq->strd_num_n)
-			strd_num_n = rxq->strd_num_n;
-		/* Get the max size of a stride. */
-		if (strd_sz_n < rxq->strd_sz_n)
-			strd_sz_n = rxq->strd_sz_n;
-	}
+	/* Number of descriptors configured. */
+	desc = 1 << rxq->elts_n;
+	/* Get the max number of strides. */
+	strd_num_n = rxq->strd_num_n;
+	/* Get the max size of a stride. */
+	strd_sz_n = rxq->strd_sz_n;
 	MLX5_ASSERT(strd_num_n && strd_sz_n);
 	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
 	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
@@ -1196,7 +1176,7 @@  mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 	 * this Mempool gets available again.
 	 */
 	desc *= 4;
-	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
+	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ;
 	/*
 	 * rte_mempool_create_empty() has sanity check to refuse large cache
 	 * size compared to the number of elements.
@@ -1209,50 +1189,41 @@  mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
 			dev->data->port_id, mp->name);
 		/* Reuse. */
-		goto exit;
-	} else if (mp != NULL) {
-		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
-			dev->data->port_id, mp->name);
+		return 0;
+	}
+	if (mp != NULL) {
+		DRV_LOG(DEBUG, "port %u queue %u mempool %s should be resized, freeing it",
+			dev->data->port_id, rxq->idx, mp->name);
 		/*
 		 * If failed to free, which means it may be still in use, no way
 		 * but to keep using the existing one. On buffer underrun,
 		 * packets will be memcpy'd instead of external buffer
 		 * attachment.
 		 */
-		if (mlx5_mprq_free_mp(dev)) {
+		if (mlx5_mprq_free_mp(dev, rxq_ctrl) != 0) {
 			if (mp->elt_size >= obj_size)
-				goto exit;
+				return 0;
 			else
 				return -rte_errno;
 		}
 	}
-	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
+	snprintf(name, sizeof(name), "port-%u-queue-%hu-mprq",
+		 dev->data->port_id, rxq->idx);
 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
 				0, NULL, NULL, mlx5_mprq_buf_init,
-				(void *)((uintptr_t)1 << strd_num_n),
-				dev->device->numa_node, 0);
+				(void *)(uintptr_t)(1 << strd_num_n),
+				dev->device->numa_node, MEMPOOL_F_SC_GET);
 	if (mp == NULL) {
 		DRV_LOG(ERR,
-			"port %u failed to allocate a mempool for"
+			"port %u queue %hu failed to allocate a mempool for"
 			" Multi-Packet RQ, count=%u, size=%u",
-			dev->data->port_id, obj_num, obj_size);
+			dev->data->port_id, rxq->idx, obj_num, obj_size);
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
-	priv->mprq_mp = mp;
-exit:
-	/* Set mempool for each Rx queue. */
-	for (i = 0; i != priv->rxqs_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
-			(rxq, struct mlx5_rxq_ctrl, rxq);
-
-		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
-			continue;
-		rxq->mprq_mp = mp;
-	}
-	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
-		dev->data->port_id);
+	rxq->mprq_mp = mp;
+	DRV_LOG(INFO, "port %u queue %hu Multi-Packet RQ is configured",
+		dev->data->port_id, rxq->idx);
 	return 0;
 }
 
@@ -1717,8 +1688,10 @@  mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 		dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 	if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
-		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
 			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+			mlx5_mprq_free_mp(dev, rxq_ctrl);
+		}
 		LIST_REMOVE(rxq_ctrl, next);
 		mlx5_free(rxq_ctrl);
 		(*priv->rxqs)[idx] = NULL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index c3adf5082e6..0753dbad053 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -138,11 +138,6 @@  mlx5_rxq_start(struct rte_eth_dev *dev)
 	unsigned int i;
 	int ret = 0;
 
-	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
-	if (mlx5_mprq_alloc_mp(dev)) {
-		/* Should not release Rx queues but return immediately. */
-		return -rte_errno;
-	}
 	DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
 		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
 	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
@@ -153,8 +148,11 @@  mlx5_rxq_start(struct rte_eth_dev *dev)
 		if (!rxq_ctrl)
 			continue;
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			/* Pre-register Rx mempools. */
 			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+				/* Allocate/reuse/resize mempool for MPRQ. */
+				if (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)
+					goto error;
+				/* Pre-register Rx mempools. */
 				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
 						  rxq_ctrl->rxq.mprq_mp);
 			} else {