[v2] common/mlx5: fix MPRQ mempool registration

Message ID 20211116115545.2441025-1-dkozlyuk@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Headers
Series [v2] common/mlx5: fix MPRQ mempool registration |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/github-robot: build success github build: passed
ci/intel-Testing success Testing PASS
ci/iol-testing warning apply patch failure

Commit Message

Dmitry Kozlyuk Nov. 16, 2021, 11:55 a.m. UTC
  Mempool registration code had a wrong assumption
that it is always dealing with packet mempools
and always called rte_pktmbuf_priv_flags(),
which returned a random value for different types of mempools.
In particular, it could consider MPRQ mempools
as having externally pinned buffers, which is wrong.
Packet mempools cannot be reliably recognized,
but it is sufficient to check that the mempool is not a packet one:
in that case it cannot have externally pinned buffers.
This is checked by comparing the mempool private data size to that of packet mempools.

Fixes: 690b2a88c2f7 ("common/mlx5: add mempool registration facilities")
Fixes: fec28ca0e3a9 ("net/mlx5: support mempool registration")

Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
v2: improve logging, add ack.

 drivers/common/mlx5/mlx5_common.c    |  3 +--
 drivers/common/mlx5/mlx5_common_mr.c |  5 ++++-
 drivers/common/mlx5/mlx5_common_mr.h | 10 ++++++++++
 drivers/net/mlx5/mlx5_trigger.c      |  5 +++--
 4 files changed, 18 insertions(+), 5 deletions(-)
  

Comments

Raslan Darawsheh Nov. 16, 2021, 4:04 p.m. UTC | #1
Hi,
> -----Original Message-----
> From: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
> Sent: Tuesday, November 16, 2021 1:56 PM
> To: dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; Raslan Darawsheh
> <rasland@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>
> Subject: [PATCH v2] common/mlx5: fix MPRQ mempool registration
> 
> Mempool registration code had a wrong assumption
> that it is always dealing with packet mempools
> and always called rte_pktmbuf_priv_flags(),
> which returned a random value for different types of mempools.
> In particular, it could consider MPRQ mempools
> as having externally pinned buffers, which is wrong.
> Packet mempools cannot be reliably recognized,
> but it is sufficient to check that the mempool is not a packet one,
> so it cannot have externally pinned buffers.
> Compare mempool private data size to that of packet mempools to check.
> 
> Fixes: 690b2a88c2f7 ("common/mlx5: add mempool registration facilities")
> Fixes: fec28ca0e3a9 ("net/mlx5: support mempool registration")
> 
> Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
> v2: improve logging, add ack.

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
  

Patch

diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c
index b9ed5ee676..66c2c08b7d 100644
--- a/drivers/common/mlx5/mlx5_common.c
+++ b/drivers/common/mlx5/mlx5_common.c
@@ -390,8 +390,7 @@  mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
 			  void *arg)
 {
 	struct mlx5_common_device *cdev = arg;
-	bool extmem = rte_pktmbuf_priv_flags(mp) &
-		      RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
+	bool extmem = mlx5_mempool_is_extmem(mp);
 
 	switch (event) {
 	case RTE_MEMPOOL_EVENT_READY:
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 49feea4474..a7a499f6f9 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -1302,6 +1302,7 @@  mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out,
 	struct mlx5_range *chunks;
 	unsigned int n;
 
+	DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name);
 	n = mp->nb_mem_chunks;
 	chunks = calloc(sizeof(chunks[0]), n);
 	if (chunks == NULL)
@@ -1382,6 +1383,8 @@  mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
 {
 	struct mlx5_mempool_get_extmem_data data;
 
+	DRV_LOG(DEBUG, "Recovering external pinned pages of mempool %s",
+		mp->name);
 	memset(&data, 0, sizeof(data));
 	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, &data);
 	if (data.ret < 0)
@@ -1414,7 +1417,7 @@  mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
 	int ret;
 
 	/* Collect the pool underlying memory. */
-	ret = (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) ?
+	ret = mlx5_mempool_is_extmem(mp) ?
 	      mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :
 	      mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);
 	if (ret < 0)
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index dc7ddc3513..442b9d4694 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -263,4 +263,14 @@  int
 mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
 			   struct rte_mempool *mp);
 
+/** Check if @p mp has buffers pinned in external memory. */
+static inline bool
+mlx5_mempool_is_extmem(struct rte_mempool *mp)
+{
+	return (mp->private_data_size ==
+		sizeof(struct rte_pktmbuf_pool_private)) &&
+	       (mp->elt_size >= sizeof(struct rte_mbuf)) &&
+	       (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF);
+}
+
 #endif /* RTE_PMD_MLX5_COMMON_MR_H_ */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 1952d68444..4440a765d9 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -149,13 +149,14 @@  mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
 		uint32_t flags;
 
 		mp = rxq_ctrl->rxq.rxseg[s].mp;
-		flags = rte_pktmbuf_priv_flags(mp);
+		flags = mp != rxq_ctrl->rxq.mprq_mp ?
+			rte_pktmbuf_priv_flags(mp) : 0;
 		ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp);
 		if (ret < 0 && rte_errno != EEXIST)
 			return ret;
 		if ((flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) == 0)
 			rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
-					&rxq_ctrl->rxq);
+					     &rxq_ctrl->rxq);
 	}
 	return 0;
 }