[v1,3/4] net/mlx5: add MR callbacks in per-device cache

Message ID 20200616094446.7152-4-ophirmu@mellanox.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series mlx5 MR refactor

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Ophir Munk June 16, 2020, 9:44 a.m. UTC
  Prior to this commit, MR operations were Verbs-based and hard-coded under
the common/mlx5/linux directory. This commit enables upper layers (e.g.
net/mlx5) to determine which MR operations to use. For example, the net
layer could set DevX-based MR operations in non-Linux environments. The
reg_mr and dereg_mr callbacks are added to the global per-device MR
cache, 'struct mlx5_mr_share_cache'.
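
As an illustration only (not part of this patch), a non-Linux OS layer could
plug DevX-flavoured registration routines into the shared cache through the
same hooks. In the sketch below, devx_reg_mr()/devx_dereg_mr() are
hypothetical placeholders; only the mlx5_reg_mr_t/mlx5_dereg_mr_t typedefs,
the struct mlx5_pmd_mr fields and the reg_mr_cb/dereg_mr_cb cache fields come
from this series:

/* Hypothetical sketch: DevX-based callbacks for a non-Linux build. */
#include <string.h>
#include <mlx5_common_mr.h>

static int
devx_reg_mr(void *pd, void *addr, size_t length, struct mlx5_pmd_mr *pmd_mr)
{
	/*
	 * Placeholder: a real implementation would register 'length' bytes at
	 * 'addr' on 'pd' via DevX and store the resulting object in
	 * pmd_mr->obj.
	 */
	(void)pd;
	(void)addr;
	pmd_mr->len = length;
	pmd_mr->obj = NULL;
	return 0;
}

static void
devx_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
{
	/* Placeholder: release whatever devx_reg_mr() stored in pmd_mr->obj. */
	memset(pmd_mr, 0, sizeof(*pmd_mr));
}

/*
 * The OS-specific hook then selects the pair, exactly as the Linux version
 * in the patch below selects the Verbs helpers.
 */
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb)
{
	*reg_mr_cb = devx_reg_mr;
	*dereg_mr_cb = devx_dereg_mr;
}

Whatever pair the OS layer chooses is stored into sh->share_cache by
mlx5_alloc_shared_dev_ctx(), and mlx5_mr_create_primary(), mlx5_create_mr_ext()
and mr_free() then call those fields instead of the Verbs helpers directly, as
the diff below shows.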

Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 drivers/common/mlx5/mlx5_common_mr.c | 23 ++++++++++++-----------
 drivers/common/mlx5/mlx5_common_mr.h | 14 ++++++++++++--
 drivers/net/mlx5/linux/mlx5_os.c     | 18 ++++++++++++++++++
 drivers/net/mlx5/mlx5.c              |  2 ++
 drivers/net/mlx5/mlx5.h              |  2 ++
 drivers/net/mlx5/mlx5_mr.c           |  6 ++++--
 6 files changed, 50 insertions(+), 15 deletions(-)
  

Patch

diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 4ce7975..564d618 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -435,12 +435,12 @@  mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
  *   Pointer to MR to free.
  */
 static void
-mr_free(struct mlx5_mr *mr)
+mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
 {
 	if (mr == NULL)
 		return;
 	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
-	mlx5_common_verbs_dereg_mr(&mr->pmd_mr);
+	dereg_mr_cb(&mr->pmd_mr);
 	if (mr->ms_bmp != NULL)
 		rte_bitmap_free(mr->ms_bmp);
 	rte_free(mr);
@@ -490,7 +490,7 @@  mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
 		struct mlx5_mr *mr = mr_next;
 
 		mr_next = LIST_NEXT(mr, mr);
-		mr_free(mr);
+		mr_free(mr, share_cache->dereg_mr_cb);
 	}
 }
 
@@ -702,7 +702,7 @@  mlx5_mr_create_primary(void *pd,
 		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
 		data.end = data.start + msl->page_sz;
 		rte_mcfg_mem_read_unlock();
-		mr_free(mr);
+		mr_free(mr, share_cache->dereg_mr_cb);
 		goto alloc_resources;
 	}
 	MLX5_ASSERT(data.msl == data_re.msl);
@@ -725,7 +725,7 @@  mlx5_mr_create_primary(void *pd,
 		 * Must be unlocked before calling rte_free() because
 		 * mlx5_mr_mem_event_free_cb() can be called inside.
 		 */
-		mr_free(mr);
+		mr_free(mr, share_cache->dereg_mr_cb);
 		return entry->lkey;
 	}
 	/*
@@ -760,12 +760,12 @@  mlx5_mr_create_primary(void *pd,
 	mr->ms_bmp_n = len / msl->page_sz;
 	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
 	/*
-	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
-	 * called with holding the memory lock because it doesn't use
+	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
+	 * be called with holding the memory lock because it doesn't use
 	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
 	 * through mlx5_alloc_verbs_buf().
 	 */
-	mlx5_common_verbs_reg_mr(pd, (void *)data.start, len, &mr->pmd_mr);
+	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
 	if (mr->pmd_mr.obj == NULL) {
 		DEBUG("Fail to create an MR for address (%p)",
 		      (void *)addr);
@@ -801,7 +801,7 @@  mlx5_mr_create_primary(void *pd,
 	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
 	 * inside.
 	 */
-	mr_free(mr);
+	mr_free(mr, share_cache->dereg_mr_cb);
 	return UINT32_MAX;
 }
 
@@ -1028,7 +1028,8 @@  mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
  *   Pointer to MR structure on success, NULL otherwise.
  */
 struct mlx5_mr *
-mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id)
+mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
+		   mlx5_reg_mr_t reg_mr_cb)
 {
 	struct mlx5_mr *mr = NULL;
 
@@ -1038,7 +1039,7 @@  mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id)
 				RTE_CACHE_LINE_SIZE, socket_id);
 	if (mr == NULL)
 		return NULL;
-	mlx5_common_verbs_reg_mr(pd, (void *)addr, len, &mr->pmd_mr);
+	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
 	if (mr->pmd_mr.obj == NULL) {
 		DRV_LOG(WARNING,
 			"Fail to create MR for address (%p)",
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index cdb8acf..b23ee66 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -38,6 +38,14 @@  struct mlx5_pmd_mr {
 	size_t		     len;
 	void		     *obj;  /* verbs mr object or devx umem object. */
 };
+
+/**
+ * mr operations typedef
+ */
+typedef int (*mlx5_reg_mr_t)(void *pd, void *addr, size_t length,
+			     struct mlx5_pmd_mr *pmd_mr);
+typedef void (*mlx5_dereg_mr_t)(struct mlx5_pmd_mr *pmd_mr);
+
 /* Memory Region object. */
 struct mlx5_mr {
 	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
@@ -83,6 +91,8 @@  struct mlx5_mr_share_cache {
 	struct mlx5_mr_btree cache; /* Global MR cache table. */
 	struct mlx5_mr_list mr_list; /* Registered MR list. */
 	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
+	mlx5_reg_mr_t reg_mr_cb; /* Callback to reg_mr func */
+	mlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func */
 } __rte_packed;
 
 /**
@@ -155,8 +165,8 @@  mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
 		    struct mr_cache_entry *entry, uintptr_t addr);
 __rte_internal
 struct mlx5_mr *
-mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len,
-		   int socket_id);
+mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
+		   mlx5_reg_mr_t reg_mr_cb);
 __rte_internal
 uint32_t
 mlx5_mr_create_primary(void *pd,
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 844c6c0..f498d00 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -42,6 +42,7 @@ 
 #include <mlx5_devx_cmds.h>
 #include <mlx5_common.h>
 #include <mlx5_common_mp.h>
+#include <mlx5_common_mr.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -2321,6 +2322,23 @@  mlx5_os_stats_init(struct rte_eth_dev *dev)
 	rte_free(strings);
 }
 
+/**
+ * Set the reg_mr and dereg_mr callbacks
+ *
+ * @param reg_mr_cb[out]
+ *   Pointer to reg_mr func
+ * @param dereg_mr_cb[out]
+ *   Pointer to dereg_mr func
+ *
+ */
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
+		      mlx5_dereg_mr_t *dereg_mr_cb)
+{
+	*reg_mr_cb = mlx5_common_verbs_reg_mr;
+	*dereg_mr_cb = mlx5_common_verbs_dereg_mr;
+}
+
 const struct eth_dev_ops mlx5_os_dev_ops = {
 	.dev_configure = mlx5_dev_configure,
 	.dev_start = mlx5_dev_start,
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index b22acbb..5c86f6f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -713,6 +713,8 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		err = rte_errno;
 		goto error;
 	}
+	mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
+			      &sh->share_cache.dereg_mr_cb);
 	mlx5_os_dev_shared_handler_install(sh);
 	mlx5_flow_aging_init(sh);
 	mlx5_flow_counters_mng_init(sh);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 94a3667..5bd5acd 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -940,4 +940,6 @@  int mlx5_os_read_dev_stat(struct mlx5_priv *priv,
 int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats);
 int mlx5_os_get_stats_n(struct rte_eth_dev *dev);
 void mlx5_os_stats_init(struct rte_eth_dev *dev);
+void mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
+			   mlx5_dereg_mr_t *dereg_mr_cb);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index c91d6a4..adbe07c 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -276,7 +276,8 @@  mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
 		return;
 	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
 		dev->data->port_id, mem_idx, mp->name);
-	mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id);
+	mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+				sh->share_cache.reg_mr_cb);
 	if (!mr) {
 		DRV_LOG(WARNING,
 			"port %u unable to allocate a new MR of"
@@ -350,7 +351,8 @@  mlx5_dma_map(struct rte_pci_device *pdev, void *addr,
 	}
 	priv = dev->data->dev_private;
 	sh = priv->sh;
-	mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY);
+	mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
+				sh->share_cache.reg_mr_cb);
 	if (!mr) {
 		DRV_LOG(WARNING,
 			"port %u unable to dma map", dev->data->port_id);