@@ -142,3 +142,18 @@ mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
memset(pmd_mr, 0, sizeof(*pmd_mr));
}
}
+
+/**
+ * Set the reg_mr and dereg_mr callbacks.
+ *
+ * @param[out] reg_mr_cb
+ * Pointer to the reg_mr callback function.
+ * @param[out] dereg_mr_cb
+ * Pointer to the dereg_mr callback function.
+ */
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb)
+{
+ *reg_mr_cb = mlx5_common_verbs_reg_mr;
+ *dereg_mr_cb = mlx5_common_verbs_dereg_mr;
+}
@@ -199,7 +199,7 @@ mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
+static int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
if (bt == NULL) {
@@ -1044,6 +1044,29 @@ mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
mlx5_mr_garbage_collect(share_cache);
}

+/**
+ * Initialize global MR cache of a device.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
+{
+ /* Set the reg_mr and dereg_mr callback functions. */
+ mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
+ &share_cache->dereg_mr_cb);
+ rte_rwlock_init(&share_cache->rwlock);
+ /* Initialize B-tree and allocate memory for global MR cache table. */
+ return mlx5_mr_btree_init(&share_cache->cache,
+ MLX5_MR_BTREE_CACHE_N * 2, socket);
+}
+
/**
* Flush all of the local cache entries.
*
@@ -128,8 +128,6 @@ __rte_internal
int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
int socket);
__rte_internal
-int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
-__rte_internal
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
__rte_internal
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
@@ -145,6 +143,8 @@ uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
__rte_internal
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
__rte_internal
+int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
+__rte_internal
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
__rte_internal
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
@@ -183,6 +183,10 @@ __rte_internal
void
mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);

+__rte_internal
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);
+
__rte_internal
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);
@@ -109,7 +109,7 @@ INTERNAL {
mlx5_mr_addr2mr_bh;
mlx5_mr_btree_dump;
mlx5_mr_btree_free;
- mlx5_mr_btree_init;
+ mlx5_mr_create_cache;
mlx5_mr_create_primary;
mlx5_mr_ctrl_init;
mlx5_mr_dump_cache;
@@ -136,10 +136,9 @@ INTERNAL {
mlx5_nl_vlan_vmwa_create; # WINDOWS_NO_EXPORT
mlx5_nl_vlan_vmwa_delete; # WINDOWS_NO_EXPORT
- mlx5_os_dereg_mr;
- mlx5_os_reg_mr;
+ mlx5_os_set_reg_mr_cb;
mlx5_os_umem_dereg;
mlx5_os_umem_reg;
mlx5_realloc;
@@ -317,7 +317,7 @@ mlx5_os_umem_dereg(void *pumem)
* @return
* 0 on successful registration, -1 otherwise
*/
-int
+static int
mlx5_os_reg_mr(void *pd,
void *addr, size_t length, struct mlx5_pmd_mr *pmd_mr)
{
@@ -365,7 +365,7 @@ mlx5_os_reg_mr(void *pd,
* @param[in] pmd_mr
* Pointer to PMD mr object
*/
-void
+static void
mlx5_os_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
{
if (pmd_mr && pmd_mr->mkey)
@@ -374,3 +374,19 @@ mlx5_os_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
claim_zero(mlx5_os_umem_dereg(pmd_mr->obj));
memset(pmd_mr, 0, sizeof(*pmd_mr));
}
+
+/**
+ * Set the reg_mr and dereg_mr callbacks.
+ *
+ * @param[out] reg_mr_cb
+ * Pointer to the reg_mr callback function.
+ * @param[out] dereg_mr_cb
+ * Pointer to the dereg_mr callback function.
+ */
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb)
+{
+ *reg_mr_cb = mlx5_os_reg_mr;
+ *dereg_mr_cb = mlx5_os_dereg_mr;
+}
@@ -253,9 +253,5 @@ __rte_internal
void *mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access);
__rte_internal
int mlx5_os_umem_dereg(void *pumem);
-__rte_internal
-int mlx5_os_reg_mr(void *pd,
- void *addr, size_t length, struct mlx5_pmd_mr *pmd_mr);
-__rte_internal
-void mlx5_os_dereg_mr(struct mlx5_pmd_mr *pmd_mr);
+
#endif /* RTE_PMD_MLX5_COMMON_OS_H_ */
@@ -799,16 +799,13 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
rte_compressdev_pmd_destroy(priv->compressdev);
return -1;
}
- if (mlx5_mr_btree_init(&priv->mr_scache.cache,
- MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
+ if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
mlx5_compress_uar_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
rte_errno = ENOMEM;
return -rte_errno;
}
- priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
- priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
/* Register callback function for global shared MR cache management. */
if (TAILQ_EMPTY(&mlx5_compress_priv_list))
rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
@@ -974,16 +974,13 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
rte_cryptodev_pmd_destroy(priv->crypto_dev);
return -1;
}
- if (mlx5_mr_btree_init(&priv->mr_scache.cache,
- MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
+ if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
mlx5_crypto_uar_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
rte_errno = ENOMEM;
return -rte_errno;
}
- priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
- priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
priv->max_segs_num = devarg_prms.max_segs_num;
priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +
@@ -2831,23 +2831,6 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
return 1;
}

-/**
- * Set the reg_mr and dereg_mr call backs
- *
- * @param reg_mr_cb[out]
- * Pointer to reg_mr func
- * @param dereg_mr_cb[out]
- * Pointer to dereg_mr func
- *
- */
-void
-mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
- mlx5_dereg_mr_t *dereg_mr_cb)
-{
- *reg_mr_cb = mlx5_mr_verbs_ops.reg_mr;
- *dereg_mr_cb = mlx5_mr_verbs_ops.dereg_mr;
-}
-
/**
* Remove a MAC address from device
*
@@ -26,48 +26,6 @@
#include <mlx5_utils.h>
#include <mlx5_malloc.h>

-/**
- * Register mr. Given protection domain pointer, pointer to addr and length
- * register the memory region.
- *
- * @param[in] pd
- * Pointer to protection domain context.
- * @param[in] addr
- * Pointer to memory start address.
- * @param[in] length
- * Length of the memory to register.
- * @param[out] pmd_mr
- * pmd_mr struct set with lkey, address, length and pointer to mr object
- *
- * @return
- * 0 on successful registration, -1 otherwise
- */
-static int
-mlx5_reg_mr(void *pd, void *addr, size_t length,
- struct mlx5_pmd_mr *pmd_mr)
-{
- return mlx5_common_verbs_reg_mr(pd, addr, length, pmd_mr);
-}
-
-/**
- * Deregister mr. Given the mlx5 pmd MR - deregister the MR
- *
- * @param[in] pmd_mr
- * pmd_mr struct set with lkey, address, length and pointer to mr object
- *
- */
-static void
-mlx5_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
-{
- mlx5_common_verbs_dereg_mr(pmd_mr);
-}
-
-/* verbs operations. */
-const struct mlx5_mr_ops mlx5_mr_verbs_ops = {
- .reg_mr = mlx5_reg_mr,
- .dereg_mr = mlx5_dereg_mr,
-};
-
/**
* Modify Rx WQ vlan stripping offload
*
@@ -12,7 +12,5 @@ void mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj);
int mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev);
void mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev);

-/* Verbs ops struct */
-extern const struct mlx5_mr_ops mlx5_mr_verbs_ops;
extern struct mlx5_obj_ops ibv_obj_ops;
#endif /* RTE_PMD_MLX5_VERBS_H_ */
@@ -1351,15 +1351,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
* At this point the device is not added to the memory
* event list yet, context is just being created.
*/
- err = mlx5_mr_btree_init(&sh->share_cache.cache,
- MLX5_MR_BTREE_CACHE_N * 2,
- sh->numa_node);
+ err = mlx5_mr_create_cache(&sh->share_cache, sh->numa_node);
if (err) {
err = rte_errno;
goto error;
}
- mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
- &sh->share_cache.dereg_mr_cb);
mlx5_os_dev_shared_handler_install(sh);
sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
if (!sh->cnt_id_tbl) {
@@ -1363,12 +1363,6 @@ struct mlx5_obj_ops {
#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)

-/* MR operations structure. */
-struct mlx5_mr_ops {
- mlx5_reg_mr_t reg_mr;
- mlx5_dereg_mr_t dereg_mr;
-};
-
struct mlx5_priv {
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
@@ -1768,8 +1762,6 @@ void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
int mlx5_os_net_probe(struct mlx5_common_device *cdev);
void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
-void mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
- mlx5_dereg_mr_t *dereg_mr_cb);
void mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
uint32_t index);
@@ -926,21 +926,4 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
return 0;
}

-/**
- * Set the reg_mr and dereg_mr call backs
- *
- * @param reg_mr_cb[out]
- * Pointer to reg_mr func
- * @param dereg_mr_cb[out]
- * Pointer to dereg_mr func
- *
- */
-void
-mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
- mlx5_dereg_mr_t *dereg_mr_cb)
-{
- *reg_mr_cb = mlx5_os_reg_mr;
- *dereg_mr_cb = mlx5_os_dereg_mr;
-}
-
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};
@@ -194,11 +194,7 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
priv->regexdev->device = cdev->dev;
priv->regexdev->data->dev_private = priv;
priv->regexdev->state = RTE_REGEXDEV_READY;
- priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
- priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
- ret = mlx5_mr_btree_init(&priv->mr_scache.cache,
- MLX5_MR_BTREE_CACHE_N * 2,
- rte_socket_id());
+ ret = mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id());
if (ret) {
DRV_LOG(ERR, "MR init tree failed.");
rte_errno = ENOMEM;