@@ -406,6 +406,49 @@ mlx5_glue_constructor(void)
mlx5_glue = NULL;
}
+/**
+ * Allocate Protection Domain object and extract its pdn using the DV API.
+ *
+ * @param[out] cdev
+ * Pointer to the mlx5 common device; its pd and pdn fields are filled on success.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+mlx5_os_pd_create(struct mlx5_common_device *cdev)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5dv_obj obj;
+ struct mlx5dv_pd pd_info;
+ int ret;
+#endif
+
+ cdev->pd = mlx5_glue->alloc_pd(cdev->ctx);
+ if (cdev->pd == NULL) {
+ DRV_LOG(ERR, "Failed to allocate PD.");
+ return errno ? -errno : -ENOMEM;
+ }
+ if (cdev->config.devx == 0)
+ return 0;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ obj.pd.in = cdev->pd;
+ obj.pd.out = &pd_info;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Fail to get PD object info.");
+ mlx5_glue->dealloc_pd(cdev->pd);
+ cdev->pd = NULL;
+ return -errno;
+ }
+ cdev->pdn = pd_info.pdn;
+ return 0;
+#else
+ DRV_LOG(ERR, "Cannot get pdn - no DV support.");
+ return -ENOTSUP;
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+}
+
static struct ibv_device *
mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
{
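To make the new contract concrete, here is a minimal usage sketch (a sketch only, not part of the patch): once mlx5_dev_hw_global_prepare() has called mlx5_os_pd_create(), class drivers read the shared handles from the common device instead of allocating a PD of their own. The wrapper struct below is hypothetical; the cdev fields and the DevX attribute name are taken from the hunks that follow.

struct example_priv {
	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
};

static void
example_use_shared_pd(struct example_priv *priv,
		      struct mlx5_devx_qp_attr *qp_attr)
{
	/* DevX objects are created against the PD number... */
	qp_attr->pd = priv->cdev->pdn;
	/* ...while Verbs paths keep passing the PD object itself. */
	(void)priv->cdev->pd;
}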
@@ -203,14 +203,6 @@ mlx5_os_get_devx_uar_page_id(void *uar)
#endif
}
-__rte_internal
-static inline void *
-mlx5_os_alloc_pd(void *ctx)
-{
- return mlx5_glue->alloc_pd(ctx);
-}
-
-__rte_internal
static inline int
mlx5_os_dealloc_pd(void *pd)
{
@@ -320,6 +320,10 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
static void
mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
{
+ if (cdev->pd != NULL) {
+ claim_zero(mlx5_os_dealloc_pd(cdev->pd));
+ cdev->pd = NULL;
+ }
if (cdev->ctx != NULL) {
claim_zero(mlx5_glue->close_device(cdev->ctx));
cdev->ctx = NULL;
@@ -346,7 +350,14 @@ mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
ret = mlx5_os_open_device(cdev, classes);
if (ret < 0)
return ret;
+ /* Allocate Protection Domain object and extract its pdn. */
+ ret = mlx5_os_pd_create(cdev);
+ if (ret)
+ goto error;
return 0;
+error:
+ mlx5_dev_hw_global_release(cdev);
+ return ret;
}
static void
@@ -346,6 +346,8 @@ struct mlx5_common_device {
TAILQ_ENTRY(mlx5_common_device) next;
uint32_t classes_loaded;
void *ctx; /* Verbs/DV/DevX context. */
+ void *pd; /* Protection Domain. */
+ uint32_t pdn; /* Protection Domain Number. */
struct mlx5_common_dev_config config; /* Device configuration. */
};
@@ -447,5 +449,6 @@ mlx5_dev_is_pci(const struct rte_device *dev);
/* mlx5_common_os.c */
int mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes);
+int mlx5_os_pd_create(struct mlx5_common_device *cdev);
#endif /* RTE_PMD_MLX5_COMMON_H_ */
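As a further sketch (assuming the usual mlx5 DevX headers are included; buf, len and umem_id are placeholders), a DevX mkey can now be created directly against cdev->pdn, mirroring the crypto and vdpa hunks below:

static struct mlx5_devx_obj *
example_mkey_create(struct mlx5_common_device *cdev, void *buf, size_t len,
		    uint32_t umem_id)
{
	struct mlx5_devx_mkey_attr mkey_attr = {
		.addr = (uintptr_t)buf,
		.size = len,
		.umem_id = umem_id,
		.pd = cdev->pdn, /* shared Protection Domain number */
	};

	return mlx5_devx_cmd_mkey_create(cdev->ctx, &mkey_attr);
}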
@@ -135,8 +135,6 @@ INTERNAL {
mlx5_nl_vlan_vmwa_create; # WINDOWS_NO_EXPORT
mlx5_nl_vlan_vmwa_delete; # WINDOWS_NO_EXPORT
- mlx5_os_alloc_pd;
- mlx5_os_dealloc_pd;
mlx5_os_dereg_mr;
mlx5_os_reg_mr;
mlx5_os_umem_dereg;
@@ -25,35 +25,6 @@ mlx5_glue_constructor(void)
{
}
-/**
- * Allocate PD. Given a DevX context object
- * return an mlx5-pd object.
- *
- * @param[in] ctx
- * Pointer to context.
- *
- * @return
- * The mlx5_pd if pd is valid, NULL and errno otherwise.
- */
-void *
-mlx5_os_alloc_pd(void *ctx)
-{
- struct mlx5_pd *ppd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_pd),
- 0, SOCKET_ID_ANY);
- if (!ppd)
- return NULL;
-
- struct mlx5_devx_obj *obj = mlx5_devx_cmd_alloc_pd(ctx);
- if (!obj) {
- mlx5_free(ppd);
- return NULL;
- }
- ppd->obj = obj;
- ppd->pdn = obj->id;
- ppd->devx_ctx = ctx;
- return ppd;
-}
-
/**
* Release PD. Releases a given mlx5_pd object
*
@@ -73,6 +44,36 @@ mlx5_os_dealloc_pd(void *pd)
return 0;
}
+/**
+ * Allocate Protection Domain object and extract its pdn using the DevX API.
+ *
+ * @param[out] cdev
+ * Pointer to the mlx5 common device; its pd and pdn fields are filled on success.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_os_pd_create(struct mlx5_common_device *cdev)
+{
+ struct mlx5_pd *pd;
+
+ pd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pd), 0, SOCKET_ID_ANY);
+ if (!pd)
+ return -1;
+ struct mlx5_devx_obj *obj = mlx5_devx_cmd_alloc_pd(cdev->ctx);
+ if (!obj) {
+ mlx5_free(pd);
+ return -1;
+ }
+ pd->obj = obj;
+ pd->pdn = obj->id;
+ pd->devx_ctx = cdev->ctx;
+ cdev->pd = pd;
+ cdev->pdn = pd->pdn;
+ return 0;
+}
+
/**
* Detect if a devx_device_bdf object has identical DBDF values to the
* rte_pci_addr found in bus/pci probing.
@@ -248,9 +248,6 @@ mlx5_os_devx_subscribe_devx_event(void *eventc,
return -ENOTSUP;
}
-__rte_internal
-void *mlx5_os_alloc_pd(void *ctx);
-__rte_internal
int mlx5_os_dealloc_pd(void *pd);
__rte_internal
void *mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access);
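On Windows (see the implementation above), cdev->pd holds the driver-private struct mlx5_pd wrapper rather than an ibv_pd, so both handles resolve to the same DevX PD. A hedged sketch of that relationship:

static uint32_t
example_pdn_from_pd(struct mlx5_common_device *cdev)
{
	/* The opaque pd points at struct mlx5_pd, whose pdn mirrors
	 * the cdev->pdn filled in by mlx5_os_pd_create(). */
	return ((struct mlx5_pd *)cdev->pd)->pdn;
}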
@@ -38,11 +38,9 @@ struct mlx5_compress_priv {
struct rte_compressdev *compressdev;
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
void *uar;
- uint32_t pdn; /* Protection Domain number. */
uint8_t min_block_size;
uint8_t qp_ts_format; /* Whether SQ supports timestamp formats. */
/* Minimum huffman block size supported by the device. */
- struct ibv_pd *pd;
struct rte_compressdev_config dev_config;
LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
rte_spinlock_t xform_sl;
@@ -190,7 +188,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
};
struct mlx5_devx_qp_attr qp_attr = {
- .pd = priv->pdn,
+ .pd = priv->cdev->pdn,
.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar),
.user_index = qp_id,
};
@@ -230,7 +228,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
qp->priv = priv;
qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
RTE_CACHE_LINE_SIZE);
- if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
+ if (mlx5_common_verbs_reg_mr(priv->cdev->pd, opaq_buf, qp->entries_n *
sizeof(struct mlx5_gga_compress_opaque),
&qp->opaque_mr) != 0) {
rte_free(opaq_buf);
@@ -469,8 +467,8 @@ mlx5_compress_addr2mr(struct mlx5_compress_priv *priv, uintptr_t addr,
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(ol_flags & EXT_ATTACHED_MBUF));
+ return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+ addr, !!(ol_flags & EXT_ATTACHED_MBUF));
}
static __rte_always_inline uint32_t
@@ -691,12 +689,8 @@ mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
}
static void
-mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
+mlx5_compress_uar_release(struct mlx5_compress_priv *priv)
{
- if (priv->pd != NULL) {
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
- priv->pd = NULL;
- }
if (priv->uar != NULL) {
mlx5_glue->devx_free_uar(priv->uar);
priv->uar = NULL;
@@ -704,46 +698,12 @@ mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
}
static int
-mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
+mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv)
{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret;
-
- priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
- if (priv->pd == NULL) {
- DRV_LOG(ERR, "Failed to allocate PD.");
- return errno ? -errno : -ENOMEM;
- }
- obj.pd.in = priv->pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret != 0) {
- DRV_LOG(ERR, "Fail to get PD object info.");
- mlx5_glue->dealloc_pd(priv->pd);
- priv->pd = NULL;
- return -errno;
- }
- priv->pdn = pd_info.pdn;
- return 0;
-#else
- (void)priv;
- DRV_LOG(ERR, "Cannot get pdn - no DV support.");
- return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
-static int
-mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
-{
- if (mlx5_compress_pd_create(priv) != 0)
- return -1;
priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
NULL) {
rte_errno = errno;
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
DRV_LOG(ERR, "Failed to allocate UAR.");
return -1;
}
@@ -839,14 +799,14 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
priv->compressdev = compressdev;
priv->min_block_size = att.compress_min_block_size;
priv->qp_ts_format = att.qp_ts_format;
- if (mlx5_compress_hw_global_prepare(priv) != 0) {
+ if (mlx5_compress_uar_prepare(priv) != 0) {
rte_compressdev_pmd_destroy(priv->compressdev);
return -1;
}
if (mlx5_mr_btree_init(&priv->mr_scache.cache,
MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
- mlx5_compress_hw_global_release(priv);
+ mlx5_compress_uar_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
rte_errno = ENOMEM;
return -rte_errno;
@@ -881,7 +841,7 @@ mlx5_compress_dev_remove(struct mlx5_common_device *cdev)
rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
NULL);
mlx5_mr_release_cache(&priv->mr_scache);
- mlx5_compress_hw_global_release(priv);
+ mlx5_compress_uar_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
}
return 0;
@@ -333,8 +333,8 @@ mlx5_crypto_addr2mr(struct mlx5_crypto_priv *priv, uintptr_t addr,
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(ol_flags & EXT_ATTACHED_MBUF));
+ return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+ addr, !!(ol_flags & EXT_ATTACHED_MBUF));
}
static __rte_always_inline uint32_t
@@ -610,7 +610,7 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
struct mlx5_umr_wqe *umr;
uint32_t i;
struct mlx5_devx_mkey_attr attr = {
- .pd = priv->pdn,
+ .pd = priv->cdev->pdn,
.umr_en = 1,
.crypto_en = 1,
.set_remote_rw = 1,
@@ -664,7 +664,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
}
- attr.pd = priv->pdn;
+ attr.pd = priv->cdev->pdn;
attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
attr.cqn = qp->cq_obj.cq->id;
attr.rq_size = 0;
@@ -754,12 +754,8 @@ static struct rte_cryptodev_ops mlx5_crypto_ops = {
};
static void
-mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv)
+mlx5_crypto_uar_release(struct mlx5_crypto_priv *priv)
{
- if (priv->pd != NULL) {
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
- priv->pd = NULL;
- }
if (priv->uar != NULL) {
mlx5_glue->devx_free_uar(priv->uar);
priv->uar = NULL;
@@ -767,47 +763,13 @@ mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv)
}
static int
-mlx5_crypto_pd_create(struct mlx5_crypto_priv *priv)
+mlx5_crypto_uar_prepare(struct mlx5_crypto_priv *priv)
{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret;
-
- priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
- if (priv->pd == NULL) {
- DRV_LOG(ERR, "Failed to allocate PD.");
- return errno ? -errno : -ENOMEM;
- }
- obj.pd.in = priv->pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret != 0) {
- DRV_LOG(ERR, "Fail to get PD object info.");
- mlx5_glue->dealloc_pd(priv->pd);
- priv->pd = NULL;
- return -errno;
- }
- priv->pdn = pd_info.pdn;
- return 0;
-#else
- (void)priv;
- DRV_LOG(ERR, "Cannot get pdn - no DV support.");
- return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
-static int
-mlx5_crypto_hw_global_prepare(struct mlx5_crypto_priv *priv)
-{
- if (mlx5_crypto_pd_create(priv) != 0)
- return -1;
priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
if (priv->uar)
priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
if (priv->uar == NULL || priv->uar_addr == NULL) {
rte_errno = errno;
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
DRV_LOG(ERR, "Failed to allocate UAR.");
return -1;
}
@@ -1011,14 +973,14 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
priv->login_obj = login;
priv->crypto_dev = crypto_dev;
priv->qp_ts_format = attr.qp_ts_format;
- if (mlx5_crypto_hw_global_prepare(priv) != 0) {
+ if (mlx5_crypto_uar_prepare(priv) != 0) {
rte_cryptodev_pmd_destroy(priv->crypto_dev);
return -1;
}
if (mlx5_mr_btree_init(&priv->mr_scache.cache,
MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
- mlx5_crypto_hw_global_release(priv);
+ mlx5_crypto_uar_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
rte_errno = ENOMEM;
return -rte_errno;
@@ -1066,7 +1028,7 @@ mlx5_crypto_dev_remove(struct mlx5_common_device *cdev)
rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
NULL);
mlx5_mr_release_cache(&priv->mr_scache);
- mlx5_crypto_hw_global_release(priv);
+ mlx5_crypto_uar_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
}
@@ -23,10 +23,8 @@ struct mlx5_crypto_priv {
struct rte_cryptodev *crypto_dev;
void *uar; /* User Access Region. */
volatile uint64_t *uar_addr;
- uint32_t pdn; /* Protection Domain number. */
uint32_t max_segs_num; /* Maximum supported data segs. */
uint8_t qp_ts_format; /* Whether QP supports timestamp formats. */
- struct ibv_pd *pd;
struct mlx5_hlist *dek_hlist; /* Dek hash list. */
struct rte_cryptodev_config dev_config;
struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
@@ -94,7 +94,7 @@ mlx5_crypto_dek_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
struct mlx5_crypto_dek *dek = rte_zmalloc(__func__, sizeof(*dek),
RTE_CACHE_LINE_SIZE);
struct mlx5_devx_dek_attr dek_attr = {
- .pd = ctx->priv->pdn,
+ .pd = ctx->priv->cdev->pdn,
.key_purpose = MLX5_CRYPTO_KEY_PURPOSE_AES_XTS,
.has_keytag = 1,
};
@@ -90,7 +90,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
switch (param->type) {
case MLX5_MP_REQ_CREATE_MR:
mp_init_msg(&priv->mp_id, &mp_res, param->type);
- lkey = mlx5_mr_create_primary(priv->sh->pd,
+ lkey = mlx5_mr_create_primary(cdev->pd,
&priv->sh->share_cache,
&entry, param->args.addr,
cdev->config.mr_ext_memseg_en);
@@ -785,7 +785,7 @@ mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
.cq = cq,
});
if (wq) {
@@ -2711,41 +2711,6 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
return mlx5_os_auxiliary_probe(cdev);
}
-/**
- * Extract pdn of PD object using DV API.
- *
- * @param[in] pd
- * Pointer to the verbs PD object.
- * @param[out] pdn
- * Pointer to the PD object number variable.
- *
- * @return
- * 0 on success, error value otherwise.
- */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret = 0;
-
- obj.pd.in = pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret) {
- DRV_LOG(DEBUG, "Fail to get PD object info");
- return ret;
- }
- *pdn = pd_info.pdn;
- return 0;
-#else
- (void)pd;
- (void)pdn;
- return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
/**
* Install shared asynchronous device events handler.
* This function is implemented to support event sharing
@@ -289,7 +289,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
.max_wr = wqe_n >> rxq_data->sges_n,
/* Max number of scatter/gather elements in a WR. */
.max_sge = 1 << rxq_data->sges_n,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
.cq = rxq_obj->ibv_cq,
.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
.create_flags = (rxq_data->vlan_strip ?
@@ -627,7 +627,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
},
&qp_init_attr);
#else
@@ -648,7 +648,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
});
#endif
if (!qp) {
@@ -741,7 +741,7 @@ mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
.cq = rxq->ibv_cq,
});
if (!rxq->wq) {
@@ -807,7 +807,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
.rx_hash_fields_mask = 0,
},
.rwq_ind_tbl = ind_tbl,
- .pd = priv->sh->pd
+ .pd = priv->sh->cdev->pd
});
if (!hrxq->qp) {
DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
@@ -895,7 +895,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
qp_attr.qp_type = IBV_QPT_RAW_PACKET,
/* Do *NOT* enable this, completions events are managed per Tx burst. */
qp_attr.sq_sig_all = 0;
- qp_attr.pd = priv->sh->pd;
+ qp_attr.pd = priv->sh->cdev->pd;
qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
if (txq_data->inlen_send)
qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
@@ -1117,7 +1117,7 @@ mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask = IBV_QP_INIT_ATTR_PD,
- .pd = sh->pd,
+ .pd = sh->cdev->pd,
.send_cq = sh->self_lb.ibv_cq,
.recv_cq = sh->self_lb.ibv_cq,
.cap.max_recv_wr = 1,
@@ -1109,7 +1109,7 @@ mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
mlx5_mp_id_init(&mp_id, 0);
if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
- mp->name, sh->pd, rte_strerror(rte_errno));
+ mp->name, sh->cdev->pd, rte_strerror(rte_errno));
}
/**
@@ -1129,10 +1129,11 @@ mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
int ret;
mlx5_mp_id_init(&mp_id, 0);
- ret = mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp, &mp_id);
+ ret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
+ &mp_id);
if (ret < 0 && rte_errno != EEXIST)
DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
- mp->name, sh->pd, rte_strerror(rte_errno));
+ mp->name, sh->cdev->pd, rte_strerror(rte_errno));
}
/**
@@ -1171,10 +1172,11 @@ mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
switch (event) {
case RTE_MEMPOOL_EVENT_READY:
mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp,
+ if (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
&mp_id) < 0)
DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
- mp->name, sh->pd, rte_strerror(rte_errno));
+ mp->name, sh->cdev->pd,
+ rte_strerror(rte_errno));
break;
case RTE_MEMPOOL_EVENT_DESTROY:
mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
@@ -1306,18 +1308,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
}
- sh->pd = mlx5_os_alloc_pd(sh->cdev->ctx);
- if (sh->pd == NULL) {
- DRV_LOG(ERR, "PD allocation failure");
- err = ENOMEM;
- goto error;
- }
if (sh->devx) {
- err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
- if (err) {
- DRV_LOG(ERR, "Fail to extract pdn from PD");
- goto error;
- }
sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
if (!sh->td) {
DRV_LOG(ERR, "TD allocation failure");
@@ -1405,8 +1396,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
mlx5_glue->devx_free_uar(sh->devx_rx_uar);
if (sh->tx_uar)
mlx5_glue->devx_free_uar(sh->tx_uar);
- if (sh->pd)
- claim_zero(mlx5_os_dealloc_pd(sh->pd));
mlx5_free(sh);
MLX5_ASSERT(err > 0);
rte_errno = err;
@@ -1487,8 +1476,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
mlx5_glue->devx_free_uar(sh->tx_uar);
sh->tx_uar = NULL;
}
- if (sh->pd)
- claim_zero(mlx5_os_dealloc_pd(sh->pd));
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
@@ -1140,8 +1140,6 @@ struct mlx5_dev_ctx_shared {
uint32_t max_port; /* Maximal IB device port index. */
struct mlx5_bond_info bond; /* Bonding information. */
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
- void *pd; /* Protection Domain. */
- uint32_t pdn; /* Protection Domain number. */
uint32_t tdn; /* Transport Domain number. */
char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
@@ -1769,7 +1767,6 @@ void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
struct rte_pci_driver;
int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
-int mlx5_os_get_pdn(void *pd, uint32_t *pdn);
int mlx5_os_net_probe(struct mlx5_common_device *cdev);
void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
@@ -276,7 +276,7 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
MLX5_WQ_END_PAD_MODE_ALIGN :
MLX5_WQ_END_PAD_MODE_NONE;
- rq_attr.wq_attr.pd = priv->sh->pdn;
+ rq_attr.wq_attr.pd = priv->sh->cdev->pdn;
rq_attr.counter_set_id = priv->counter_set_id;
/* Create RQ using DevX API. */
return mlx5_devx_rq_create(priv->sh->cdev->ctx, &rxq_ctrl->obj->rq_obj,
@@ -994,7 +994,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
.tis_lst_sz = 1,
.tis_num = priv->sh->tis->id,
.wq_attr = (struct mlx5_devx_wq_attr){
- .pd = priv->sh->pdn,
+ .pd = priv->sh->cdev->pdn,
.uar_page =
mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
},
@@ -7639,7 +7639,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
mkey_attr.addr = (uintptr_t)mem;
mkey_attr.size = size;
mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->pdn;
+ mkey_attr.pd = sh->cdev->pdn;
mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
@@ -103,7 +103,7 @@ mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
return -1;
}
- ret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);
+ ret = sh->share_cache.reg_mr_cb(sh->cdev->pd, mr->addr, length, mr);
if (ret) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
mlx5_free(mr->addr);
@@ -317,8 +317,9 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
return -1;
if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ sh->sq_ts_format)) {
mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
return -1;
}
@@ -326,8 +327,9 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
break;
case ASO_OPC_MOD_POLICER:
if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format))
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ sh->sq_ts_format))
return -1;
mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
break;
@@ -337,8 +339,9 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
&sh->ct_mng->aso_sq.mr, 0))
return -1;
if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ sh->sq_ts_format)) {
mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
return -1;
}
@@ -6467,12 +6467,10 @@ flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
* NULL otherwise and rte_errno is set.
*/
static struct mlx5_aso_mtr_pool *
-flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
- struct mlx5_aso_mtr **mtr_free)
+flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_aso_mtr_pools_mng *pools_mng =
- &priv->sh->mtrmng->pools_mng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
struct mlx5_aso_mtr_pool *pool = NULL;
struct mlx5_devx_obj *dcs = NULL;
uint32_t i;
@@ -6480,7 +6478,8 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
- priv->sh->pdn, log_obj_size);
+ priv->sh->cdev->pdn,
+ log_obj_size);
if (!dcs) {
rte_errno = ENODATA;
return NULL;
@@ -6502,8 +6501,7 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
pools_mng->n_valid++;
for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
pool->mtrs[i].offset = i;
- LIST_INSERT_HEAD(&pools_mng->meters,
- &pool->mtrs[i], next);
+ LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
}
pool->mtrs[0].offset = 0;
*mtr_free = &pool->mtrs[0];
@@ -11956,7 +11954,7 @@ flow_dv_age_pool_create(struct rte_eth_dev *dev,
uint32_t i;
obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
- priv->sh->pdn);
+ priv->sh->cdev->pdn);
if (!obj) {
rte_errno = ENODATA;
DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
@@ -12384,7 +12382,8 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,
uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
- priv->sh->pdn, log_obj_size);
+ priv->sh->cdev->pdn,
+ log_obj_size);
if (!obj) {
rte_errno = ENODATA;
DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
@@ -84,7 +84,7 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
struct mlx5_priv *priv = txq_ctrl->priv;
- return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+ return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,
&priv->sh->share_cache, mr_ctrl, addr,
priv->sh->cdev->config.mr_ext_memseg_en);
}
@@ -180,7 +180,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
return;
DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
dev->data->port_id, mem_idx, mp->name);
- mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+ mr = mlx5_create_mr_ext(sh->cdev->pd, addr, len, mp->socket_id,
sh->share_cache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
@@ -196,8 +196,8 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
mlx5_mr_insert_cache(&sh->share_cache, mr);
rte_rwlock_write_unlock(&sh->share_cache.rwlock);
/* Insert to the local cache table */
- mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache, mr_ctrl,
- addr, priv->sh->cdev->config.mr_ext_memseg_en);
+ mlx5_mr_addr2mr_bh(sh->cdev->pd, &priv->mp_id, &sh->share_cache,
+ mr_ctrl, addr, sh->cdev->config.mr_ext_memseg_en);
}
/**
@@ -256,8 +256,8 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
}
priv = dev->data->dev_private;
sh = priv->sh;
- mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
- sh->share_cache.reg_mr_cb);
+ mr = mlx5_create_mr_ext(sh->cdev->pd, (uintptr_t)addr, len,
+ SOCKET_ID_ANY, sh->share_cache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
"port %u unable to dma map", dev->data->port_id);
@@ -1242,8 +1242,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
rte_errno = ENOMEM;
return -rte_errno;
}
- ret = mlx5_mr_mempool_register(&priv->sh->share_cache, priv->sh->pd,
- mp, &priv->mp_id);
+ ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+ priv->sh->cdev->pd, mp, &priv->mp_id);
if (ret < 0 && rte_errno != EEXIST) {
ret = rte_errno;
DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
@@ -149,7 +149,8 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
mp = rxq_ctrl->rxq.rxseg[s].mp;
ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
- priv->sh->pd, mp, &priv->mp_id);
+ priv->sh->cdev->pd, mp,
+ &priv->mp_id);
if (ret < 0 && rte_errno != EEXIST)
return ret;
rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
@@ -232,7 +232,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
.tis_lst_sz = 1,
.tis_num = sh->tis->id,
.wq_attr = (struct mlx5_devx_wq_attr){
- .pd = sh->pdn,
+ .pd = sh->cdev->pdn,
.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
},
.ts_format = mlx5_ts_format_conv(sh->sq_ts_format),
@@ -444,7 +444,7 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
sq_attr.wq_attr.cd_slave = 1;
sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
- sq_attr.wq_attr.pd = sh->pdn;
+ sq_attr.wq_attr.pd = sh->cdev->pdn;
sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
log2above(wq->sq_size),
@@ -965,25 +965,4 @@ mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
*dereg_mr_cb = mlx5_os_dereg_mr;
}
-/**
- * Extract pdn of PD object using DevX
- *
- * @param[in] pd
- * Pointer to the DevX PD object.
- * @param[out] pdn
- * Pointer to the PD object number variable.
- *
- * @return
- * 0 on success, error value otherwise.
- */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
-{
- if (!pd)
- return -EINVAL;
-
- *pdn = ((struct mlx5_pd *)pd)->pdn;
- return 0;
-}
-
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};
@@ -187,12 +187,6 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
rte_errno = ENOMEM;
goto error;
}
- priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
- if (!priv->pd) {
- DRV_LOG(ERR, "can't allocate pd.");
- rte_errno = ENOMEM;
- goto error;
- }
priv->regexdev->dev_ops = &mlx5_regexdev_ops;
priv->regexdev->enqueue = mlx5_regexdev_enqueue;
#ifdef HAVE_MLX5_UMR_IMKEY
@@ -230,8 +224,6 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
return 0;
error:
- if (priv->pd)
- mlx5_glue->dealloc_pd(priv->pd);
if (priv->uar)
mlx5_glue->devx_free_uar(priv->uar);
if (priv->regexdev)
@@ -264,8 +256,6 @@ mlx5_regex_dev_remove(struct mlx5_common_device *cdev)
NULL);
if (priv->mr_scache.cache.table)
mlx5_mr_release_cache(&priv->mr_scache);
- if (priv->pd)
- mlx5_glue->dealloc_pd(priv->pd);
if (priv->uar)
mlx5_glue->devx_free_uar(priv->uar);
if (priv->regexdev)
@@ -68,7 +68,6 @@ struct mlx5_regex_priv {
MLX5_RXP_EM_COUNT];
uint32_t nb_engines; /* Number of RegEx engines. */
struct mlx5dv_devx_uar *uar; /* UAR object. */
- struct ibv_pd *pd;
TAILQ_ENTRY(mlx5_regex_priv) mem_event_cb;
/**< Called by memory event callback. */
struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
@@ -79,26 +78,6 @@ struct mlx5_regex_priv {
uint32_t mmo_regex_sq_cap:1;
};
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-static inline int
-regex_get_pdn(void *pd, uint32_t *pdn)
-{
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret = 0;
-
- obj.pd.in = pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret) {
- DRV_LOG(DEBUG, "Fail to get PD object info");
- return ret;
- }
- *pdn = pd_info.pdn;
- return 0;
-}
-#endif
-
/* mlx5_regex.c */
int mlx5_regex_start(struct rte_regexdev *dev);
int mlx5_regex_stop(struct rte_regexdev *dev);
@@ -138,21 +138,17 @@ regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
struct mlx5_devx_qp_attr attr = {
.cqn = qp->cq.cq_obj.cq->id,
.uar_index = priv->uar->page_id,
+ .pd = priv->cdev->pdn,
.ts_format = mlx5_ts_format_conv(priv->qp_ts_format),
.user_index = q_ind,
};
struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
- uint32_t pd_num = 0;
int ret;
qp_obj->log_nb_desc = log_nb_desc;
qp_obj->qpn = q_ind;
qp_obj->ci = 0;
qp_obj->pi = 0;
- ret = regex_get_pdn(priv->pd, &pd_num);
- if (ret)
- return ret;
- attr.pd = pd_num;
attr.rq_size = 0;
attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
log_nb_desc));
@@ -138,8 +138,8 @@ mlx5_regex_addr2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl,
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
+ return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+ addr, !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
}
@@ -639,7 +639,7 @@ setup_qps(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *queue)
static int
setup_buffers(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp)
{
- struct ibv_pd *pd = priv->pd;
+ struct ibv_pd *pd = priv->cdev->pd;
uint32_t i;
int err;
@@ -746,12 +746,7 @@ mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
if (priv->has_umr) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- if (regex_get_pdn(priv->pd, &attr.pd)) {
- err = -rte_errno;
- DRV_LOG(ERR, "Failed to get pdn.");
- mlx5_regexdev_teardown_fastpath(priv, qp_id);
- return err;
- }
+ attr.pd = priv->cdev->pdn;
#endif
for (i = 0; i < qp->nb_desc; i++) {
attr.klm_num = MLX5_REGEX_MAX_KLM_NUM;
@@ -188,37 +188,6 @@ mlx5_vdpa_features_set(int vid)
return 0;
}
-static int
-mlx5_vdpa_pd_create(struct mlx5_vdpa_priv *priv)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
- if (priv->pd == NULL) {
- DRV_LOG(ERR, "Failed to allocate PD.");
- return errno ? -errno : -ENOMEM;
- }
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret = 0;
-
- obj.pd.in = priv->pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret) {
- DRV_LOG(ERR, "Fail to get PD object info.");
- mlx5_glue->dealloc_pd(priv->pd);
- priv->pd = NULL;
- return -errno;
- }
- priv->pdn = pd_info.pdn;
- return 0;
-#else
- (void)priv;
- DRV_LOG(ERR, "Cannot get pdn - no DV support.");
- return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
static int
mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
{
@@ -289,10 +258,6 @@ mlx5_vdpa_dev_close(int vid)
mlx5_vdpa_virtqs_release(priv);
mlx5_vdpa_event_qp_global_release(priv);
mlx5_vdpa_mem_dereg(priv);
- if (priv->pd) {
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
- priv->pd = NULL;
- }
priv->configured = 0;
priv->vid = 0;
/* The mutex may stay locked after event thread cancel - initiate it. */
@@ -320,8 +285,7 @@ mlx5_vdpa_dev_config(int vid)
if (mlx5_vdpa_mtu_set(priv))
DRV_LOG(WARNING, "MTU cannot be set on device %s.",
vdev->device->name);
- if (mlx5_vdpa_pd_create(priv) || mlx5_vdpa_mem_register(priv) ||
- mlx5_vdpa_err_event_setup(priv) ||
+ if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_err_event_setup(priv) ||
mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
mlx5_vdpa_cqe_event_setup(priv)) {
mlx5_vdpa_dev_close(vid);
@@ -131,8 +131,6 @@ struct mlx5_vdpa_priv {
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
int vid; /* vhost device id. */
struct mlx5_hca_vdpa_attr caps;
- uint32_t pdn; /* Protection Domain number. */
- struct ibv_pd *pd;
uint32_t gpa_mkey_index;
struct ibv_mr *null_mr;
struct rte_vhost_memory *vmem;
@@ -593,7 +593,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
return -1;
if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
return -1;
- attr.pd = priv->pdn;
+ attr.pd = priv->cdev->pdn;
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
if (!eqp->fw_qp) {
@@ -39,7 +39,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
struct mlx5_devx_mkey_attr mkey_attr = {
.addr = (uintptr_t)log_base,
.size = log_size,
- .pd = priv->pdn,
+ .pd = priv->cdev->pdn,
.pg_access = 1,
};
struct mlx5_devx_virtq_attr attr = {
@@ -193,7 +193,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
if (!mem)
return -rte_errno;
priv->vmem = mem;
- priv->null_mr = mlx5_glue->alloc_null_mr(priv->pd);
+ priv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);
if (!priv->null_mr) {
DRV_LOG(ERR, "Failed to allocate null MR.");
ret = -errno;
@@ -220,7 +220,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
mkey_attr.addr = (uintptr_t)(reg->guest_phys_addr);
mkey_attr.size = reg->size;
mkey_attr.umem_id = entry->umem->umem_id;
- mkey_attr.pd = priv->pdn;
+ mkey_attr.pd = priv->cdev->pdn;
mkey_attr.pg_access = 1;
entry->mkey = mlx5_devx_cmd_mkey_create(priv->cdev->ctx,
&mkey_attr);
@@ -268,7 +268,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
}
mkey_attr.addr = (uintptr_t)(mem->regions[0].guest_phys_addr);
mkey_attr.size = mem_size;
- mkey_attr.pd = priv->pdn;
+ mkey_attr.pd = priv->cdev->pdn;
mkey_attr.umem_id = 0;
/* Must be zero for KLM mode. */
mkey_attr.log_entity_size = mode == MLX5_MKC_ACCESS_MODE_KLM_FBS ?
@@ -322,7 +322,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
attr.mkey = priv->gpa_mkey_index;
attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
attr.queue_index = index;
- attr.pd = priv->pdn;
+ attr.pd = priv->cdev->pdn;
attr.hw_latency_mode = priv->hw_latency_mode;
attr.hw_max_latency_us = priv->hw_max_latency_us;
attr.hw_max_pending_comp = priv->hw_max_pending_comp;