[RFC,04/21] compress/mlx5: use context device structure

Message ID 20210817134441.1966618-5-michaelba@nvidia.com
State RFC, archived
Delegated to: Raslan Darawsheh
Series mlx5: sharing global MR cache between drivers

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Michael Baum Aug. 17, 2021, 1:44 p.m. UTC
  Use the common device context structure as a priv field, replacing the
  driver's own Verbs context, protection domain and PD number.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
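Notes (not for the commit log): below is a minimal sketch of the common
device context as it can be inferred from the call sites in this patch.
The names (mlx5_dev_ctx, mlx5_dev_ctx_prepare(), mlx5_dev_ctx_release(),
ctx, pd, pdn) are taken from the diff; the member types are a reading of
the usage only, and the authoritative definition lives in the common
mlx5 code introduced earlier in this series.

    /* Sketch only -- inferred from usage, not the real definition. */
    struct mlx5_dev_ctx {
        void *ctx;    /* DevX/Verbs device context (replaces priv->ctx). */
        void *pd;     /* Protection Domain (replaces priv->pd). */
        uint32_t pdn; /* Protection Domain number (replaces priv->pdn). */
    };

    /* Probe-time lifecycle as used by mlx5_compress_dev_probe() below: */
    struct mlx5_dev_ctx *dev_ctx =
        mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),
                    RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
    if (dev_ctx == NULL)
        return -rte_errno;
    if (mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_COMPRESS) < 0) {
        mlx5_free(dev_ctx);
        return -rte_errno;
    }
    /* ... all HW access now goes through dev_ctx->ctx / pd / pdn ... */
    mlx5_dev_ctx_release(dev_ctx);  /* on error paths and in dev_remove() */
    mlx5_free(dev_ctx);

The PD allocation that used to live in mlx5_compress_pd_create() is
assumed to move behind mlx5_dev_ctx_prepare(), which is why this patch
can drop the per-driver pd/pdn fields together with the
HAVE_IBV_FLOW_DV_SUPPORT fallback.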
 drivers/compress/mlx5/mlx5_compress.c | 110 ++++++++++----------------
 1 file changed, 42 insertions(+), 68 deletions(-)
  

Patch

diff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c
index 883e720ec1..e906ddb066 100644
--- a/drivers/compress/mlx5/mlx5_compress.c
+++ b/drivers/compress/mlx5/mlx5_compress.c
@@ -35,14 +35,12 @@  struct mlx5_compress_xform {
 
 struct mlx5_compress_priv {
 	TAILQ_ENTRY(mlx5_compress_priv) next;
-	struct ibv_context *ctx; /* Device context. */
+	struct mlx5_dev_ctx *dev_ctx; /* Device context. */
 	struct rte_compressdev *cdev;
 	void *uar;
-	uint32_t pdn; /* Protection Domain number. */
 	uint8_t min_block_size;
 	uint8_t sq_ts_format; /* Whether SQ supports timestamp formats. */
 	/* Minimum huffman block size supported by the device. */
-	struct ibv_pd *pd;
 	struct rte_compressdev_config dev_config;
 	LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
 	rte_spinlock_t xform_sl;
@@ -185,7 +183,7 @@  mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 	struct mlx5_devx_create_sq_attr sq_attr = {
 		.user_index = qp_id,
 		.wq_attr = (struct mlx5_devx_wq_attr){
-			.pd = priv->pdn,
+			.pd = priv->dev_ctx->pdn,
 			.uar_page = mlx5_os_get_devx_uar_page_id(priv->uar),
 		},
 	};
@@ -228,24 +226,24 @@  mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 	qp->priv = priv;
 	qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
 						   RTE_CACHE_LINE_SIZE);
-	if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
-					sizeof(struct mlx5_gga_compress_opaque),
+	if (mlx5_common_verbs_reg_mr(priv->dev_ctx->pd, opaq_buf,
+			qp->entries_n * sizeof(struct mlx5_gga_compress_opaque),
 							 &qp->opaque_mr) != 0) {
 		rte_free(opaq_buf);
 		DRV_LOG(ERR, "Failed to register opaque MR.");
 		rte_errno = ENOMEM;
 		goto err;
 	}
-	ret = mlx5_devx_cq_create(priv->ctx, &qp->cq, log_ops_n, &cq_attr,
-				  socket_id);
+	ret = mlx5_devx_cq_create(priv->dev_ctx->ctx, &qp->cq, log_ops_n,
+				  &cq_attr, socket_id);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto err;
 	}
 	sq_attr.cqn = qp->cq.cq->id;
 	sq_attr.ts_format = mlx5_ts_format_conv(priv->sq_ts_format);
-	ret = mlx5_devx_sq_create(priv->ctx, &qp->sq, log_ops_n, &sq_attr,
-				  socket_id);
+	ret = mlx5_devx_sq_create(priv->dev_ctx->ctx, &qp->sq, log_ops_n,
+				  &sq_attr, socket_id);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to create SQ.");
 		goto err;
@@ -465,7 +463,8 @@  mlx5_compress_addr2mr(struct mlx5_compress_priv *priv, uintptr_t addr,
 	if (likely(lkey != UINT32_MAX))
 		return lkey;
 	/* Take slower bottom-half on miss. */
-	return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
+	return mlx5_mr_addr2mr_bh(priv->dev_ctx->pd, 0, &priv->mr_scache,
+				  mr_ctrl, addr,
 				  !!(ol_flags & EXT_ATTACHED_MBUF));
 }
 
@@ -689,57 +688,19 @@  mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
 static void
 mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
 {
-	if (priv->pd != NULL) {
-		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
-		priv->pd = NULL;
-	}
 	if (priv->uar != NULL) {
 		mlx5_glue->devx_free_uar(priv->uar);
 		priv->uar = NULL;
 	}
 }
 
-static int
-mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	struct mlx5dv_obj obj;
-	struct mlx5dv_pd pd_info;
-	int ret;
-
-	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
-	if (priv->pd == NULL) {
-		DRV_LOG(ERR, "Failed to allocate PD.");
-		return errno ? -errno : -ENOMEM;
-	}
-	obj.pd.in = priv->pd;
-	obj.pd.out = &pd_info;
-	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-	if (ret != 0) {
-		DRV_LOG(ERR, "Fail to get PD object info.");
-		mlx5_glue->dealloc_pd(priv->pd);
-		priv->pd = NULL;
-		return -errno;
-	}
-	priv->pdn = pd_info.pdn;
-	return 0;
-#else
-	(void)priv;
-	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
-	return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
 static int
 mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
 {
-	if (mlx5_compress_pd_create(priv) != 0)
-		return -1;
-	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
+	priv->uar = mlx5_devx_alloc_uar(priv->dev_ctx->ctx, -1);
 	if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
 	    NULL) {
 		rte_errno = errno;
-		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
 		DRV_LOG(ERR, "Failed to allocate UAR.");
 		return -1;
 	}
@@ -775,7 +736,8 @@  mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 		/* Iterate all the existing mlx5 devices. */
 		TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
 			mlx5_free_mr_by_addr(&priv->mr_scache,
-					     priv->ctx->device->name,
+					     mlx5_os_get_ctx_device_name
+							   (priv->dev_ctx->ctx),
 					     addr, len);
 		pthread_mutex_unlock(&priv_list_lock);
 		break;
@@ -788,60 +750,70 @@  mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 static int
 mlx5_compress_dev_probe(struct rte_device *dev)
 {
-	struct ibv_device *ibv;
 	struct rte_compressdev *cdev;
-	struct ibv_context *ctx;
+	struct mlx5_dev_ctx *dev_ctx;
 	struct mlx5_compress_priv *priv;
 	struct mlx5_hca_attr att = { 0 };
 	struct rte_compressdev_pmd_init_params init_params = {
 		.name = "",
 		.socket_id = dev->numa_node,
 	};
+	const char *ibdev_name;
+	int ret;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		DRV_LOG(ERR, "Non-primary process type is not supported.");
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
-	ibv = mlx5_os_get_ibv_dev(dev);
-	if (ibv == NULL)
+	dev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),
+			      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (dev_ctx == NULL) {
+		DRV_LOG(ERR, "Device context allocation failure.");
+		rte_errno = ENOMEM;
 		return -rte_errno;
-	ctx = mlx5_glue->dv_open_device(ibv);
-	if (ctx == NULL) {
-		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
+	}
+	ret = mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_COMPRESS);
+	if (ret < 0) {
+		DRV_LOG(ERR, "Failed to create device context.");
+		mlx5_free(dev_ctx);
 		rte_errno = ENODEV;
 		return -rte_errno;
 	}
-	if (mlx5_devx_cmd_query_hca_attr(ctx, &att) != 0 ||
+	ibdev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx);
+	if (mlx5_devx_cmd_query_hca_attr(dev_ctx->ctx, &att) != 0 ||
 	    att.mmo_compress_en == 0 || att.mmo_decompress_en == 0 ||
 	    att.mmo_dma_en == 0) {
 		DRV_LOG(ERR, "Not enough capabilities to support compress "
 			"operations, maybe old FW/OFED version?");
-		claim_zero(mlx5_glue->close_device(ctx));
+		mlx5_dev_ctx_release(dev_ctx);
+		mlx5_free(dev_ctx);
 		rte_errno = ENOTSUP;
 		return -ENOTSUP;
 	}
-	cdev = rte_compressdev_pmd_create(ibv->name, dev,
+	cdev = rte_compressdev_pmd_create(ibdev_name, dev,
 					  sizeof(*priv), &init_params);
 	if (cdev == NULL) {
-		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
-		claim_zero(mlx5_glue->close_device(ctx));
+		DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name);
+		mlx5_dev_ctx_release(dev_ctx);
+		mlx5_free(dev_ctx);
 		return -ENODEV;
 	}
 	DRV_LOG(INFO,
-		"Compress device %s was created successfully.", ibv->name);
+		"Compress device %s was created successfully.", ibdev_name);
 	cdev->dev_ops = &mlx5_compress_ops;
 	cdev->dequeue_burst = mlx5_compress_dequeue_burst;
 	cdev->enqueue_burst = mlx5_compress_enqueue_burst;
 	cdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
 	priv = cdev->data->dev_private;
-	priv->ctx = ctx;
+	priv->dev_ctx = dev_ctx;
 	priv->cdev = cdev;
 	priv->min_block_size = att.compress_min_block_size;
 	priv->sq_ts_format = att.sq_ts_format;
 	if (mlx5_compress_hw_global_prepare(priv) != 0) {
 		rte_compressdev_pmd_destroy(priv->cdev);
-		claim_zero(mlx5_glue->close_device(priv->ctx));
+		mlx5_dev_ctx_release(priv->dev_ctx);
+		mlx5_free(priv->dev_ctx);
 		return -1;
 	}
 	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
@@ -849,7 +821,8 @@  mlx5_compress_dev_probe(struct rte_device *dev)
 		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
 		mlx5_compress_hw_global_release(priv);
 		rte_compressdev_pmd_destroy(priv->cdev);
-		claim_zero(mlx5_glue->close_device(priv->ctx));
+		mlx5_dev_ctx_release(priv->dev_ctx);
+		mlx5_free(priv->dev_ctx);
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
@@ -885,7 +858,8 @@  mlx5_compress_dev_remove(struct rte_device *dev)
 		mlx5_mr_release_cache(&priv->mr_scache);
 		mlx5_compress_hw_global_release(priv);
 		rte_compressdev_pmd_destroy(priv->cdev);
-		claim_zero(mlx5_glue->close_device(priv->ctx));
+		mlx5_dev_ctx_release(priv->dev_ctx);
+		mlx5_free(priv->dev_ctx);
 	}
 	return 0;
 }