[v1,20/38] vdpa/mlx5: prepare completion queues

Message ID 1579539790-3882-21-git-send-email-matan@mellanox.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: Introduce mlx5 vDPA driver

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  success   Compilation OK

Commit Message

Matan Azrad Jan. 20, 2020, 5:02 p.m. UTC
  In preparation for the virtio queue creation, a CQ should be created
for each virtio queue.

The design is to trigger an event for the guest and for the vdpa driver
when a new CQE is posted by the HW after the packet transmission.

This patch adds the basic operations to create and destroy CQs and to
trigger the CQE events when a new CQE is posted.
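
As a rough illustration of how these helpers are meant to be used by the
upcoming virtio queue creation (not part of this patch), a caller could look
like the sketch below. The example_virtq_setup()/example_virtq_teardown()
names are hypothetical; only mlx5_vdpa_cq_create(), mlx5_vdpa_cq_destroy()
and mlx5_vdpa_cq_global_release() come from this patch:

	#include <rte_errno.h>

	#include "mlx5_vdpa_utils.h"
	#include "mlx5_vdpa.h"

	/* Hypothetical caller: one CQ per virtio queue, sized to the ring. */
	static int
	example_virtq_setup(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq,
			    uint16_t desc_n, int callfd)
	{
		/* callfd == -1 means the guest polls, so no guest FD is subscribed. */
		if (mlx5_vdpa_cq_create(priv, desc_n, callfd, cq)) {
			DRV_LOG(ERR, "CQ creation failed: %d.", rte_errno);
			return -rte_errno;
		}
		return 0;
	}

	/* Hypothetical teardown path. */
	static void
	example_virtq_teardown(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
	{
		mlx5_vdpa_cq_destroy(cq);
		/*
		 * The UAR and event channel are shared by all the CQs of the
		 * device; release them only once the last CQ is gone.
		 */
		mlx5_vdpa_cq_global_release(priv);
	}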

Signed-off-by: Matan Azrad <matan@mellanox.com>
---
 drivers/vdpa/mlx5/Makefile       |   1 +
 drivers/vdpa/mlx5/meson.build    |   1 +
 drivers/vdpa/mlx5/mlx5_vdpa.h    |  56 ++++++++++++++
 drivers/vdpa/mlx5/mlx5_vdpa_cq.c | 154 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 212 insertions(+)
 create mode 100644 drivers/vdpa/mlx5/mlx5_vdpa_cq.c
  

Patch

diff --git a/drivers/vdpa/mlx5/Makefile b/drivers/vdpa/mlx5/Makefile
index 5472797..f813824 100644
--- a/drivers/vdpa/mlx5/Makefile
+++ b/drivers/vdpa/mlx5/Makefile
@@ -9,6 +9,7 @@  LIB = librte_pmd_mlx5_vdpa.a
 # Sources.
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD) += mlx5_vdpa.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD) += mlx5_vdpa_mem.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD) += mlx5_vdpa_cq.c
 
 # Basic CFLAGS.
 CFLAGS += -O3
diff --git a/drivers/vdpa/mlx5/meson.build b/drivers/vdpa/mlx5/meson.build
index 7e5dd95..aec5d34 100644
--- a/drivers/vdpa/mlx5/meson.build
+++ b/drivers/vdpa/mlx5/meson.build
@@ -13,6 +13,7 @@  deps += ['hash', 'common_mlx5', 'vhost', 'bus_pci', 'eal', 'sched']
 sources = files(
 	'mlx5_vdpa.c',
 	'mlx5_vdpa_mem.c',
+	'mlx5_vdpa_cq.c',
 )
 cflags_options = [
 	'-std=c11',
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index e27baea..6008e3f 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -9,9 +9,27 @@ 
 
 #include <rte_vdpa.h>
 #include <rte_vhost.h>
+#include <rte_spinlock.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
+#include <mlx5_prm.h>
+
+
+struct mlx5_vdpa_cq {
+	uint16_t log_desc_n;
+	uint32_t cq_ci:24;
+	uint32_t arm_sn:2;
+	rte_spinlock_t sl;
+	struct mlx5_devx_obj *cq;
+	struct mlx5dv_devx_umem *umem_obj;
+	union {
+		volatile void *umem_buf;
+		volatile struct mlx5_cqe *cqes;
+	};
+	volatile uint32_t *db_rec;
+	uint64_t errors;
+};
 
 struct mlx5_vdpa_query_mr {
 	SLIST_ENTRY(mlx5_vdpa_query_mr) next;
@@ -34,6 +52,9 @@  struct mlx5_vdpa_priv {
 	uint32_t gpa_mkey_index;
 	struct ibv_mr *null_mr;
 	struct rte_vhost_memory *vmem;
+	uint32_t eqn;
+	struct mlx5dv_devx_event_channel *eventc;
+	struct mlx5dv_devx_uar *uar;
 	SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
 };
 
@@ -57,4 +78,39 @@  struct mlx5_vdpa_priv {
  */
 int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
 
+
+/**
+ * Create a CQ and all its related resources.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] desc_n
+ *   Number of CQEs.
+ * @param[in] callfd
+ *   The guest notification file descriptor.
+ * @param[in/out] cq
+ *   Pointer to the CQ structure.
+ *
+ * @return
+ *   0 on success, -1 otherwise and rte_errno is set.
+ */
+int mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
+			int callfd, struct mlx5_vdpa_cq *cq);
+
+/**
+ * Destroy a CQ and all its related resources.
+ *
+ * @param[in/out] cq
+ *   Pointer to the CQ structure.
+ */
+void mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq);
+
+/**
+ * Release all the CQ global resources.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ */
+void mlx5_vdpa_cq_global_release(struct mlx5_vdpa_priv *priv);
+
 #endif /* RTE_PMD_MLX5_VDPA_H_ */
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cq.c b/drivers/vdpa/mlx5/mlx5_vdpa_cq.c
new file mode 100644
index 0000000..563277f
--- /dev/null
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cq.c
@@ -0,0 +1,154 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+#include <unistd.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+
+#include "mlx5_vdpa_utils.h"
+#include "mlx5_vdpa.h"
+
+void
+mlx5_vdpa_cq_global_release(struct mlx5_vdpa_priv *priv)
+{
+	if (priv->uar) {
+		mlx5_glue->devx_free_uar(priv->uar);
+		priv->uar = NULL;
+	}
+	if (priv->eventc) {
+		mlx5_glue->devx_destroy_event_channel(priv->eventc);
+		priv->eventc = NULL;
+	}
+	priv->eqn = 0;
+}
+
+/* Prepare all the global resources for all the CQs.*/
+static int
+mlx5_vdpa_cq_global_prepare(struct mlx5_vdpa_priv *priv)
+{
+	uint32_t lcore;
+
+	if (priv->eventc)
+		return 0;
+	lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
+	if (mlx5_glue->devx_query_eqn(priv->ctx, lcore, &priv->eqn)) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
+		return -1;
+	}
+	priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
+			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
+	if (!priv->eventc) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create event channel %d.",
+			rte_errno);
+		goto error;
+	}
+	priv->uar = mlx5_glue->devx_alloc_uar(priv->ctx, 0);
+	if (!priv->uar) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to allocate UAR.");
+		goto error;
+	}
+	return 0;
+error:
+	mlx5_vdpa_cq_global_release(priv);
+	return -1;
+}
+
+void
+mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
+{
+	int ret __rte_unused;
+
+	if (cq->cq) {
+		ret = mlx5_devx_cmd_destroy(cq->cq);
+		assert(!ret);
+	}
+	if (cq->umem_obj) {
+		ret = mlx5_glue->devx_umem_dereg(cq->umem_obj);
+		assert(!ret);
+	}
+	if (cq->umem_buf)
+		rte_free((void *)(uintptr_t)cq->umem_buf);
+	memset(cq, 0, sizeof(*cq));
+}
+
+int
+mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n, int callfd,
+		    struct mlx5_vdpa_cq *cq)
+{
+	struct mlx5_devx_cq_attr attr = {0};
+	size_t pgsize = sysconf(_SC_PAGESIZE);
+	uint32_t log_desc_n = rte_log2_u32(desc_n);
+	uint32_t umem_size;
+	int ret;
+	uint16_t event_nums[1] = {0};
+
+	if (mlx5_vdpa_cq_global_prepare(priv))
+		return -1;
+	cq->log_desc_n = log_desc_n;
+	umem_size = sizeof(struct mlx5_cqe) * (1 << log_desc_n) +
+							sizeof(*cq->db_rec) * 2;
+	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
+	if (!cq->umem_buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
+						(void *)(uintptr_t)cq->umem_buf,
+						umem_size,
+						IBV_ACCESS_LOCAL_WRITE);
+	if (!cq->umem_obj) {
+		DRV_LOG(ERR, "Failed to register umem for CQ.");
+		goto error;
+	}
+	attr.q_umem_valid = 1;
+	attr.db_umem_valid = 1;
+	attr.use_first_only = 0;
+	attr.overrun_ignore = 0;
+	attr.uar_page_id = priv->uar->page_id;
+	attr.q_umem_id = cq->umem_obj->umem_id;
+	attr.q_umem_offset = 0;
+	attr.db_umem_id = cq->umem_obj->umem_id;
+	attr.db_umem_offset = sizeof(struct mlx5_cqe) * (1 << log_desc_n);
+	attr.eqn = priv->eqn;
+	attr.log_cq_size = log_desc_n;
+	attr.log_page_size = rte_log2_u32(pgsize);
+	cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
+	if (!cq->cq)
+		goto error;
+	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
+	cq->cq_ci = 0;
+	rte_spinlock_init(&cq->sl);
+	/* Subscribe CQ event to the event channel controlled by the driver. */
+	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
+						   sizeof(event_nums),
+						   event_nums,
+						   (uint64_t)(uintptr_t)cq);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to subscribe CQE event.");
+		rte_errno = errno;
+		goto error;
+	}
+	/* Subscribe CQ event to the guest FD only if it is not in poll mode. */
+	if (callfd != -1) {
+		ret = mlx5_glue->devx_subscribe_devx_event_fd(priv->eventc,
+							      callfd,
+							      cq->cq->obj, 0);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to subscribe CQE event fd.");
+			rte_errno = errno;
+			goto error;
+		}
+	}
+	return 0;
+error:
+	mlx5_vdpa_cq_destroy(cq);
+	return -1;
+}
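
The driver-side consumption of the subscribed events is not part of this
patch (it comes later in the series), but a rough sketch of such a handler is
given below for context. The handler name, the buffer sizing and the use of
the mlx5_glue devx_get_event wrapper are assumptions; the actual CQE
processing and CQ re-arming are omitted. It relies only on the cookie that
mlx5_vdpa_cq_create() registers with devx_subscribe_devx_event():

	/* Hypothetical handler: drain the DevX event channel and recover the
	 * CQ pointer from the registered cookie.
	 */
	static void
	example_cq_event_handler(struct mlx5_vdpa_priv *priv)
	{
		union {
			struct mlx5dv_devx_async_event_hdr hdr;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
		} out;

		while (mlx5_glue->devx_get_event(priv->eventc, &out.hdr,
						 sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.hdr.cookie)) {
			struct mlx5_vdpa_cq *cq =
				(struct mlx5_vdpa_cq *)(uintptr_t)out.hdr.cookie;

			rte_spinlock_lock(&cq->sl);
			/* CQE processing and CQ re-arming would go here. */
			rte_spinlock_unlock(&cq->sl);
		}
	}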