[v1,08/38] vdpa/mlx5: support queues number operation
diff mbox series

Message ID 1579539790-3882-9-git-send-email-matan@mellanox.com
State Superseded, archived
Delegated to: Maxime Coquelin
Headers show
Series
  • Introduce mlx5 vDPA driver
Related show

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Matan Azrad Jan. 20, 2020, 5:02 p.m. UTC
Support get_queue_num operation to get the maximum number of queues
supported by the device.

This number comes from the DevX capabilities.

Signed-off-by: Matan Azrad <matan@mellanox.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c | 54 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 53 insertions(+), 1 deletion(-)

Patch
diff mbox series

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index cb49a32..32ca908 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -9,6 +9,7 @@ 
 
 #include <mlx5_glue.h>
 #include <mlx5_common.h>
+#include <mlx5_devx_cmds.h>
 
 #include "mlx5_vdpa_utils.h"
 
@@ -18,6 +19,7 @@  struct mlx5_vdpa_priv {
 	int id; /* vDPA device id. */
 	struct ibv_context *ctx; /* Device context. */
 	struct rte_vdpa_dev_addr dev_addr;
+	struct mlx5_hca_vdpa_attr caps;
 };
 
 TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
@@ -25,8 +27,43 @@  struct mlx5_vdpa_priv {
 static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
 int mlx5_vdpa_logtype;
 
+/*
+ * Look up the driver private structure registered for a vDPA device id.
+ *
+ * Walks the global priv_list under priv_list_lock, comparing each
+ * entry's id field against @did.
+ *
+ * @param did
+ *   vDPA device id to search for.
+ * @return
+ *   Pointer to the matching mlx5_vdpa_priv on success, or NULL with
+ *   rte_errno set to EINVAL when no registered device has this id.
+ */
+static struct mlx5_vdpa_priv *
+mlx5_vdpa_find_priv_resource_by_did(int did)
+{
+	struct mlx5_vdpa_priv *priv;
+	int found = 0;
+
+	pthread_mutex_lock(&priv_list_lock);
+	TAILQ_FOREACH(priv, &priv_list, next) {
+		if (did == priv->id) {
+			found = 1;
+			break;
+		}
+	}
+	/* Release the lock before use: entries are assumed stable after probe. */
+	pthread_mutex_unlock(&priv_list_lock);
+	if (!found) {
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return priv;
+}
+
+/*
+ * rte_vdpa_dev_ops.get_queue_num callback.
+ *
+ * Reports the maximum number of virtio queues supported by the device,
+ * taken from the DevX HCA vDPA capabilities cached in priv->caps at
+ * probe time.
+ *
+ * @param did
+ *   vDPA device id.
+ * @param[out] queue_num
+ *   Filled with the maximum number of virtio queues.
+ * @return
+ *   0 on success, -1 when @did does not match a registered device
+ *   (rte_errno set to EINVAL by the lookup helper).
+ */
+static int
+mlx5_vdpa_get_queue_num(int did, uint32_t *queue_num)
+{
+	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+	if (priv == NULL) {
+		/* NOTE(review): the lookup helper already logs this error;
+		 * this second log is redundant.
+		 */
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		return -1;
+	}
+	*queue_num = priv->caps.max_num_virtio_queues;
+	return 0;
+}
+
 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
-	.get_queue_num = NULL,
+	.get_queue_num = mlx5_vdpa_get_queue_num,
 	.get_features = NULL,
 	.get_protocol_features = NULL,
 	.dev_conf = NULL,
@@ -60,6 +97,7 @@  struct mlx5_vdpa_priv {
 	struct ibv_device *ibv_match = NULL;
 	struct mlx5_vdpa_priv *priv = NULL;
 	struct ibv_context *ctx;
+	struct mlx5_hca_attr attr;
 	int ret;
 
 	errno = 0;
@@ -107,6 +145,20 @@  struct mlx5_vdpa_priv {
 		rte_errno = ENOMEM;
 		goto error;
 	}
+	ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
+	if (ret) {
+		DRV_LOG(ERR, "Unable to read HCA capabilities.");
+		rte_errno = ENOTSUP;
+		goto error;
+	} else {
+		if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+			DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
+				" maybe old FW/OFED version?");
+			rte_errno = ENOTSUP;
+			goto error;
+		}
+		priv->caps = attr.vdpa;
+	}
 	priv->ctx = ctx;
 	priv->dev_addr.pci_addr = pci_dev->addr;
 	priv->dev_addr.type = PCI_ADDR;