[v2,02/13] vdpa/mlx5: support queues number operation
Checks
Commit Message
Support get_queue_num operation to get the maximum number of queues
supported by the device.
This number comes from the DevX capabilities.
Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 54 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 53 insertions(+), 1 deletion(-)
Comments
On 1/29/20 11:08 AM, Matan Azrad wrote:
> Support get_queue_num operation to get the maximum number of queues
> supported by the device.
>
> This number comes from the DevX capabilities.
>
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
> drivers/vdpa/mlx5/mlx5_vdpa.c | 54 ++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 53 insertions(+), 1 deletion(-)
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
@@ -9,6 +9,7 @@
#include <mlx5_glue.h>
#include <mlx5_common.h>
+#include <mlx5_devx_cmds.h>
#include "mlx5_vdpa_utils.h"
@@ -18,6 +19,7 @@ struct mlx5_vdpa_priv {
int id; /* vDPA device id. */
struct ibv_context *ctx; /* Device context. */
struct rte_vdpa_dev_addr dev_addr;
+ struct mlx5_hca_vdpa_attr caps;
};
TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
@@ -25,8 +27,43 @@ struct mlx5_vdpa_priv {
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
int mlx5_vdpa_logtype;
+/*
+ * Look up a vDPA private structure by its vDPA device id.
+ *
+ * Walks the global priv_list under priv_list_lock and compares each
+ * entry's id against @did.
+ *
+ * @param did
+ *   vDPA device id to search for.
+ * @return
+ *   Pointer to the matching mlx5_vdpa_priv on success, or NULL with
+ *   rte_errno set to EINVAL when no device with this id is registered.
+ */
+static struct mlx5_vdpa_priv *
+mlx5_vdpa_find_priv_resource_by_did(int did)
+{
+ struct mlx5_vdpa_priv *priv;
+ int found = 0;
+
+ pthread_mutex_lock(&priv_list_lock);
+ TAILQ_FOREACH(priv, &priv_list, next) {
+ if (did == priv->id) {
+ found = 1;
+ break;
+ }
+ }
+ /* Drop the lock before reporting; priv itself is not list-protected
+  * after this point — NOTE(review): assumes entries are not removed
+  * concurrently while callers hold the returned pointer; confirm
+  * against the remove path.
+  */
+ pthread_mutex_unlock(&priv_list_lock);
+ if (!found) {
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ return priv;
+}
+
+/*
+ * vDPA driver callback: report the maximum number of virtio queues
+ * supported by the device identified by @did.
+ *
+ * The value was read from the DevX HCA capabilities at probe time and
+ * cached in priv->caps.
+ *
+ * @param did
+ *   vDPA device id.
+ * @param[out] queue_num
+ *   Filled with the device's max_num_virtio_queues capability.
+ * @return
+ *   0 on success, -1 if @did does not match a registered device
+ *   (rte_errno is set to EINVAL by the lookup helper).
+ */
+static int
+mlx5_vdpa_get_queue_num(int did, uint32_t *queue_num)
+{
+ struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+ if (priv == NULL) {
+ /* NOTE(review): the lookup helper already logs this same error,
+  * so a bad id is reported twice — consider dropping one of the
+  * two messages.
+  */
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ return -1;
+ }
+ *queue_num = priv->caps.max_num_virtio_queues;
+ return 0;
+}
+
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
- .get_queue_num = NULL,
+ .get_queue_num = mlx5_vdpa_get_queue_num,
.get_features = NULL,
.get_protocol_features = NULL,
.dev_conf = NULL,
@@ -60,6 +97,7 @@ struct mlx5_vdpa_priv {
struct ibv_device *ibv_match = NULL;
struct mlx5_vdpa_priv *priv = NULL;
struct ibv_context *ctx = NULL;
+ struct mlx5_hca_attr attr;
int ret;
if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_VDPA) {
@@ -113,6 +151,20 @@ struct mlx5_vdpa_priv {
rte_errno = ENOMEM;
goto error;
}
+ ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
+ if (ret) {
+ DRV_LOG(ERR, "Unable to read HCA capabilities.");
+ rte_errno = ENOTSUP;
+ goto error;
+ } else {
+ if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+ DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
+ " maybe old FW/OFED version?");
+ rte_errno = ENOTSUP;
+ goto error;
+ }
+ priv->caps = attr.vdpa;
+ }
priv->ctx = ctx;
priv->dev_addr.pci_addr = pci_dev->addr;
priv->dev_addr.type = PCI_ADDR;