@@ -74,6 +74,14 @@ New Features
names and to reset them by a vDPA device.
+ * **Updated Mellanox mlx5 vDPA driver.**
+
+ Updated Mellanox mlx5 vDPA driver with new features, including:
+
+ * Added support for virtio queue statistics.
+
+
+
Removed Items
-------------
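A quick caller-side illustration of the feature announced in the release notes entry above: once a vDPA device is configured, a vhost application can poll and clear the new per-virtq counters roughly as follows. This is a minimal sketch, assuming the rte_vdpa_get_stats()/rte_vdpa_reset_stats() wrappers of the vhost library and a device id/queue id obtained elsewhere; it is not part of this patch.

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_common.h>
    #include <rte_vdpa.h>

    /* Sketch only: "did" is the vDPA device id, "qid" a virtq index. */
    static void
    dump_virtq_stats(int did, uint16_t qid)
    {
            struct rte_vdpa_stat stats[16];
            int i, n;

            n = rte_vdpa_get_stats(did, qid, stats, RTE_DIM(stats));
            if (n < 0)
                    return; /* Device not configured or statistics not supported. */
            for (i = 0; i < n; i++)
                    printf("stat id %" PRIu64 ": %" PRIu64 "\n",
                           stats[i].id, stats[i].value);
            /* Clear the counters to start a new measurement window. */
            rte_vdpa_reset_stats(did, qid);
    }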
@@ -17,6 +17,7 @@ packed = Y
proto mq = Y
proto log shmfd = Y
proto host notifier = Y
+queue statistics = Y
Other kdrv = Y
ARMv8 = Y
Power8 = Y
@@ -8,6 +8,7 @@
#include <rte_errno.h>
#include <rte_bus_pci.h>
#include <rte_pci.h>
+#include <rte_string_fns.h>
#include <mlx5_glue.h>
#include <mlx5_common.h>
@@ -274,6 +275,85 @@
return 0;
}
+static int
+mlx5_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
+ unsigned int size)
+{
+ static const char *mlx5_vdpa_stats_names[MLX5_VDPA_STATS_MAX] = {
+ "received_descriptors",
+ "completed_descriptors",
+ "bad descriptor errors",
+ "exceed max chain",
+ "invalid buffer",
+ "completion errors",
+ };
+ struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+ unsigned int i;
+
+ if (priv == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ return -ENODEV;
+ }
+ /* A NULL array means the caller only queries the number of counters. */
+ if (!stats_names)
+ return MLX5_VDPA_STATS_MAX;
+ size = RTE_MIN(size, (unsigned int)MLX5_VDPA_STATS_MAX);
+ for (i = 0; i < size; ++i)
+ strlcpy(stats_names[i].name, mlx5_vdpa_stats_names[i],
+ RTE_VDPA_STATS_NAME_SIZE);
+ return size;
+}
+
+static int
+mlx5_vdpa_get_stats(int did, int qid, struct rte_vdpa_stat *stats,
+ unsigned int n)
+{
+ struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+ if (priv == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ return -ENODEV;
+ }
+ if (!priv->configured) {
+ DRV_LOG(ERR, "Device %d was not configured.", did);
+ return -ENODATA;
+ }
+ if (qid >= (int)priv->nr_virtqs) {
+ DRV_LOG(ERR, "Invalid virtq id %d, out of range.", qid);
+ return -E2BIG;
+ }
+ if (!priv->caps.queue_counters_valid) {
+ DRV_LOG(ERR, "Virtq statistics are not supported for device %d.",
+ did);
+ return -ENOTSUP;
+ }
+ return mlx5_vdpa_virtq_stats_get(priv, qid, stats, n);
+}
+
+static int
+mlx5_vdpa_reset_stats(int did, int qid)
+{
+ struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+ if (priv == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ return -ENODEV;
+ }
+ if (!priv->configured) {
+ DRV_LOG(ERR, "Device %d was not configured.", did);
+ return -ENODATA;
+ }
+ if (qid >= (int)priv->nr_virtqs) {
+ DRV_LOG(ERR, "Invalid virtq id %d, out of range.", qid);
+ return -E2BIG;
+ }
+ if (!priv->caps.queue_counters_valid) {
+ DRV_LOG(ERR, "Virtq statistics are not supported for device %d.",
+ did);
+ return -ENOTSUP;
+ }
+ return mlx5_vdpa_virtq_stats_reset(priv, qid);
+}
+
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
.get_queue_num = mlx5_vdpa_get_queue_num,
.get_features = mlx5_vdpa_get_vdpa_features,
@@ -286,6 +366,9 @@
.get_vfio_group_fd = NULL,
.get_vfio_device_fd = mlx5_vdpa_get_device_fd,
.get_notify_area = mlx5_vdpa_get_notify_area,
+ .get_stats_names = mlx5_vdpa_get_stats_names,
+ .get_stats = mlx5_vdpa_get_stats,
+ .reset_stats = mlx5_vdpa_reset_stats,
};
static struct ibv_device *
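Reviewer note on the sizing convention used by mlx5_vdpa_get_stats_names() above: passing a NULL name array only reports how many counters exist, so a caller can size its buffer in two calls. A minimal caller-side sketch, assuming the matching rte_vdpa_get_stats_names() wrapper exposed by the vhost library, a valid device id "did", and calloc() from <stdlib.h>; none of this is part of the patch:

    /* First call learns how many counters exist, second call fills the names. */
    int n = rte_vdpa_get_stats_names(did, NULL, 0);

    if (n > 0) {
            struct rte_vdpa_stat_name *names = calloc(n, sizeof(*names));

            if (names != NULL)
                    n = rte_vdpa_get_stats_names(did, names, n);
    }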
@@ -489,6 +572,8 @@
rte_errno = ENOTSUP;
goto error;
}
+ if (!attr.vdpa.queue_counters_valid)
+ DRV_LOG(DEBUG, "No capability to support virtq statistics.");
priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
sizeof(struct mlx5_vdpa_virtq) *
attr.vdpa.max_num_virtio_queues * 2,
@@ -76,6 +76,7 @@ struct mlx5_vdpa_virtq {
uint16_t vq_size;
struct mlx5_vdpa_priv *priv;
struct mlx5_devx_obj *virtq;
+ struct mlx5_devx_obj *counters;
struct mlx5_vdpa_event_qp eqp;
struct {
struct mlx5dv_devx_umem *obj;
@@ -83,6 +84,7 @@ struct mlx5_vdpa_virtq {
uint32_t size;
} umems[3];
struct rte_intr_handle intr_handle;
+ /* Hardware counter values captured at the last statistics reset. */
+ struct mlx5_devx_virtio_q_couners_attr reset;
};
struct mlx5_vdpa_steer {
@@ -127,6 +129,16 @@ struct mlx5_vdpa_priv {
struct mlx5_vdpa_virtq virtqs[];
};
+enum {
+ MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
+ MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
+ MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
+ MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
+ MLX5_VDPA_STATS_INVALID_BUFFER,
+ MLX5_VDPA_STATS_COMPLETION_ERRORS,
+ MLX5_VDPA_STATS_MAX
+};
+
/*
* Check whether virtq is for traffic receive.
* According to VIRTIO_NET Spec the virtqueues index identity its type by:
@@ -352,4 +364,37 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
*/
int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
+/**
+ * Get virtq statistics.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] qid
+ * The virtq index.
+ * @param stats
+ * The virtq statistics array to fill.
+ * @param n
+ * The number of elements in @p stats array.
+ *
+ * @return
+ * A negative value on error, otherwise the number of entries filled in the
+ * @p stats array.
+ */
+int
+mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+ struct rte_vdpa_stat *stats, unsigned int n);
+
+/**
+ * Reset virtq statistics.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] qid
+ * The virtq index.
+ *
+ * @return
+ * A negative value on error, otherwise 0.
+ */
+int
+mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
#endif /* RTE_PMD_MLX5_VDPA_H_ */
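A note on the MLX5_VDPA_STATS_* enum added above: its values serve both as offsets into the name table returned by .get_stats_names and as the id written into each rte_vdpa_stat, so the enum and the name table must stay in the same order. A caller can therefore pair values with names as in this sketch, where names, stats, n and i are hypothetical variables filled by the get_stats_names/get_stats calls shown earlier:

    for (i = 0; i < n; i++)
            printf("%-24s %" PRIu64 "\n",
                   names[stats[i].id].name, stats[i].value);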
@@ -72,6 +72,11 @@
rte_free(virtq->umems[i].buf);
}
memset(&virtq->umems, 0, sizeof(virtq->umems));
+ if (virtq->counters) {
+ claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
+ virtq->counters = NULL;
+ }
+ memset(&virtq->reset, 0, sizeof(virtq->reset));
if (virtq->eqp.fw_qp)
mlx5_vdpa_event_qp_destroy(&virtq->eqp);
return 0;
@@ -205,6 +210,16 @@
DRV_LOG(INFO, "Virtq %d is, for sure, working by poll mode, no"
" need event QPs and event mechanism.", index);
}
+ if (priv->caps.queue_counters_valid) {
+ virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
+ (priv->ctx);
+ if (!virtq->counters) {
+ DRV_LOG(ERR, "Failed to create virtq couners for virtq"
+ " %d.", index);
+ goto error;
+ }
+ attr.counters_obj_id = virtq->counters->id;
+ }
/* Setup 3 UMEMs for each virtq. */
for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
@@ -455,3 +470,82 @@
}
return 0;
}
+
+int
+mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+ struct rte_vdpa_stat *stats, unsigned int n)
+{
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
+ struct mlx5_devx_virtio_q_couners_attr attr = {0};
+ int ret;
+
+ if (!virtq->virtq || !virtq->enable) {
+ DRV_LOG(ERR, "Failed to read virtq %d statistics: virtq "
+ "is not configured or not enabled.", qid);
+ return -EINVAL;
+ }
+ MLX5_ASSERT(virtq->counters);
+ ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
+ return ret;
+ }
+ ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
+ /* Fill stats[0..ret - 1], stop once the capped count is reached. */
+ if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
+ return ret;
+ stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
+ .id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
+ .value = attr.received_desc - virtq->reset.received_desc,
+ };
+ if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
+ return ret;
+ stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
+ .id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
+ .value = attr.completed_desc - virtq->reset.completed_desc,
+ };
+ if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
+ return ret;
+ stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
+ .id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
+ .value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
+ };
+ if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
+ return ret;
+ stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
+ .id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
+ .value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
+ };
+ if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
+ return ret;
+ stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
+ .id = MLX5_VDPA_STATS_INVALID_BUFFER,
+ .value = attr.invalid_buffer - virtq->reset.invalid_buffer,
+ };
+ if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
+ return ret;
+ stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
+ .id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
+ .value = attr.error_cqes - virtq->reset.error_cqes,
+ };
+ return ret;
+}
+
+int
+mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
+{
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
+ int ret;
+
+ if (!virtq->virtq || !virtq->enable) {
+ DRV_LOG(ERR, "Failed to reset virtq %d statistics: virtq "
+ "is not configured or not enabled.", qid);
+ return -EINVAL;
+ }
+ MLX5_ASSERT(virtq->counters);
+ ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
+ &virtq->reset);
+ if (ret)
+ DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
+ qid);
+ return ret;
+}
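
One remark on the reset path above: the DevX counters object is neither destroyed nor cleared in hardware on reset. Instead, the current hardware values are snapshotted into virtq->reset, and mlx5_vdpa_virtq_stats_get() reports the difference against that snapshot. For example, if the hardware received_desc counter reads 1000 and the snapshot stored at the last reset was 400, the application sees a value of 600. This keeps reset cheap (a single query command) and avoids a destroy/create cycle of the counters object.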