@@ -17,6 +17,7 @@ packed = Y
proto mq = Y
proto log shmfd = Y
proto host notifier = Y
+queue statistics = Y
Other kdrv = Y
ARMv8 = Y
Power8 = Y
@@ -274,6 +274,31 @@
return 0;
}
+static int
+mlx5_vdpa_get_stats(int did, int qid, struct rte_vdpa_queue_stats *stats)
+{
+ struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+ if (priv == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ return -ENODEV;
+ }
+ if (!priv->configured) {
+ DRV_LOG(ERR, "Device %d was not configured.", did);
+ return -ENODATA;
+ }
+ if (qid < 0 || qid >= (int)priv->nr_virtqs) {
+ DRV_LOG(ERR, "Invalid vring id: %d.", qid);
+ return -E2BIG;
+ }
+ if (!priv->caps.queue_counters_valid) {
+ DRV_LOG(ERR, "Virtq statistics is not supported for device %d.",
+ did);
+ return -ENOTSUP;
+ }
+ return mlx5_vdpa_virtq_stats_get(priv, qid, stats);
+}
+
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
.get_queue_num = mlx5_vdpa_get_queue_num,
.get_features = mlx5_vdpa_get_vdpa_features,
@@ -286,6 +311,7 @@
.get_vfio_group_fd = NULL,
.get_vfio_device_fd = mlx5_vdpa_get_device_fd,
.get_notify_area = mlx5_vdpa_get_notify_area,
+ .get_stats = mlx5_vdpa_get_stats,
};
static struct ibv_device *
@@ -489,6 +515,8 @@
rte_errno = ENOTSUP;
goto error;
}
+ if (!attr.vdpa.queue_counters_valid)
+ DRV_LOG(DEBUG, "No capability to support virtq statistics.");
priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
sizeof(struct mlx5_vdpa_virtq) *
attr.vdpa.max_num_virtio_queues * 2,
@@ -76,6 +76,7 @@ struct mlx5_vdpa_virtq {
uint16_t vq_size;
struct mlx5_vdpa_priv *priv;
struct mlx5_devx_obj *virtq;
+ struct mlx5_devx_obj *counters;
struct mlx5_vdpa_event_qp eqp;
struct {
struct mlx5dv_devx_umem *obj;
@@ -352,4 +353,19 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
*/
int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
+/**
+ * Get virtq statistics.
+ *
+ * @param[in] priv
+ * The vdpa driver private structure.
+ * @param[in] qid
+ * The virtq index.
+ * @param stats
+ * The virtq statistics structure to fill.
+ *
+ * @return
+ * 0 on success and @p stats is updated, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+ struct rte_vdpa_queue_stats *stats);
#endif /* RTE_PMD_MLX5_VDPA_H_ */
@@ -72,6 +72,10 @@
rte_free(virtq->umems[i].buf);
}
memset(&virtq->umems, 0, sizeof(virtq->umems));
+ if (virtq->counters) {
+ claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
+ virtq->counters = NULL;
+ }
if (virtq->eqp.fw_qp)
mlx5_vdpa_event_qp_destroy(&virtq->eqp);
return 0;
@@ -205,6 +209,16 @@
DRV_LOG(INFO, "Virtq %d is, for sure, working by poll mode, no"
" need event QPs and event mechanism.", index);
}
+ if (priv->caps.queue_counters_valid) {
+ virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
+ (priv->ctx);
+ if (!virtq->counters) {
+ DRV_LOG(ERR, "Failed to create virtq couners for virtq"
+ " %d.", index);
+ goto error;
+ }
+ attr.counters_obj_id = virtq->counters->id;
+ }
/* Setup 3 UMEMs for each virtq. */
for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
@@ -448,3 +462,33 @@
 }
 return 0;
 }
+
+int
+mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+ struct rte_vdpa_queue_stats *stats)
+{
+ struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
+ struct mlx5_devx_virtio_q_couners_attr attr = {0};
+ int ret;
+
+ if (!virtq->virtq) {
+ DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
+ "synchronization failed.", qid);
+ return -EINVAL;
+ }
+ MLX5_ASSERT(virtq->counters);
+ ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
+ return ret;
+ }
+ *stats = (struct rte_vdpa_queue_stats) {
+ .received_desc = attr.received_desc,
+ .completed_desc = attr.completed_desc,
+ .bad_desc = attr.bad_desc_errors,
+ .exceed_max_chain = attr.exceed_max_chain,
+ .invalid_buffer = attr.invalid_buffer,
+ .errors = attr.error_cqes,
+ };
+ return 0;
+}