[v3,09/11] compress/mlx5: add statistics operations

Message ID: 1611142175-409485-10-git-send-email-matan@nvidia.com (mailing list archive)
State: Accepted, archived
Delegated to: Akhil Goyal
Series: add mlx5 compress PMD

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Matan Azrad Jan. 20, 2021, 11:29 a.m. UTC
Add support for the following statistics operations:
	- stats_get
	- stats_reset

These statistics are counted by the SW data-path; a usage sketch follows the diffstat below.

Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/compress/mlx5/mlx5_compress.c | 36 +++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)
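
For context, a minimal sketch of how an application could read and reset these counters through the generic compressdev API once this patch is applied. The device id (0), the helper name, the error handling and the printing are illustrative assumptions, not part of the patch:

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_compressdev.h>

    /* Hypothetical helper: read the aggregated per-queue-pair SW counters
     * of a configured compressdev, print them, then zero them again.
     */
    static void
    dump_and_reset_stats(uint8_t dev_id)
    {
    	struct rte_compressdev_stats stats;

    	/* Invokes the PMD stats_get callback, which sums the per-QP counters. */
    	if (rte_compressdev_stats_get(dev_id, &stats) < 0) {
    		printf("failed to read stats of dev %u\n", dev_id);
    		return;
    	}
    	printf("enq=%" PRIu64 " deq=%" PRIu64 " enq_err=%" PRIu64
    	       " deq_err=%" PRIu64 "\n",
    	       stats.enqueued_count, stats.dequeued_count,
    	       stats.enqueue_err_count, stats.dequeue_err_count);
    	/* Invokes the PMD stats_reset callback, zeroing every QP's counters. */
    	rte_compressdev_stats_reset(dev_id);
    }
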
  

Patch

diff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c
index 17aa206..7a43d9e 100644
--- a/drivers/compress/mlx5/mlx5_compress.c
+++ b/drivers/compress/mlx5/mlx5_compress.c
@@ -65,6 +65,7 @@  struct mlx5_compress_qp {
 	struct mlx5_pmd_mr opaque_mr;
 	struct rte_comp_op **ops;
 	struct mlx5_compress_priv *priv;
+	struct rte_compressdev_stats stats;
 };
 
 TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
@@ -362,14 +363,42 @@  struct mlx5_compress_qp {
 	return 0;
 }
 
+static void
+mlx5_compress_stats_get(struct rte_compressdev *dev,
+		struct rte_compressdev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+static void
+mlx5_compress_stats_reset(struct rte_compressdev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+	}
+}
+
 static struct rte_compressdev_ops mlx5_compress_ops = {
 	.dev_configure		= mlx5_compress_dev_configure,
 	.dev_start		= mlx5_compress_dev_start,
 	.dev_stop		= mlx5_compress_dev_stop,
 	.dev_close		= mlx5_compress_dev_close,
 	.dev_infos_get		= mlx5_compress_dev_info_get,
-	.stats_get		= NULL,
-	.stats_reset		= NULL,
+	.stats_get		= mlx5_compress_stats_get,
+	.stats_reset		= mlx5_compress_stats_reset,
 	.queue_pair_setup	= mlx5_compress_qp_setup,
 	.queue_pair_release	= mlx5_compress_qp_release,
 	.private_xform_create	= mlx5_compress_xform_create,
@@ -453,6 +482,7 @@  struct mlx5_compress_qp {
 		qp->ops[idx] = op;
 		qp->pi++;
 	} while (--remain);
+	qp->stats.enqueued_count += nb_ops;
 	rte_io_wmb();
 	qp->sq.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);
 	rte_wmb();
@@ -501,6 +531,7 @@  struct mlx5_compress_qp {
 	mlx5_compress_dump_err_objs((volatile uint32_t *)cqe,
 				 (volatile uint32_t *)&wqes[idx],
 				 (volatile uint32_t *)&opaq[idx]);
+	qp->stats.dequeue_err_count++;
 }
 
 static uint16_t
@@ -571,6 +602,7 @@  struct mlx5_compress_qp {
 	if (likely(i != 0)) {
 		rte_io_wmb();
 		qp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);
+		qp->stats.dequeued_count += i;
 	}
 	return i;
 }