[v9,12/15] crypto/mlx5: add statistic get and reset operations

Message ID: 20210720130944.5407-13-suanmingm@nvidia.com (mailing list archive)
State: Accepted, archived
Delegated to: Akhil Goyal
Series: drivers: introduce mlx5 crypto PMD

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Suanming Mou July 20, 2021, 1:09 p.m. UTC
  This commit adds the mlx5 crypto statistics get and reset operations.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Signed-off-by: Matan Azrad <matan@nvidia.com>
---
 doc/guides/rel_notes/release_21_08.rst |  5 ++++
 drivers/crypto/mlx5/mlx5_crypto.c      | 40 ++++++++++++++++++++++++--
 2 files changed, 42 insertions(+), 3 deletions(-)
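
For readers who want to exercise the new ops, a minimal sketch of the application side follows; rte_cryptodev_stats_get() and rte_cryptodev_stats_reset() are the standard cryptodev entry points that dispatch to the PMD callbacks wired up below. The helper name and the output formatting are illustrative assumptions, not part of the patch:

#include <inttypes.h>
#include <stdio.h>
#include <rte_cryptodev.h>

static void
dump_and_reset_crypto_stats(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	/* Aggregates all queue pairs through the PMD's .stats_get op
	 * (mlx5_crypto_stats_get() in this patch).
	 */
	if (rte_cryptodev_stats_get(dev_id, &stats) != 0) {
		printf("failed to get stats for dev %u\n",
		       (unsigned int)dev_id);
		return;
	}
	printf("enq: %" PRIu64 ", deq: %" PRIu64
	       ", enq_err: %" PRIu64 ", deq_err: %" PRIu64 "\n",
	       stats.enqueued_count, stats.dequeued_count,
	       stats.enqueue_err_count, stats.dequeue_err_count);
	/* Zeroes every queue pair's counters through .stats_reset
	 * (mlx5_crypto_stats_reset() in this patch).
	 */
	rte_cryptodev_stats_reset(dev_id);
}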
  

Patch

diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 7d289e07e3..2bf4ce7a73 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -125,6 +125,11 @@  New Features
   The experimental PMD power management API now supports managing
   multiple Ethernet Rx queues per lcore.
 
+* **Added support for NVIDIA crypto device driver.**
+
+  * Added mlx5 crypto driver to support AES-XTS cipher operations.
+    The first device to support it is ConnectX-6.
+
 
 Removed Items
 -------------
diff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c
index 4cd1d41588..fc05bb7d46 100644
--- a/drivers/crypto/mlx5/mlx5_crypto.c
+++ b/drivers/crypto/mlx5/mlx5_crypto.c
@@ -506,13 +506,17 @@  mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		op = *ops++;
 		umr = RTE_PTR_ADD(qp->umem_buf, priv->wqe_set_size * qp->pi);
 		if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {
-			if (remain != nb_ops)
+			qp->stats.enqueue_err_count++;
+			if (remain != nb_ops) {
+				qp->stats.enqueued_count -= remain;
 				break;
+			}
 			return 0;
 		}
 		qp->ops[qp->pi] = op;
 		qp->pi = (qp->pi + 1) & mask;
 	} while (--remain);
+	qp->stats.enqueued_count += nb_ops;
 	rte_io_wmb();
 	qp->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
 	rte_wmb();
@@ -529,6 +533,7 @@  mlx5_crypto_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)
 							&qp->cq_obj.cqes[idx];
 
 	op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+	qp->stats.dequeue_err_count++;
 	DRV_LOG(ERR, "CQE ERR:%x.\n", rte_be_to_cpu_32(cqe->syndrome));
 }
 
@@ -568,6 +573,7 @@  mlx5_crypto_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	if (likely(i != 0)) {
 		rte_io_wmb();
 		qp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->ci);
+		qp->stats.dequeued_count += i;
 	}
 	return i;
 }
@@ -729,14 +735,42 @@  mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	return -1;
 }
 
+static void
+mlx5_crypto_stats_get(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+static void
+mlx5_crypto_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+	}
+}
+
 static struct rte_cryptodev_ops mlx5_crypto_ops = {
 	.dev_configure			= mlx5_crypto_dev_configure,
 	.dev_start			= mlx5_crypto_dev_start,
 	.dev_stop			= mlx5_crypto_dev_stop,
 	.dev_close			= mlx5_crypto_dev_close,
 	.dev_infos_get			= mlx5_crypto_dev_infos_get,
-	.stats_get			= NULL,
-	.stats_reset			= NULL,
+	.stats_get			= mlx5_crypto_stats_get,
+	.stats_reset			= mlx5_crypto_stats_reset,
 	.queue_pair_setup		= mlx5_crypto_queue_pair_setup,
 	.queue_pair_release		= mlx5_crypto_queue_pair_release,
 	.sym_session_get_size		= mlx5_crypto_sym_session_get_size,
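
Two notes on the accounting, for reviewers tracing the counters. In mlx5_crypto_enqueue_burst(), the error path subtracts remain before breaking out of the loop, and the common exit then adds nb_ops, so the net change to enqueued_count is nb_ops - remain, i.e. only the ops actually posted: if the third op of a four-op burst fails, remain is still 2 at the break, and the counter grows by 4 - 2 = 2. In mlx5_crypto_stats_get(), the counters are accumulated into the caller's struct without clearing it first; that is safe because rte_cryptodev_stats_get() in the cryptodev layer zeroes the structure before invoking the PMD op.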