[v4,11/16] dma/idxd: add operation statistic tracking

Message ID 20210917140252.2999006-12-kevin.laatz@intel.com (mailing list archive)
State Superseded, archived
Headers
Series add dmadev driver for idxd devices |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Kevin Laatz Sept. 17, 2021, 2:02 p.m. UTC
  Add statistic tracking for DSA devices.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/dmadevs/idxd.rst      | 11 +++++++++++
 drivers/dma/idxd/idxd_bus.c      |  2 ++
 drivers/dma/idxd/idxd_common.c   | 27 +++++++++++++++++++++++++++
 drivers/dma/idxd/idxd_internal.h |  3 +++
 drivers/dma/idxd/idxd_pci.c      |  2 ++
 5 files changed, 45 insertions(+)
  

Patch

diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index f942a8aa44..c81f1d15cc 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -249,3 +249,14 @@  of memory is overwritten, or filled, with a short pattern of data.
 Fill operations can be performed in much the same was as copy operations
 described above, just using the ``rte_dma_fill()`` function rather than the
 ``rte_dma_copy()`` function.
+
+Querying Device Statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The statistics from the IDXD dmadev device can be retrieved via the stats functions in
+the ``rte_dmadev`` library, i.e. ``rte_dma_stats_get()``. The statistics
+returned for each device instance are:
+
+* ``submitted``: The number of operations submitted to the device.
+* ``completed``: The number of operations which have completed (successful and failed).
+* ``errors``: The number of operations that completed with error.
diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index b2acdac4f9..b52ea02854 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -99,6 +99,8 @@  static const struct rte_dma_dev_ops idxd_bus_ops = {
 		.dev_configure = idxd_configure,
 		.vchan_setup = idxd_vchan_setup,
 		.dev_info_get = idxd_info_get,
+		.stats_get = idxd_stats_get,
+		.stats_reset = idxd_stats_reset,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index a061a956c2..d86c58c12a 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -65,6 +65,8 @@  __submit(struct idxd_dmadev *idxd)
 	if (++idxd->batch_idx_write > idxd->max_batches)
 		idxd->batch_idx_write = 0;
 
+	idxd->stats.submitted += idxd->batch_size;
+
 	idxd->batch_start += idxd->batch_size;
 	idxd->batch_size = 0;
 	idxd->batch_idx_ring[idxd->batch_idx_write] = idxd->batch_start;
@@ -275,6 +277,8 @@  batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_
 	const uint16_t b_len = b_end - b_start;
 	if (b_len == 1) {/* not a batch */
 		*status = get_comp_status(&idxd->batch_comp_ring[idxd->batch_idx_read]);
+		if (*status != RTE_DMA_STATUS_SUCCESSFUL)
+			idxd->stats.errors++;
 		idxd->ids_avail++;
 		idxd->ids_returned++;
 		idxd->batch_idx_read = next_batch;
@@ -296,6 +300,8 @@  batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_
 		struct idxd_completion *c = (void *)
 				&idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];
 		status[ret] = (ret < bcount) ? get_comp_status(c) : RTE_DMA_STATUS_NOT_ATTEMPTED;
+		if (status[ret] != RTE_DMA_STATUS_SUCCESSFUL)
+			idxd->stats.errors++;
 	}
 	idxd->ids_avail = idxd->ids_returned += ret;
 
@@ -354,6 +360,7 @@  idxd_completed(struct rte_dma_dev *dev, uint16_t qid __rte_unused, uint16_t max_
 		ret += batch;
 	} while (batch > 0 && *has_error == false);
 
+	idxd->stats.completed += ret;
 	*last_idx = idxd->ids_returned - 1;
 	return ret;
 }
@@ -371,6 +378,7 @@  idxd_completed_status(struct rte_dma_dev *dev, uint16_t qid __rte_unused, uint16
 		ret += batch;
 	} while (batch > 0);
 
+	idxd->stats.completed += ret;
 	*last_idx = idxd->ids_returned - 1;
 	return ret;
 }
@@ -404,6 +412,25 @@  idxd_dump(const struct rte_dma_dev *dev, FILE *f)
 	return 0;
 }
 
+int
+idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+		struct rte_dma_stats *stats, uint32_t stats_sz)
+{
+	struct idxd_dmadev *idxd = dev->dev_private;
+	if (stats_sz < sizeof(*stats))
+		return -EINVAL;
+	*stats = idxd->stats;
+	return 0;
+}
+
+int
+idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
+{
+	struct idxd_dmadev *idxd = dev->dev_private;
+	idxd->stats = (struct rte_dma_stats){0};
+	return 0;
+}
+
 int
 idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
 {
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index 15115a0966..e2a1119ef7 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -98,5 +98,8 @@  uint16_t idxd_completed(struct rte_dma_dev *dev, uint16_t qid, uint16_t max_ops,
 uint16_t idxd_completed_status(struct rte_dma_dev *dev, uint16_t qid __rte_unused,
 		uint16_t max_ops, uint16_t *last_idx,
 		enum rte_dma_status_code *status);
+int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
+		struct rte_dma_stats *stats, uint32_t stats_sz);
+int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index cfb64ce220..d73845aa3d 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -114,6 +114,8 @@  static const struct rte_dma_dev_ops idxd_pci_ops = {
 	.dev_configure = idxd_configure,
 	.vchan_setup = idxd_vchan_setup,
 	.dev_info_get = idxd_info_get,
+	.stats_get = idxd_stats_get,
+	.stats_reset = idxd_stats_reset,
 	.dev_start = idxd_pci_dev_start,
 	.dev_stop = idxd_pci_dev_stop,
 };