diff mbox series

[v3,12/17] dma/idxd: add operation statistic tracking

Message ID 20210908103016.1661914-13-kevin.laatz@intel.com (mailing list archive)
State Superseded
Delegated to: Thomas Monjalon
Headers show
Series add dmadev driver for idxd devices | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Kevin Laatz Sept. 8, 2021, 10:30 a.m. UTC
Add statistic tracking for DSA devices.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
---
 doc/guides/dmadevs/idxd.rst      | 11 +++++++++++
 drivers/dma/idxd/idxd_bus.c      |  2 ++
 drivers/dma/idxd/idxd_common.c   | 27 +++++++++++++++++++++++++++
 drivers/dma/idxd/idxd_internal.h |  3 +++
 drivers/dma/idxd/idxd_pci.c      |  2 ++
 5 files changed, 45 insertions(+)

Comments

Walsh, Conor Sept. 9, 2021, 11:25 a.m. UTC | #1
> Add statistic tracking for DSA devices.
>
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>

Reviewed-by: Conor Walsh <conor.walsh@intel.com>
diff mbox series

Patch

diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index b0b5632b48..634ef58985 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -242,3 +242,14 @@  of memory is overwritten, or filled, with a short pattern of data.
 Fill operations can be performed in much the same was as copy operations
 described above, just using the ``rte_dmadev_fill()`` function rather than the
 ``rte_dmadev_copy()`` function.
+
+Querying Device Statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The statistics from the IDXD dmadev device can be retrieved via the stats
+functions in the ``rte_dmadev`` library, i.e. ``rte_dmadev_stats_get()``. The statistics
+returned for each device instance are:
+
+* ``submitted``
+* ``completed``
+* ``errors``
diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index 7a6afabd27..8781195d59 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -99,6 +99,8 @@  static const struct rte_dmadev_ops idxd_vdev_ops = {
 		.dev_configure = idxd_configure,
 		.vchan_setup = idxd_vchan_setup,
 		.dev_info_get = idxd_info_get,
+		.stats_get = idxd_stats_get,
+		.stats_reset = idxd_stats_reset,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 8eb73fdcc6..66d1b3432e 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -65,6 +65,8 @@  __submit(struct idxd_dmadev *idxd)
 	if (++idxd->batch_idx_write > idxd->max_batches)
 		idxd->batch_idx_write = 0;
 
+	idxd->stats.submitted += idxd->batch_size;
+
 	idxd->batch_start += idxd->batch_size;
 	idxd->batch_size = 0;
 	idxd->batch_idx_ring[idxd->batch_idx_write] = idxd->batch_start;
@@ -278,6 +280,8 @@  batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_
 	const uint16_t b_len = b_end - b_start;
 	if (b_len == 1) {/* not a batch */
 		*status = get_comp_status(&idxd->batch_comp_ring[idxd->batch_idx_read]);
+		if (*status != RTE_DMA_STATUS_SUCCESSFUL)
+			idxd->stats.errors++;
 		idxd->ids_avail++;
 		idxd->ids_returned++;
 		idxd->batch_idx_read = next_batch;
@@ -299,6 +303,8 @@  batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_
 		struct idxd_completion *c = (void *)
 				&idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];
 		status[ret] = (ret < bcount) ? get_comp_status(c) : RTE_DMA_STATUS_NOT_ATTEMPTED;
+		if (status[ret] != RTE_DMA_STATUS_SUCCESSFUL)
+			idxd->stats.errors++;
 	}
 	idxd->ids_avail = idxd->ids_returned += ret;
 
@@ -357,6 +363,7 @@  idxd_completed(struct rte_dmadev *dev, uint16_t qid __rte_unused, uint16_t max_o
 		ret += batch;
 	} while (batch > 0 && *has_error == false);
 
+	idxd->stats.completed += ret;
 	*last_idx = idxd->ids_returned - 1;
 	return ret;
 }
@@ -374,6 +381,7 @@  idxd_completed_status(struct rte_dmadev *dev, uint16_t qid __rte_unused, uint16_
 		ret += batch;
 	} while (batch > 0);
 
+	idxd->stats.completed += ret;
 	*last_idx = idxd->ids_returned - 1;
 	return ret;
 }
@@ -407,6 +415,25 @@  idxd_dump(const struct rte_dmadev *dev, FILE *f)
 	return 0;
 }
 
+int
+idxd_stats_get(const struct rte_dmadev *dev, uint16_t vchan __rte_unused,
+		struct rte_dmadev_stats *stats, uint32_t stats_sz)
+{
+	struct idxd_dmadev *idxd = dev->dev_private;
+	if (stats_sz < sizeof(*stats))
+		return -EINVAL;
+	*stats = idxd->stats;
+	return 0;
+}
+
+int
+idxd_stats_reset(struct rte_dmadev *dev, uint16_t vchan __rte_unused)
+{
+	struct idxd_dmadev *idxd = dev->dev_private;
+	idxd->stats = (struct rte_dmadev_stats){0};
+	return 0;
+}
+
 int
 idxd_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *info, uint32_t size)
 {
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index 84d45a09d6..c04ee002d8 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -98,5 +98,8 @@  uint16_t idxd_completed(struct rte_dmadev *dev, uint16_t qid, uint16_t max_ops,
 uint16_t idxd_completed_status(struct rte_dmadev *dev, uint16_t qid __rte_unused,
 		uint16_t max_ops, uint16_t *last_idx,
 		enum rte_dma_status_code *status);
+int idxd_stats_get(const struct rte_dmadev *dev, uint16_t vchan,
+		struct rte_dmadev_stats *stats, uint32_t stats_sz);
+int idxd_stats_reset(struct rte_dmadev *dev, uint16_t vchan);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 3c0e3086f7..a84232b6e9 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -114,6 +114,8 @@  static const struct rte_dmadev_ops idxd_pci_ops = {
 	.dev_configure = idxd_configure,
 	.vchan_setup = idxd_vchan_setup,
 	.dev_info_get = idxd_info_get,
+	.stats_get = idxd_stats_get,
+	.stats_reset = idxd_stats_reset,
 	.dev_start = idxd_pci_dev_start,
 	.dev_stop = idxd_pci_dev_stop,
 };