Update the relevant stats in the data-path functions, and point the
overall device struct's xstats function pointers at the existing ioat
functions.
At this point, all necessary hooks for supporting the existing unit tests
are in place, so call them for each device.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
drivers/raw/ioat/idxd_pci.c | 3 +++
drivers/raw/ioat/idxd_vdev.c | 3 +++
drivers/raw/ioat/ioat_rawdev_test.c | 2 +-
drivers/raw/ioat/rte_ioat_rawdev_fns.h | 26 ++++++++++++++++----------
4 files changed, 23 insertions(+), 11 deletions(-)
@@ -108,6 +108,9 @@ static const struct rte_rawdev_ops idxd_pci_ops = {
.dev_start = idxd_pci_dev_start,
.dev_stop = idxd_pci_dev_stop,
.dev_info_get = idxd_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
};
/* each portal uses 4 x 4k pages */
@@ -87,6 +87,9 @@ static const struct rte_rawdev_ops idxd_vdev_ops = {
.dev_start = idxd_vdev_start,
.dev_stop = idxd_vdev_stop,
.dev_info_get = idxd_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
};
static void *
@@ -258,5 +258,5 @@ int
idxd_rawdev_test(uint16_t dev_id)
{
rte_rawdev_dump(dev_id, stdout);
- return 0;
+ return ioat_rawdev_test(dev_id);
}
@@ -182,6 +182,8 @@ struct rte_idxd_user_hdl {
*/
struct rte_idxd_rawdev {
enum rte_ioat_dev_type type;
+ struct rte_ioat_xstats xstats;
+
void *portal; /* address to write the batch descriptor */
/* counters to track the batches and the individual op handles */
@@ -330,20 +332,16 @@ __idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
IDXD_FLAG_CACHE_CONTROL;
/* check for room in the handle ring */
- if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl) {
- rte_errno = ENOSPC;
- return 0;
- }
+ if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
+ goto failed;
+
if (b->op_count >= BATCH_SIZE) {
/* TODO change to submit batch and move on */
- rte_errno = ENOSPC;
- return 0;
+ goto failed;
}
/* check that we can actually use the current batch */
- if (b->submitted) {
- rte_errno = ENOSPC;
- return 0;
- }
+ if (b->submitted)
+ goto failed;
/* write the descriptor */
b->ops[b->op_count++] = (struct rte_idxd_hw_desc){
@@ -362,7 +360,13 @@ __idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
idxd->next_free_hdl = 0;
+ idxd->xstats.enqueued++;
return 1;
+
+failed:
+ idxd->xstats.enqueue_failed++;
+ rte_errno = ENOSPC;
+ return 0;
}
static __rte_always_inline void
@@ -389,6 +393,7 @@ __idxd_perform_ops(int dev_id)
if (++idxd->next_batch == idxd->batch_ring_sz)
idxd->next_batch = 0;
+ idxd->xstats.started = idxd->xstats.enqueued;
}
static __rte_always_inline int
@@ -425,6 +430,7 @@ __idxd_completed_ops(int dev_id, uint8_t max_ops,
idxd->next_ret_hdl = h_idx;
+ idxd->xstats.completed += n;
return n;
}