From: Ashwin Sekhar T K <asekhar@marvell.com>
Add APIs to read and reset the NPA pool operation performance counters.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_npa.c | 50 +++++++++++++++++++++++++++++++++++++++++
drivers/common/cnxk/roc_npa.h | 37 ++++++++++++++++++++++++++++++
drivers/common/cnxk/version.map | 1 +
3 files changed, 88 insertions(+)
@@ -131,6 +131,56 @@ npa_aura_pool_fini(struct mbox *mbox, uint32_t aura_id, uint64_t aura_handle)
return 0;
}
+/*
+ * Reset the POOL_OP_PC performance counter of the pool backing
+ * @aura_handle to zero.
+ *
+ * Performs an AQ WRITE on the pool context with only the op_pc field
+ * selected in the write mask, then issues an NDC-NPA sync so the NDC
+ * cached copy of the context is flushed for this LF.
+ *
+ * Return: 0 on success; NPA_ERR_PARAM if no NPA LF is attached;
+ * -ENOSPC if a mailbox message cannot be allocated; rc < 0 from
+ * mbox_process() on mailbox failure; NPA_ERR_AURA_POOL_FINI if the AQ
+ * write or the NDC sync is rejected by the AF.
+ */
+int
+roc_npa_pool_op_pc_reset(uint64_t aura_handle)
+{
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aq_enq_req *pool_req;
+ struct npa_aq_enq_rsp *pool_rsp;
+ struct ndc_sync_op *ndc_req;
+ struct mbox_dev *mdev;
+ int rc = -ENOSPC, off;
+ struct mbox *mbox;
+
+ if (lf == NULL)
+ return NPA_ERR_PARAM;
+
+ mbox = lf->mbox;
+ mdev = &mbox->dev[0];
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
+
+ pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (pool_req == NULL)
+ return rc;
+ pool_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_req->op = NPA_AQ_INSTOP_WRITE;
+ /* Write op_pc = 0; enable only the op_pc bits in the write mask.
+ * NOTE(review): ~pool_mask.op_pc relies on the request being
+ * zero-initialized by mbox_alloc_msg_npa_aq_enq() — confirm.
+ */
+ pool_req->pool.op_pc = 0;
+ pool_req->pool_mask.op_pc = ~pool_req->pool_mask.op_pc;
+
+ rc = mbox_process(mbox);
+ if (rc < 0)
+ return rc;
+
+ /* The AQ response lands in the rx region just past the mbox header. */
+ off = mbox->rx_start +
+ PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+ if (pool_rsp->hdr.rc != 0)
+ return NPA_ERR_AURA_POOL_FINI;
+
+ /* Sync NDC-NPA for LF */
+ ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+ if (ndc_req == NULL)
+ return -ENOSPC;
+ ndc_req->npa_lf_sync = 1;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+ return NPA_ERR_AURA_POOL_FINI;
+ }
+ return 0;
+}
static inline char *
npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)
{
@@ -146,6 +146,40 @@ roc_npa_aura_op_available(uint64_t aura_handle)
return reg & 0xFFFFFFFFF;
}
+/*
+ * Read the POOL_OP_PC performance counter for the pool backing
+ * @aura_handle via an atomic load from NPA_LF_POOL_OP_PC.
+ *
+ * @drop selects the drop-counter variant of the read (DROP bit, bit 63
+ * of the op wdata).
+ *
+ * Return: the 48-bit OP_PC value, or 0 if the hardware reported OP_ERR.
+ */
+static inline uint64_t
+roc_npa_pool_op_performance_counter(uint64_t aura_handle, const int drop)
+{
+	union {
+		uint64_t u;
+		struct npa_aura_op_wdata_s s;
+	} op_wdata;
+	int64_t *addr;
+	uint64_t reg;
+
+	op_wdata.u = 0;
+	op_wdata.s.aura = roc_npa_aura_handle_to_aura(aura_handle);
+	if (drop)
+		/* Set the DROP field. Assigning BIT_ULL(63) to a narrow
+		 * bit-field truncates to 0 under C conversion rules, so
+		 * the flag would never be set; assign 1 and let the
+		 * field's position supply bit 63 of the wdata.
+		 * NOTE(review): assumes npa_aura_op_wdata_s.drop is the
+		 * bit-63 field — confirm against hw/npa.h.
+		 */
+		op_wdata.s.drop = 1; /* DROP */
+
+	addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
+			   NPA_LF_POOL_OP_PC);
+
+	reg = roc_atomic64_add_nosync(op_wdata.u, addr);
+	/*
+	 * NPA_LF_POOL_OP_PC Read Data
+	 *
+	 * 63 49 48 48 47 0
+	 * -----------------------------
+	 * | Reserved | OP_ERR | OP_PC |
+	 * -----------------------------
+	 */
+
+	if (reg & BIT_ULL(48) /* OP_ERR */)
+		return 0;
+	else
+		return reg & 0xFFFFFFFFFFFF;
+}
+
static inline void
roc_npa_aura_op_bulk_free(uint64_t aura_handle, uint64_t const *buf,
unsigned int num, const int fabs)
@@ -396,4 +430,7 @@ void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
int __roc_api roc_npa_ctx_dump(void);
int __roc_api roc_npa_dump(void);
+/* Reset the pool operation performance counter (POOL_OP_PC) to zero. */
+int __roc_api roc_npa_pool_op_pc_reset(uint64_t aura_handle);
+
#endif /* _ROC_NPA_H_ */
@@ -19,6 +19,7 @@ INTERNAL {
roc_npa_dump;
roc_npa_pool_create;
roc_npa_pool_destroy;
+ roc_npa_pool_op_pc_reset;
roc_npa_pool_range_update_check;
roc_plt_init;
roc_plt_init_cb_register;