@@ -16,6 +16,7 @@ sources = files('roc_dev.c',
'roc_mbox.c',
'roc_model.c',
'roc_nix.c',
+ 'roc_nix_irq.c',
'roc_npa.c',
'roc_npa_debug.c',
'roc_npa_irq.c',
@@ -363,6 +363,11 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
nix->reta_sz = reta_sz;
nix->mtu = ROC_NIX_DEFAULT_HW_FRS;
+ /* Register error and ras interrupts */
+ rc = nix_register_irqs(nix);
+ if (rc)
+ goto lf_detach;
+
/* Get NIX HW info */
roc_nix_get_hw_info(roc_nix);
nix->dev.drv_inited = true;
@@ -388,6 +393,8 @@ roc_nix_dev_fini(struct roc_nix *roc_nix)
if (!nix->dev.drv_inited)
goto fini;
+ nix_unregister_irqs(nix);
+
rc = nix_lf_detach(nix);
nix->dev.drv_inited = false;
fini:
@@ -81,4 +81,16 @@ int __roc_api roc_nix_lf_alloc(struct roc_nix *roc_nix, uint32_t nb_rxq,
uint32_t nb_txq, uint64_t rx_cfg);
int __roc_api roc_nix_lf_free(struct roc_nix *roc_nix);
+/* IRQ */
+void __roc_api roc_nix_rx_queue_intr_enable(struct roc_nix *roc_nix,
+ uint16_t rxq_id);
+void __roc_api roc_nix_rx_queue_intr_disable(struct roc_nix *roc_nix,
+ uint16_t rxq_id);
+void __roc_api roc_nix_err_intr_ena_dis(struct roc_nix *roc_nix, bool enb);
+void __roc_api roc_nix_ras_intr_ena_dis(struct roc_nix *roc_nix, bool enb);
+int __roc_api roc_nix_register_queue_irqs(struct roc_nix *roc_nix);
+void __roc_api roc_nix_unregister_queue_irqs(struct roc_nix *roc_nix);
+int __roc_api roc_nix_register_cq_irqs(struct roc_nix *roc_nix);
+void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
+
#endif /* _ROC_NIX_H_ */
new file mode 100644
@@ -0,0 +1,484 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+static void
+nix_err_intr_enb_dis(struct nix *nix, bool enb)
+{
+ /* Enable all nix lf error irqs except RQ_DISABLED and CQ_DISABLED */
+ if (enb)
+ plt_write64(~(BIT_ULL(11) | BIT_ULL(24)),
+ nix->base + NIX_LF_ERR_INT_ENA_W1S);
+ else
+ plt_write64(~0ull, nix->base + NIX_LF_ERR_INT_ENA_W1C);
+}
+
+/* Toggle all NIX LF RAS (poison) interrupt enables; W1S sets, W1C clears. */
+static void
+nix_ras_intr_enb_dis(struct nix *nix, bool enb)
+{
+	uintptr_t off = enb ? NIX_LF_RAS_ENA_W1S : NIX_LF_RAS_ENA_W1C;
+
+	plt_write64(~0ull, nix->base + off);
+}
+
+/* Unmask the completion interrupt (CINT) mapped to the given Rx queue so
+ * the device can raise per-queue Rx event interrupts.
+ */
+void
+roc_nix_rx_queue_intr_enable(struct roc_nix *roc_nix, uint16_t rx_queue_id)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	/* Enable CINT interrupt */
+	plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1S(rx_queue_id));
+}
+
+/* Mask the completion interrupt (CINT) mapped to the given Rx queue;
+ * counterpart of roc_nix_rx_queue_intr_enable().
+ */
+void
+roc_nix_rx_queue_intr_disable(struct roc_nix *roc_nix, uint16_t rx_queue_id)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	/* Clear and disable CINT interrupt */
+	plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(rx_queue_id));
+}
+
+void
+roc_nix_err_intr_ena_dis(struct roc_nix *roc_nix, bool enb)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ return nix_err_intr_enb_dis(nix, enb);
+}
+
+/* Public wrapper to enable/disable the NIX LF RAS (poison) interrupts. */
+void
+roc_nix_ras_intr_ena_dis(struct roc_nix *roc_nix, bool enb)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	/* Plain call: a void function must not return an expression
+	 * (C99 6.8.6.4), even one of void type.
+	 */
+	nix_ras_intr_enb_dis(nix, enb);
+}
+
+static void
+nix_lf_err_irq(void *param)
+{
+ struct nix *nix = (struct nix *)param;
+ struct dev *dev = &nix->dev;
+ uint64_t intr;
+
+ intr = plt_read64(nix->base + NIX_LF_ERR_INT);
+ if (intr == 0)
+ return;
+
+ plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
+
+ /* Clear interrupt */
+ plt_write64(intr, nix->base + NIX_LF_ERR_INT);
+}
+
+static int
+nix_lf_register_err_irq(struct nix *nix)
+{
+ struct plt_intr_handle *handle = &nix->pci_dev->intr_handle;
+ int rc, vec;
+
+ vec = nix->msixoff + NIX_LF_INT_VEC_ERR_INT;
+ /* Clear err interrupt */
+ nix_err_intr_enb_dis(nix, false);
+ /* Set used interrupt vectors */
+ rc = dev_irq_register(handle, nix_lf_err_irq, nix, vec);
+ /* Enable all dev interrupt except for RQ_DISABLED */
+ nix_err_intr_enb_dis(nix, true);
+
+ return rc;
+}
+
+static void
+nix_lf_unregister_err_irq(struct nix *nix)
+{
+ struct plt_intr_handle *handle = &nix->pci_dev->intr_handle;
+ int vec;
+
+ vec = nix->msixoff + NIX_LF_INT_VEC_ERR_INT;
+ /* Clear err interrupt */
+ nix_err_intr_enb_dis(nix, false);
+ dev_irq_unregister(handle, nix_lf_err_irq, nix, vec);
+}
+
+/* RAS (poison) interrupt handler: logs the pending events and acks them. */
+static void
+nix_lf_ras_irq(void *param)
+{
+	struct nix *nix = param;
+	struct dev *dev = &nix->dev;
+	uint64_t pending;
+
+	pending = plt_read64(nix->base + NIX_LF_RAS);
+	if (!pending)
+		return;
+
+	plt_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", pending, dev->pf,
+		dev->vf);
+	/* Write-1-to-clear the reported poison events */
+	plt_write64(pending, nix->base + NIX_LF_RAS);
+}
+
+/* Hook the RAS (poison) interrupt handler on its MSI-X vector.
+ * Returns 0 on success or the dev_irq_register() error code.
+ */
+static int
+nix_lf_register_ras_irq(struct nix *nix)
+{
+	struct plt_intr_handle *handle = &nix->pci_dev->intr_handle;
+	int rc, vec;
+
+	vec = nix->msixoff + NIX_LF_INT_VEC_POISON;
+	/* Clear err interrupt */
+	nix_ras_intr_enb_dis(nix, false);
+	/* Set used interrupt vectors */
+	rc = dev_irq_register(handle, nix_lf_ras_irq, nix, vec);
+	/* Enable dev interrupt only if the handler registration succeeded */
+	if (rc == 0)
+		nix_ras_intr_enb_dis(nix, true);
+
+	return rc;
+}
+
+/* Detach the RAS (poison) interrupt handler from its MSI-X vector. */
+static void
+nix_lf_unregister_ras_irq(struct nix *nix)
+{
+	struct plt_intr_handle *handle = &nix->pci_dev->intr_handle;
+	int vector = nix->msixoff + NIX_LF_INT_VEC_POISON;
+
+	/* Mask RAS events before dropping the handler */
+	nix_ras_intr_enb_dis(nix, false);
+	dev_irq_unregister(handle, nix_lf_ras_irq, nix, vector);
+}
+
+/* Atomically read the per-queue interrupt bits through the OP_INT register
+ * at @off and write-1-to-clear them. @mask strips bits that must not be
+ * written back. Returns the pending bits (low byte), or 0 when the hardware
+ * flags OP_ERR.
+ */
+static inline uint8_t
+nix_lf_q_irq_get_and_clear(struct nix *nix, uint16_t q, uint32_t off,
+			   uint64_t mask)
+{
+	uint64_t reg, wdata;
+	uint8_t qint;
+
+	/* Queue index is carried in bits [63:44] of the op word */
+	wdata = (uint64_t)q << 44;
+	reg = roc_atomic64_add_nosync(wdata, (int64_t *)(nix->base + off));
+
+	if (reg & BIT_ULL(42) /* OP_ERR */) {
+		plt_err("Failed execute irq get off=0x%x", off);
+		return 0;
+	}
+	qint = reg & 0xff;
+	wdata &= mask;
+	/* Clear exactly the bits just observed (W1C semantics) */
+	plt_write64(wdata | qint, nix->base + off);
+
+	return qint;
+}
+
+/* Fetch-and-clear pending interrupt bits for receive queue @rq */
+static inline uint8_t
+nix_lf_rq_irq_get_and_clear(struct nix *nix, uint16_t rq)
+{
+	return nix_lf_q_irq_get_and_clear(nix, rq, NIX_LF_RQ_OP_INT, ~0xff00);
+}
+
+/* Fetch-and-clear pending interrupt bits for completion queue @cq */
+static inline uint8_t
+nix_lf_cq_irq_get_and_clear(struct nix *nix, uint16_t cq)
+{
+	return nix_lf_q_irq_get_and_clear(nix, cq, NIX_LF_CQ_OP_INT, ~0xff00);
+}
+
+/* Fetch-and-clear pending interrupt bits for send queue @sq (SQ events
+ * occupy 9 bits, hence the wider mask than the RQ/CQ variants).
+ */
+static inline uint8_t
+nix_lf_sq_irq_get_and_clear(struct nix *nix, uint16_t sq)
+{
+	return nix_lf_q_irq_get_and_clear(nix, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
+}
+
+static inline void
+nix_lf_sq_debug_reg(struct nix *nix, uint32_t off)
+{
+ uint64_t reg;
+
+ reg = plt_read64(nix->base + off);
+ if (reg & BIT_ULL(44))
+ plt_err("SQ=%d err_code=0x%x", (int)((reg >> 8) & 0xfffff),
+ (uint8_t)(reg & 0xff));
+}
+
+/* Completion-queue interrupt handler: acknowledges the CINT only; event
+ * delivery to the application happens through the eventfd/epoll machinery
+ * attached to this vector.
+ */
+static void
+nix_lf_cq_irq(void *param)
+{
+	struct nix_qint *cint = (struct nix_qint *)param;
+	struct nix *nix = cint->nix;
+
+	/* Clear interrupt */
+	plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_INT(cint->qintx));
+}
+
+/* QINT (queue error) interrupt handler: decodes and logs pending RQ, CQ
+ * and SQ error events, then acknowledges the QINT line that fired.
+ */
+static void
+nix_lf_q_irq(void *param)
+{
+	struct nix_qint *qint = (struct nix_qint *)param;
+	uint8_t irq, qintx = qint->qintx;
+	struct nix *nix = qint->nix;
+	struct dev *dev = &nix->dev;
+	int q, cq, rq, sq;
+	uint64_t intr;
+
+	intr = plt_read64(nix->base + NIX_LF_QINTX_INT(qintx));
+	if (intr == 0)
+		return;
+
+	plt_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d", intr, qintx,
+		dev->pf, dev->vf);
+
+	/* Handle RQ interrupts */
+	/* NOTE(review): queues fold onto qints via modulo, so a qint shared
+	 * by several queues is polled once per mapped queue below — appears
+	 * intentional (each poll is a get-and-clear); confirm against HRM.
+	 */
+	for (q = 0; q < nix->nb_rx_queues; q++) {
+		rq = q % nix->qints;
+		irq = nix_lf_rq_irq_get_and_clear(nix, rq);
+
+		if (irq & BIT_ULL(NIX_RQINT_DROP))
+			plt_err("RQ=%d NIX_RQINT_DROP", rq);
+
+		if (irq & BIT_ULL(NIX_RQINT_RED))
+			plt_err("RQ=%d NIX_RQINT_RED", rq);
+	}
+
+	/* Handle CQ interrupts */
+	for (q = 0; q < nix->nb_rx_queues; q++) {
+		cq = q % nix->qints;
+		irq = nix_lf_cq_irq_get_and_clear(nix, cq);
+
+		if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+			plt_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);
+
+		if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
+			plt_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);
+
+		if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+			plt_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);
+	}
+
+	/* Handle SQ interrupts; each error kind has a capture/debug register
+	 * dumped alongside the event.
+	 */
+	for (q = 0; q < nix->nb_tx_queues; q++) {
+		sq = q % nix->qints;
+		irq = nix_lf_sq_irq_get_and_clear(nix, sq);
+
+		if (irq & BIT_ULL(NIX_SQINT_LMT_ERR)) {
+			plt_err("SQ=%d NIX_SQINT_LMT_ERR", sq);
+			nix_lf_sq_debug_reg(nix, NIX_LF_SQ_OP_ERR_DBG);
+		}
+		if (irq & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
+			plt_err("SQ=%d NIX_SQINT_MNQ_ERR", sq);
+			nix_lf_sq_debug_reg(nix, NIX_LF_MNQ_ERR_DBG);
+		}
+		if (irq & BIT_ULL(NIX_SQINT_SEND_ERR)) {
+			plt_err("SQ=%d NIX_SQINT_SEND_ERR", sq);
+			nix_lf_sq_debug_reg(nix, NIX_LF_SEND_ERR_DBG);
+		}
+		if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) {
+			plt_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
+			/* NOTE(review): reads NIX_LF_SEND_ERR_DBG, same as the
+			 * SEND_ERR case — verify this is the intended capture
+			 * register for SQB allocation failures.
+			 */
+			nix_lf_sq_debug_reg(nix, NIX_LF_SEND_ERR_DBG);
+		}
+	}
+
+	/* Clear interrupt */
+	plt_write64(intr, nix->base + NIX_LF_QINTX_INT(qintx));
+}
+
+/* Allocate per-QINT context and register one MSI-X vector per qint needed
+ * to cover the configured Rx/Tx queues; each vector is cleared before the
+ * handler is attached and enabled afterwards.
+ * Returns 0 on success, -ENOMEM, or the dev_irq_register() error code.
+ */
+int
+roc_nix_register_queue_irqs(struct roc_nix *roc_nix)
+{
+	int vec, q, sqs, rqs, qs, rc = 0;
+	struct plt_intr_handle *handle;
+	struct nix *nix;
+
+	nix = roc_nix_to_nix_priv(roc_nix);
+	handle = &nix->pci_dev->intr_handle;
+
+	/* Figure out max qintx required */
+	rqs = PLT_MIN(nix->qints, nix->nb_rx_queues);
+	sqs = PLT_MIN(nix->qints, nix->nb_tx_queues);
+	qs = PLT_MAX(rqs, sqs);
+
+	nix->configured_qints = qs;
+
+	nix->qints_mem =
+		plt_zmalloc(nix->configured_qints * sizeof(struct nix_qint), 0);
+	if (nix->qints_mem == NULL)
+		return -ENOMEM;
+
+	for (q = 0; q < qs; q++) {
+		vec = nix->msixoff + NIX_LF_INT_VEC_QINT_START + q;
+
+		/* Clear QINT CNT */
+		plt_write64(0, nix->base + NIX_LF_QINTX_CNT(q));
+
+		/* Clear interrupt */
+		plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1C(q));
+
+		nix->qints_mem[q].nix = nix;
+		nix->qints_mem[q].qintx = q;
+
+		/* Sync qints_mem update */
+		plt_wmb();
+
+		/* Register queue irq vector */
+		rc = dev_irq_register(handle, nix_lf_q_irq, &nix->qints_mem[q],
+				      vec);
+		/* NOTE(review): on failure, qints_mem stays allocated and the
+		 * already-registered vectors stay hooked; caller is expected
+		 * to invoke roc_nix_unregister_queue_irqs() — confirm.
+		 */
+		if (rc)
+			break;
+
+		plt_write64(0, nix->base + NIX_LF_QINTX_CNT(q));
+		plt_write64(0, nix->base + NIX_LF_QINTX_INT(q));
+		/* Enable QINT interrupt */
+		plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1S(q));
+	}
+
+	return rc;
+}
+
+/* Undo roc_nix_register_queue_irqs(): mask every configured QINT, detach
+ * its handler and release the per-qint context.
+ */
+void
+roc_nix_unregister_queue_irqs(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct plt_intr_handle *handle = &nix->pci_dev->intr_handle;
+	int qint, vector;
+
+	for (qint = 0; qint < nix->configured_qints; qint++) {
+		vector = nix->msixoff + NIX_LF_INT_VEC_QINT_START + qint;
+
+		/* Reset the interrupt counter and pending state */
+		plt_write64(0, nix->base + NIX_LF_QINTX_CNT(qint));
+		plt_write64(0, nix->base + NIX_LF_QINTX_INT(qint));
+
+		/* Mask the QINT before dropping its handler */
+		plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1C(qint));
+
+		dev_irq_unregister(handle, nix_lf_q_irq,
+				   &nix->qints_mem[qint], vector);
+	}
+	nix->configured_qints = 0;
+
+	plt_free(nix->qints_mem);
+	nix->qints_mem = NULL;
+}
+
+/* Register one CQ (CINT) MSI-X vector per configured Rx queue, populate
+ * the VFIO intr_vec mapping and program CQE coalescing defaults. The CINT
+ * itself is left disabled; it is armed on demand via
+ * roc_nix_rx_queue_intr_enable().
+ * Returns 0 on success, -ENOMEM, or the dev_irq_register() error code.
+ */
+int
+roc_nix_register_cq_irqs(struct roc_nix *roc_nix)
+{
+	struct plt_intr_handle *handle;
+	/* rc/vec must be int: dev_irq_register() returns negative error
+	 * codes and msixoff + CINT_START + q can exceed 255, both of which
+	 * a uint8_t would silently truncate.
+	 */
+	int rc = 0, vec, q;
+	struct nix *nix;
+
+	nix = roc_nix_to_nix_priv(roc_nix);
+	handle = &nix->pci_dev->intr_handle;
+
+	nix->configured_cints = PLT_MIN(nix->cints, nix->nb_rx_queues);
+
+	nix->cints_mem =
+		plt_zmalloc(nix->configured_cints * sizeof(struct nix_qint), 0);
+	if (nix->cints_mem == NULL)
+		return -ENOMEM;
+
+	for (q = 0; q < nix->configured_cints; q++) {
+		vec = nix->msixoff + NIX_LF_INT_VEC_CINT_START + q;
+
+		/* Clear CINT CNT */
+		plt_write64(0, nix->base + NIX_LF_CINTX_CNT(q));
+
+		/* Clear interrupt */
+		plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(q));
+
+		nix->cints_mem[q].nix = nix;
+		nix->cints_mem[q].qintx = q;
+
+		/* Sync cints_mem update */
+		plt_wmb();
+
+		/* Register queue irq vector */
+		rc = dev_irq_register(handle, nix_lf_cq_irq, &nix->cints_mem[q],
+				      vec);
+		if (rc) {
+			plt_err("Fail to register CQ irq, rc=%d", rc);
+			return rc;
+		}
+
+		if (!handle->intr_vec) {
+			handle->intr_vec = plt_zmalloc(
+				nix->configured_cints * sizeof(int), 0);
+			if (!handle->intr_vec) {
+				plt_err("Failed to allocate %d rx intr_vec",
+					nix->configured_cints);
+				return -ENOMEM;
+			}
+		}
+		/* VFIO vector zero is reserved for misc interrupt so
+		 * doing required adjustment. (b13bfab4cd)
+		 */
+		handle->intr_vec[q] = PLT_INTR_VEC_RXTX_OFFSET + vec;
+
+		/* Configure CQE interrupt coalescing parameters */
+		plt_write64(((CQ_CQE_THRESH_DEFAULT) |
+			     (CQ_CQE_THRESH_DEFAULT << 32) |
+			     (CQ_TIMER_THRESH_DEFAULT << 48)),
+			    nix->base + NIX_LF_CINTX_WAIT((q)));
+
+		/* Keeping the CQ interrupt disabled as the rx interrupt
+		 * feature needs to be enabled/disabled on demand.
+		 */
+	}
+
+	return rc;
+}
+
+/* Undo roc_nix_register_cq_irqs(): mask each configured CINT, detach its
+ * handler and release the per-cint context.
+ */
+void
+roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix)
+{
+	struct plt_intr_handle *handle;
+	struct nix *nix;
+	int vec, q;
+
+	nix = roc_nix_to_nix_priv(roc_nix);
+	handle = &nix->pci_dev->intr_handle;
+
+	for (q = 0; q < nix->configured_cints; q++) {
+		vec = nix->msixoff + NIX_LF_INT_VEC_CINT_START + q;
+
+		/* Clear CINT CNT */
+		plt_write64(0, nix->base + NIX_LF_CINTX_CNT(q));
+
+		/* Clear interrupt */
+		plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(q));
+
+		/* Unregister queue irq vector */
+		dev_irq_unregister(handle, nix_lf_cq_irq, &nix->cints_mem[q],
+				   vec);
+	}
+
+	/* Mirror roc_nix_unregister_queue_irqs(): reset the count and NULL
+	 * the freed pointer so a repeated unregister or a later register
+	 * cannot touch stale memory.
+	 */
+	nix->configured_cints = 0;
+	plt_free(nix->cints_mem);
+	nix->cints_mem = NULL;
+}
+
+int
+nix_register_irqs(struct nix *nix)
+{
+ int rc;
+
+ if (nix->msixoff == MSIX_VECTOR_INVALID) {
+ plt_err("Invalid NIXLF MSIX vector offset vector: 0x%x",
+ nix->msixoff);
+ return NIX_ERR_PARAM;
+ }
+
+ /* Register lf err interrupt */
+ rc = nix_lf_register_err_irq(nix);
+ /* Register RAS interrupt */
+ rc |= nix_lf_register_ras_irq(nix);
+
+ return rc;
+}
+
+/* Tear down the LF error and RAS interrupts set up by nix_register_irqs(). */
+void
+nix_unregister_irqs(struct nix *nix)
+{
+	nix_lf_unregister_err_irq(nix);
+	nix_lf_unregister_ras_irq(nix);
+}
@@ -18,11 +18,25 @@
/* Apply BP/DROP when CQ is 95% full */
#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
+/* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */
+#define CQ_CQE_THRESH_DEFAULT 0x1ULL
+#define CQ_TIMER_THRESH_DEFAULT 0xAULL /* ~1usec i.e (0xA * 100nsec) */
+#define CQ_TIMER_THRESH_MAX 255
+
+struct nix_qint {
+ struct nix *nix;
+ uint8_t qintx;
+};
+
struct nix {
uint16_t reta[ROC_NIX_RSS_GRPS][ROC_NIX_RSS_RETA_MAX];
enum roc_nix_rss_reta_sz reta_sz;
struct plt_pci_device *pci_dev;
uint16_t bpid[NIX_MAX_CHAN];
+ struct nix_qint *qints_mem;
+ struct nix_qint *cints_mem;
+ uint8_t configured_qints;
+ uint8_t configured_cints;
struct roc_nix_sq **sqs;
uint16_t vwqe_interval;
uint16_t tx_chan_base;
@@ -97,4 +111,8 @@ nix_priv_to_roc_nix(struct nix *nix)
offsetof(struct roc_nix, reserved));
}
+/* IRQ */
+int nix_register_irqs(struct nix *nix);
+void nix_unregister_irqs(struct nix *nix);
+
#endif /* _ROC_NIX_PRIV_H_ */
@@ -15,6 +15,7 @@ INTERNAL {
roc_model;
roc_nix_dev_fini;
roc_nix_dev_init;
+ roc_nix_err_intr_ena_dis;
roc_nix_get_base_chan;
roc_nix_get_pf;
roc_nix_get_pf_func;
@@ -27,6 +28,13 @@ INTERNAL {
roc_nix_lf_alloc;
roc_nix_lf_free;
roc_nix_max_pkt_len;
+ roc_nix_ras_intr_ena_dis;
+ roc_nix_register_cq_irqs;
+ roc_nix_register_queue_irqs;
+ roc_nix_rx_queue_intr_disable;
+ roc_nix_rx_queue_intr_enable;
+ roc_nix_unregister_cq_irqs;
+ roc_nix_unregister_queue_irqs;
roc_npa_aura_limit_modify;
roc_npa_aura_op_range_set;
roc_npa_ctx_dump;