@@ -2,6 +2,7 @@
* Copyright(C) 2019 Marvell International Ltd.
*/
+#include <rte_ethdev_driver.h>
#include "otx2_common.h"
#include "otx_ep_common.h"
#include "otx2_ep_vf.h"
@@ -282,6 +283,33 @@ otx2_vf_disable_io_queues(struct otx_ep_device *otx_ep)
}
}
+/* Convert the IQ instruction-count CSR value into a ring read index.
+ * NOTE(review): writing the all-ones value back to inst_cnt_reg appears
+ * to be a write-to-clear reset of the HW counter -- confirm vs. SDP HRM.
+ */
+static uint32_t
+otx2_vf_update_read_index(struct otx_ep_instr_queue *iq)
+{
+ uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
+
+ if (new_idx == 0xFFFFFFFF) {
+ otx_ep_dbg("%s Going to reset IQ index\n", __func__);
+ rte_write32(new_idx, iq->inst_cnt_reg);
+ }
+
+ /* The new instr cnt reg is a 32-bit counter that can roll over.
+ * We have noted the counter's initial value at init time into
+ * reset_instr_cnt
+ */
+ if (iq->reset_instr_cnt < new_idx)
+ new_idx -= iq->reset_instr_cnt;
+ else
+ new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
+
+ /* Modulo of the new index with the IQ size will give us
+ * the new index.
+ */
+ new_idx %= iq->nb_desc;
+
+ return new_idx;
+}
+
static const struct otx_ep_config default_otx2_ep_conf = {
/* IQ attributes */
.iq = {
@@ -313,6 +341,38 @@ otx2_ep_get_defconf(struct otx_ep_device *otx_ep_dev __rte_unused)
return default_conf;
}
+/* Enable the Rx (OQ) interrupt for queue q_no on the otx2 SDP VF.
+ * Returns 0 (HW register writes cannot fail).
+ */
+static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union out_int_lvl_t out_int_lvl;
+ union out_cnts_t out_cnts;
+
+ /* Read-modify-write: the union was previously used uninitialized,
+ * so the untouched bitfields (timet, max_len, max_len_en, bmode)
+ * carried stack garbage into the register. Preserve them instead,
+ * matching the otx_vf_enable_rxq_intr() implementation.
+ */
+ out_int_lvl.d64 = otx2_read64(otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 1;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+
+ /* RESEND asks HW to re-evaluate pending counts so an interrupt
+ * missed while masked is raised now -- presumably; confirm vs. HRM.
+ */
+ out_cnts.d64 = 0;
+ out_cnts.s.resend = 1;
+ otx2_write64(out_cnts.d64, otx_epvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ return 0;
+}
+
+/* Disable the Rx (OQ) interrupt for queue q_no on the otx2 SDP VF.
+ * Read-modify-write so the other OUT_INT_LEVELS fields are preserved.
+ * Always returns 0.
+ */
+static int otx2_vf_disable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union out_int_lvl_t out_int_lvl;
+
+ /* Disable the interrupt for this queue */
+ out_int_lvl.d64 = otx2_read64(otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 0;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+
+ return 0;
+}
+
int
otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
{
@@ -340,6 +400,7 @@ otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
otx_ep->fn_list.setup_oq_regs = otx2_vf_setup_oq_regs;
otx_ep->fn_list.setup_device_regs = otx2_vf_setup_device_regs;
+ otx_ep->fn_list.update_iq_read_idx = otx2_vf_update_read_index;
otx_ep->fn_list.enable_io_queues = otx2_vf_enable_io_queues;
otx_ep->fn_list.disable_io_queues = otx2_vf_disable_io_queues;
@@ -349,6 +410,8 @@ otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
otx_ep->fn_list.enable_oq = otx2_vf_enable_oq;
otx_ep->fn_list.disable_oq = otx2_vf_disable_oq;
+ otx_ep->fn_list.enable_rxq_intr = otx2_vf_enable_rxq_intr;
+ otx_ep->fn_list.disable_rxq_intr = otx2_vf_disable_rxq_intr;
return 0;
}
@@ -26,5 +26,31 @@ struct otx2_ep_instr_64B {
uint64_t exhdr[4];
};
+/* SDP_VF_R_OUT_INT_LEVELS register layout (otx2).
+ * NOTE(review): bitfield widths taken from the added code, not verified
+ * against the hardware reference manual -- confirm before reuse.
+ */
+union out_int_lvl_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timet:22;
+ uint64_t max_len:7;
+ uint64_t max_len_en:1;
+ uint64_t time_cnt_en:1;
+ uint64_t bmode:1;
+ } s;
+};
+
+/* SDP_VF_R_OUT_CNTS register layout (otx2).
+ * NOTE(review): field widths assumed from this patch -- confirm vs. HRM.
+ */
+union out_cnts_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timer:22;
+ uint64_t rsvd:5;
+ uint64_t resend:1;
+ uint64_t mbox_int:1;
+ uint64_t in_int:1;
+ uint64_t out_int:1;
+ uint64_t send_ism:1;
+ } s;
+};
+
#endif /*_OTX2_EP_VF_H_ */
@@ -122,6 +122,37 @@ typedef union otx_ep_instr_ih {
} s;
} otx_ep_instr_ih_t;
+
+
+/* Response header prepended by the device to packets sent to the host. */
+typedef union otx_ep_resp_hdr {
+ uint64_t u64;
+ struct {
+ /** The request id for a packet that's in response
+ * to pkt sent by host.
+ */
+ uint64_t request_id:16;
+
+ /** Reserved. */
+ uint64_t reserved:2;
+
+ /** checksum verified. */
+ uint64_t csum_verified:2;
+
+ /** The destination Queue port. */
+ uint64_t dest_qport:22;
+
+ /** The source port for a packet that's in response
+ * to pkt sent by host.
+ */
+ uint64_t src_port:6;
+
+ /** Opcode for this packet. */
+ uint64_t opcode:16;
+ } s;
+} otx_ep_resp_hdr_t;
+
+#define OTX_EP_RESP_HDR_SIZE (sizeof(otx_ep_resp_hdr_t))
+
/* OTX_EP IQ request list */
struct otx_ep_instr_list {
void *buf;
@@ -210,6 +241,17 @@ struct otx_ep_instr_queue {
const struct rte_memzone *iq_mz;
};
+/* DROQ packet format for application i/f. */
+struct otx_ep_droq_pkt {
+ /* DROQ packet data buffer pointer. */
+ uint8_t *data;
+
+ /* DROQ packet data length */
+ uint32_t len;
+
+ uint32_t misc;
+};
+
/** Descriptor format.
* The descriptor ring is made of descriptors which have 2 64-bit values:
* -# Physical (bus) address of the data buffer.
@@ -395,6 +437,7 @@ struct otx_ep_fn_list {
void (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
int (*setup_device_regs)(struct otx_ep_device *otx_ep);
+ uint32_t (*update_iq_read_idx)(struct otx_ep_instr_queue *iq);
void (*enable_io_queues)(struct otx_ep_device *otx_ep);
void (*disable_io_queues)(struct otx_ep_device *otx_ep);
@@ -404,6 +447,8 @@ struct otx_ep_fn_list {
void (*enable_oq)(struct otx_ep_device *otx_ep, uint32_t q_no);
void (*disable_oq)(struct otx_ep_device *otx_ep, uint32_t q_no);
+ int (*enable_rxq_intr)(struct otx_ep_device *otx_epvf, uint16_t q_no);
+ int (*disable_rxq_intr)(struct otx_ep_device *otx_epvf, uint16_t q_no);
};
/* SRIOV information */
@@ -508,8 +553,16 @@ struct otx_ep_buf_free_info {
struct otx_ep_gather g;
};
+int
+otx_ep_register_irq(struct rte_intr_handle *intr_handle, unsigned int vec);
+
+void
+otx_ep_unregister_irq(struct rte_intr_handle *intr_handle, unsigned int vec);
+
#define OTX_EP_MAX_PKT_SZ 64000U
#define OTX_EP_MAX_MAC_ADDRS 1
#define OTX_EP_SG_ALIGN 8
+#define SDP_VF_R_MSIX_START (0x0)
+#define SDP_VF_R_MSIX(ring) (SDP_VF_R_MSIX_START + (ring))
#endif /* _OTX_EP_COMMON_H_ */
@@ -2,6 +2,7 @@
* Copyright(C) 2019 Marvell International Ltd.
*/
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_io.h>
@@ -12,6 +13,14 @@
#include "otx2_ep_vf.h"
#include "otx_ep_rxtx.h"
+#include <linux/vfio.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define MAX_INTR_VEC_ID RTE_MAX_RXTX_INTR_VEC_ID
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (MAX_INTR_VEC_ID))
#define OTX_EP_DEV(_eth_dev) ((_eth_dev)->data->dev_private)
static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
@@ -186,6 +195,55 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
return -ENOMEM;
}
+/* Register the VFIO MSI-X vector for Rx queue q_no and record the
+ * epoll vector id in handle->intr_vec so applications can wait on it.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int otx_epvf_setup_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ struct rte_eth_dev *eth_dev = otx_epvf->eth_dev;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int rc, vec;
+
+ vec = SDP_VF_R_MSIX(q_no);
+
+ rc = otx_ep_register_irq(handle, vec);
+ if (rc) {
+ otx_ep_err("Fail to register Rx irq, rc=%d", rc);
+ return rc;
+ }
+
+ if (!handle->intr_vec) {
+ handle->intr_vec = rte_zmalloc("intr_vec",
+ otx_epvf->max_rx_queues *
+ sizeof(int), 0);
+ if (!handle->intr_vec) {
+ otx_ep_err("Failed to allocate %d rx intr_vec",
+ otx_epvf->max_rx_queues);
+ /* Undo the IRQ registration done above so the
+ * eventfd is not leaked on this error path.
+ */
+ otx_ep_unregister_irq(handle, vec);
+ return -ENOMEM;
+ }
+ }
+
+ /* VFIO vector zero is reserved for misc interrupt so
+ * doing required adjustment.
+ */
+ handle->intr_vec[q_no] = RTE_INTR_VEC_RXTX_OFFSET + vec;
+
+ return rc;
+}
+
+/* Tear down the Rx interrupt for queue q_no: mask it in HW, then
+ * release its VFIO MSI-X vector/eventfd.
+ */
+static void otx_epvf_unset_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ struct rte_eth_dev *eth_dev = otx_epvf->eth_dev;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int vec;
+
+ vec = SDP_VF_R_MSIX(q_no);
+ otx_epvf->fn_list.disable_rxq_intr(otx_epvf, q_no);
+ otx_ep_unregister_irq(handle, vec);
+}
+
static int
otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
{
@@ -195,6 +253,7 @@ otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
struct rte_eth_rxmode *rxmode = &conf->rxmode;
struct rte_eth_txmode *txmode = &conf->txmode;
uint32_t ethdev_queues;
+ uint16_t q;
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
if (eth_dev->data->nb_rx_queues > ethdev_queues ||
@@ -209,9 +268,177 @@ otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
otx_epvf->rx_offloads = rxmode->offloads;
otx_epvf->tx_offloads = txmode->offloads;
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ for (q = 0; q < eth_dev->data->nb_rx_queues; q++)
+ otx_epvf_setup_rxq_intr(otx_epvf, q);
+ }
return 0;
}
+/* Query the MSI-X vector count from VFIO and cache it (clamped to
+ * MAX_INTR_VEC_ID) in intr_handle->max_intr.
+ * Returns 0 on success, the negative ioctl() result on failure.
+ */
+static int
+irq_get_info(struct rte_intr_handle *intr_handle)
+{
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+ int rc;
+
+ irq.index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (rc < 0) {
+ otx_ep_err("Failed to get IRQ info rc=%d errno=%d", rc, errno);
+ return rc;
+ }
+
+ otx_ep_dbg("Flags=0x%x index=0x%x count=0x%x max_intr_vec_id=0x%x",
+ irq.flags, irq.index, irq.count, MAX_INTR_VEC_ID);
+
+ if (irq.count > MAX_INTR_VEC_ID) {
+ /* Log the HW-reported count; max_intr was not yet set at
+ * this point, so printing it here reported a stale value.
+ */
+ otx_ep_err("HW max=%d > MAX_INTR_VEC_ID: %d",
+ irq.count, MAX_INTR_VEC_ID);
+ intr_handle->max_intr = MAX_INTR_VEC_ID;
+ } else {
+ intr_handle->max_intr = irq.count;
+ }
+
+ return 0;
+}
+
+/* Detach every MSI-X vector from VFIO (eventfd = -1) in one ioctl so
+ * that individual vectors can later be enabled via irq_config().
+ * Returns 0 on success, the ioctl() result on failure.
+ */
+static int
+irq_init(struct rte_intr_handle *intr_handle)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int32_t *fd_ptr;
+ int len, rc;
+ uint32_t i;
+
+ if (intr_handle->max_intr > MAX_INTR_VEC_ID) {
+ otx_ep_err("Max_intr=%d greater than MAX_INTR_VEC_ID=%d",
+ intr_handle->max_intr, MAX_INTR_VEC_ID);
+ return -ERANGE;
+ }
+
+ len = sizeof(struct vfio_irq_set) +
+ sizeof(int32_t) * intr_handle->max_intr;
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->start = 0;
+ irq_set->count = intr_handle->max_intr;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ /* fd = -1 tells the kernel to leave each vector untriggered */
+ fd_ptr = (int32_t *)&irq_set->data[0];
+ for (i = 0; i < irq_set->count; i++)
+ fd_ptr[i] = -1;
+
+ rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (rc)
+ otx_ep_err("Failed to set irqs vector rc=%d", rc);
+
+ return rc;
+}
+
+/* Attach the eventfd stored at intr_handle->efds[vec] to MSI-X vector
+ * vec via VFIO. An efds[vec] of -1 (see otx_ep_unregister_irq) detaches
+ * the vector. Returns 0 on success, negative on failure.
+ */
+static int
+irq_config(struct rte_intr_handle *intr_handle, unsigned int vec)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int32_t *fd_ptr;
+ int len, rc;
+
+ /* NOTE(review): boundary uses '>', so vec == max_intr passes; if
+ * vectors are 0-based this looks like it should be '>=' -- confirm.
+ */
+ if (vec > intr_handle->max_intr) {
+ otx_ep_err("vector=%d greater than max_intr=%d", vec,
+ intr_handle->max_intr);
+ return -EINVAL;
+ }
+
+ len = sizeof(struct vfio_irq_set) + sizeof(int32_t);
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->start = vec;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ /* Use vec fd to set interrupt vectors */
+ fd_ptr = (int32_t *)&irq_set->data[0];
+ fd_ptr[0] = intr_handle->efds[vec];
+
+ rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (rc)
+ otx_ep_err("Failed to set_irqs vector=0x%x rc=%d", vec, rc);
+
+ return rc;
+}
+
+/* Register MSI-X vector vec: create an eventfd, record it in
+ * intr_handle, and enable the vector in VFIO.
+ * Returns 0 on success, negative errno on failure.
+ */
+int
+otx_ep_register_irq(struct rte_intr_handle *intr_handle, unsigned int vec)
+{
+ struct rte_intr_handle tmp_handle;
+ int rc;
+
+ /* First use: query the vector count from VFIO and detach all
+ * vectors. Previously these return codes were silently ignored,
+ * leaving max_intr stale on failure.
+ */
+ if (intr_handle->max_intr == 0) {
+ rc = irq_get_info(intr_handle);
+ if (rc)
+ return rc;
+ rc = irq_init(intr_handle);
+ if (rc)
+ return rc;
+ }
+
+ if (vec > intr_handle->max_intr) {
+ otx_ep_err("Vector=%d greater than max_intr=%d", vec,
+ intr_handle->max_intr);
+ return -EINVAL;
+ }
+
+ tmp_handle = *intr_handle;
+ /* Create new eventfd for interrupt vector */
+ tmp_handle.fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (tmp_handle.fd == -1)
+ return -ENODEV;
+
+ intr_handle->efds[vec] = tmp_handle.fd;
+ intr_handle->nb_efd = ((vec + 1) > intr_handle->nb_efd) ?
+ (vec + 1) : intr_handle->nb_efd;
+ intr_handle->max_intr = RTE_MAX(intr_handle->nb_efd + 1,
+ intr_handle->max_intr);
+
+ otx_ep_dbg("Enable vector:0x%x for vfio (efds: %d, max:%d)",
+ vec, intr_handle->nb_efd, intr_handle->max_intr);
+
+ /* Enable MSIX vectors to VFIO */
+ return irq_config(intr_handle, vec);
+}
+
+/**
+ * @internal
+ * Unregister IRQ
+ *
+ * Closes the eventfd backing vector @vec and detaches it from VFIO by
+ * re-running irq_config() with efds[vec] = -1. No-op if the vector was
+ * never registered or is out of range.
+ */
+void
+otx_ep_unregister_irq(struct rte_intr_handle *intr_handle, unsigned int vec)
+{
+ if (vec > intr_handle->max_intr) {
+ otx_ep_err("Error unregistering MSI-X interrupts vec:%d > %d",
+ vec, intr_handle->max_intr);
+ return;
+ }
+
+ /* Nothing to do if this vector has no eventfd registered.
+ * (Previously this was checked twice via an unused tmp_handle copy.)
+ */
+ if (intr_handle->efds[vec] == -1)
+ return;
+
+ otx_ep_dbg("Disable vector:0x%x for vfio (efds: %d, max:%d)",
+ vec, intr_handle->nb_efd, intr_handle->max_intr);
+
+ close(intr_handle->efds[vec]);
+ /* Disable MSIX vectors from VFIO */
+ intr_handle->efds[vec] = -1;
+ irq_config(intr_handle, vec);
+}
/**
* Setup our receive queue/ringbuffer. This is the
* queue the Octeon uses to send us packets and
@@ -429,6 +656,26 @@ otx_ep_dev_stats_reset(struct rte_eth_dev *eth_dev)
return 0;
}
+/* ethdev rx_queue_intr_enable callback: forward to the HW-specific
+ * handler installed in fn_list at device setup time.
+ */
+static int otx_ep_dev_rxq_irq_enable(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id)
+{
+ struct otx_ep_device *otx_epvf = OTX_EP_DEV(dev);
+
+ return otx_epvf->fn_list.enable_rxq_intr(otx_epvf, rx_queue_id);
+}
+
+/* ethdev rx_queue_intr_disable callback: forward to the HW-specific
+ * handler installed in fn_list at device setup time.
+ */
+static int otx_ep_dev_rxq_irq_disable(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id)
+{
+ struct otx_ep_device *otx_epvf = OTX_EP_DEV(dev);
+
+ return otx_epvf->fn_list.disable_rxq_intr(otx_epvf, rx_queue_id);
+}
+
/* Define our ethernet definitions */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
.dev_configure = otx_ep_dev_configure,
@@ -442,6 +689,8 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
.stats_get = otx_ep_dev_stats_get,
.stats_reset = otx_ep_dev_stats_reset,
.dev_infos_get = otx_ep_dev_info_get,
+ .rx_queue_intr_enable = otx_ep_dev_rxq_irq_enable,
+ .rx_queue_intr_disable = otx_ep_dev_rxq_irq_disable,
};
@@ -483,11 +732,17 @@ static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
+ uint16_t q;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
otx_epdev_exit(eth_dev);
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ for (q = 0; q < eth_dev->data->nb_rx_queues; q++)
+ otx_epvf_unset_rxq_intr(otx_epvf, q);
+ }
+
otx_epvf->port_configured = 0;
if (eth_dev->data->mac_addrs != NULL)
@@ -324,6 +324,33 @@ otx_ep_disable_io_queues(struct otx_ep_device *otx_ep)
}
}
+/* Convert the IQ instruction-count CSR value into a ring read index
+ * (otx variant; mirrors otx2_vf_update_read_index).
+ * NOTE(review): writing the all-ones value back to inst_cnt_reg appears
+ * to be a write-to-clear reset of the HW counter -- confirm vs. HRM.
+ */
+static uint32_t
+otx_ep_update_read_index(struct otx_ep_instr_queue *iq)
+{
+ uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
+
+ if (new_idx == 0xFFFFFFFF) {
+ otx_ep_dbg("%s Going to reset IQ index\n", __func__);
+ rte_write32(new_idx, iq->inst_cnt_reg);
+ }
+
+ /* The new instr cnt reg is a 32-bit counter that can roll over.
+ * We have noted the counter's initial value at init time into
+ * reset_instr_cnt
+ */
+ if (iq->reset_instr_cnt < new_idx)
+ new_idx -= iq->reset_instr_cnt;
+ else
+ new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
+
+ /* Modulo of the new index with the IQ size will give us
+ * the new index.
+ */
+ new_idx %= iq->nb_desc;
+
+ return new_idx;
+}
+
/* OTX_EP default configuration */
static const struct otx_ep_config default_otx_ep_conf = {
/* IQ attributes */
@@ -358,6 +385,41 @@ otx_ep_get_defconf(struct otx_ep_device *otx_ep_dev __rte_unused)
return default_conf;
}
+/* Enable the Rx (OQ) interrupt for queue q_no on the otx SDP VF.
+ * Always returns 0.
+ */
+static int otx_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union otx_out_int_lvl_t out_int_lvl;
+ union otx_out_cnts_t out_cnts;
+
+ /* Both parameters are used below, so the previous __rte_unused
+ * annotations were wrong and have been dropped.
+ */
+ out_int_lvl.d64 = rte_read64(otx_epvf->hw_addr +
+ OTX_EP_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.cnt = 0;
+ otx_ep_write64(out_int_lvl.d64, otx_epvf->hw_addr,
+ OTX_EP_R_OUT_INT_LEVELS(q_no));
+
+ /* RESEND asks HW to re-raise any interrupt missed while masked */
+ out_cnts.d64 = 0;
+ out_cnts.s.resend = 1;
+ otx_ep_write64(out_cnts.d64, otx_epvf->hw_addr,
+ OTX_EP_R_OUT_CNTS(q_no));
+
+ return 0;
+}
+
+/* Disable the Rx (OQ) interrupt for queue q_no on the otx SDP VF by
+ * raising the count threshold to its maximum. Always returns 0.
+ */
+static int otx_vf_disable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union otx_out_int_lvl_t out_int_lvl;
+
+ /* Both parameters are used below, so the previous __rte_unused
+ * annotations were wrong and have been dropped.
+ */
+ /* Increase the int level so that you get no more interrupts */
+ out_int_lvl.d64 = rte_read64(otx_epvf->hw_addr +
+ OTX_EP_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.cnt = 0xFFFFFFFF;
+ otx_ep_write64(out_int_lvl.d64, otx_epvf->hw_addr,
+ OTX_EP_R_OUT_INT_LEVELS(q_no));
+
+ return 0;
+}
+
int
otx_ep_vf_setup_device(struct otx_ep_device *otx_ep)
{
@@ -385,6 +447,7 @@ otx_ep_vf_setup_device(struct otx_ep_device *otx_ep)
otx_ep->fn_list.setup_oq_regs = otx_ep_setup_oq_regs;
otx_ep->fn_list.setup_device_regs = otx_ep_setup_device_regs;
+ otx_ep->fn_list.update_iq_read_idx = otx_ep_update_read_index;
otx_ep->fn_list.enable_io_queues = otx_ep_enable_io_queues;
otx_ep->fn_list.disable_io_queues = otx_ep_disable_io_queues;
@@ -394,7 +457,10 @@ otx_ep_vf_setup_device(struct otx_ep_device *otx_ep)
otx_ep->fn_list.enable_oq = otx_ep_enable_oq;
otx_ep->fn_list.disable_oq = otx_ep_disable_oq;
+ otx_ep->fn_list.enable_rxq_intr = otx_vf_enable_rxq_intr;
+ otx_ep->fn_list.disable_rxq_intr = otx_vf_disable_rxq_intr;
return 0;
}
+
@@ -170,4 +170,29 @@ struct otx_ep_instr_64B {
int
otx_ep_vf_setup_device(struct otx_ep_device *otx_ep);
+
+/* OTX_EP_R_OUT_INT_LEVELS register layout (otx).
+ * NOTE(review): field widths assumed from this patch -- confirm vs. HRM.
+ */
+union otx_out_int_lvl_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timet:22;
+ uint64_t raz:9;
+ uint64_t bmode:1;
+ } s;
+};
+
+/* OTX_EP_R_OUT_CNTS register layout (otx).
+ * NOTE(review): field widths assumed from this patch -- confirm vs. HRM.
+ */
+union otx_out_cnts_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timer:22;
+ uint64_t rsvd0:5;
+ uint64_t resend:1;
+ uint64_t mbox_int:1;
+ uint64_t in_int:1;
+ uint64_t out_int:1;
+ uint64_t rsvd1:1;
+ } s;
+};
+
#endif /*_OTX_EP_VF_H_ */