@@ -96,7 +96,9 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+#ifdef RTE_EAL_RX_INTR
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
+#endif
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
@@ -199,11 +201,15 @@ static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
+#ifdef RTE_EAL_RX_INTR
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
+#endif
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
+#ifdef RTE_EAL_RX_INTR
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
uint8_t index, uint8_t offset);
+#endif
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -760,7 +766,9 @@ eth_igb_start(struct rte_eth_dev *dev)
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+#ifdef RTE_EAL_RX_INTR
uint32_t intr_vector = 0;
+#endif
int ret, mask;
uint32_t ctrl_ext;
@@ -801,6 +809,7 @@ eth_igb_start(struct rte_eth_dev *dev)
/* configure PF module if SRIOV enabled */
igb_pf_host_configure(dev);
+#ifdef RTE_EAL_RX_INTR
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0)
intr_vector = dev->data->nb_rx_queues;
@@ -818,6 +827,7 @@ eth_igb_start(struct rte_eth_dev *dev)
return -ENOMEM;
}
}
+#endif
/* confiugre msix for rx interrupt */
eth_igb_configure_msix_intr(dev);
@@ -913,9 +923,11 @@ eth_igb_start(struct rte_eth_dev *dev)
" no intr multiplex\n");
}
+#ifdef RTE_EAL_RX_INTR
/* check if rxq interrupt is enabled */
if (dev->data->dev_conf.intr_conf.rxq != 0)
eth_igb_rxq_interrupt_setup(dev);
+#endif
/* enable uio/vfio intr/eventfd mapping */
rte_intr_enable(intr_handle);
@@ -1007,12 +1019,14 @@ eth_igb_stop(struct rte_eth_dev *dev)
}
filter_info->twotuple_mask = 0;
+#ifdef RTE_EAL_RX_INTR
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec != NULL) {
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+#endif
}
static void
@@ -1020,7 +1034,9 @@ eth_igb_close(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link;
+#ifdef RTE_EAL_RX_INTR
struct rte_pci_device *pci_dev;
+#endif
eth_igb_stop(dev);
e1000_phy_hw_reset(hw);
@@ -1038,11 +1054,13 @@ eth_igb_close(struct rte_eth_dev *dev)
igb_dev_clear_queues(dev);
+#ifdef RTE_EAL_RX_INTR
pci_dev = dev->pci_dev;
if (pci_dev->intr_handle.intr_vec) {
rte_free(pci_dev->intr_handle.intr_vec);
pci_dev->intr_handle.intr_vec = NULL;
}
+#endif
memset(&link, 0, sizeof(link));
rte_igb_dev_atomic_write_link_status(dev, &link);
@@ -1867,6 +1885,7 @@ eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
return 0;
}
+#ifdef RTE_EAL_RX_INTR
/*
* It clears the interrupt causes and enables the interrupt.
* It will be called once only during nic initialized.
@@ -1894,6 +1913,7 @@ static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
return 0;
}
+#endif
/*
* It reads ICR and gets interrupt causes, check it and set a bit flag
@@ -3750,6 +3770,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
return 0;
}
+#ifdef RTE_EAL_RX_INTR
static void
eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
uint8_t index, uint8_t offset)
@@ -3791,6 +3812,7 @@ eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
((queue & 0x1) << 4) + 8 * direction);
}
}
+#endif
/*
* Sets up the hardware to generate MSI-X interrupts properly
@@ -3800,18 +3822,21 @@ eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
static void
eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
{
+#ifdef RTE_EAL_RX_INTR
int queue_id;
uint32_t tmpval, regval, intr_mask;
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
uint32_t vec = 0;
+#endif
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
/* won't configure msix register if no mapping is done
* between intr vector and event fd */
if (!rte_intr_dp_is_en(intr_handle))
return;
+#ifdef RTE_EAL_RX_INTR
/* set interrupt vector for other causes */
if (hw->mac.type == e1000_82575) {
tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
@@ -3868,6 +3893,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
}
E1000_WRITE_FLUSH(hw);
+#endif
}
@@ -174,7 +174,9 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+#ifdef RTE_EAL_RX_INTR
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+#endif
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -210,8 +212,10 @@ static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
+#ifdef RTE_EAL_RX_INTR
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
+#endif
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
@@ -234,8 +238,10 @@ static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
+#ifdef RTE_EAL_RX_INTR
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
+#endif
static void ixgbe_configure_msix(struct rte_eth_dev *dev);
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
@@ -1481,7 +1487,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+#ifdef RTE_EAL_RX_INTR
uint32_t intr_vector = 0;
+#endif
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
int mask = 0;
@@ -1514,6 +1522,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
/* configure PF module if SRIOV enabled */
ixgbe_pf_host_configure(dev);
+#ifdef RTE_EAL_RX_INTR
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0)
intr_vector = dev->data->nb_rx_queues;
@@ -1532,6 +1541,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
return -1;
}
}
+#endif
/* confiugre msix for sleep until rx interrupt */
ixgbe_configure_msix(dev);
@@ -1619,9 +1629,11 @@ skip_link_setup:
" no intr multiplex\n");
}
+#ifdef RTE_EAL_RX_INTR
/* check if rxq interrupt is enabled */
if (dev->data->dev_conf.intr_conf.rxq != 0)
ixgbe_dev_rxq_interrupt_setup(dev);
+#endif
/* enable uio/vfio intr/eventfd mapping */
rte_intr_enable(intr_handle);
@@ -1727,12 +1739,14 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
memset(filter_info->fivetuple_mask, 0,
sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+#ifdef RTE_EAL_RX_INTR
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec != NULL) {
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+#endif
}
/*
@@ -2335,6 +2349,7 @@ ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
* - On success, zero.
* - On failure, a negative value.
*/
+#ifdef RTE_EAL_RX_INTR
static int
ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
@@ -2345,6 +2360,7 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
return 0;
}
+#endif
/*
* It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
@@ -3127,7 +3143,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#ifdef RTE_EAL_RX_INTR
uint32_t intr_vector = 0;
+#endif
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
int err, mask = 0;
@@ -3160,6 +3178,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_dev_rxtx_start(dev);
+#ifdef RTE_EAL_RX_INTR
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0)
intr_vector = dev->data->nb_rx_queues;
@@ -3177,7 +3196,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
return -ENOMEM;
}
}
-
+#endif
ixgbevf_configure_msix(dev);
if (dev->data->dev_conf.intr_conf.lsc != 0) {
@@ -3223,19 +3242,23 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
/* disable intr eventfd mapping */
rte_intr_disable(intr_handle);
+#ifdef RTE_EAL_RX_INTR
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec != NULL) {
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+#endif
}
static void
ixgbevf_dev_close(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#ifdef RTE_EAL_RX_INTR
struct rte_pci_device *pci_dev;
+#endif
PMD_INIT_FUNC_TRACE();
@@ -3246,11 +3269,13 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+#ifdef RTE_EAL_RX_INTR
pci_dev = dev->pci_dev;
if (pci_dev->intr_handle.intr_vec) {
rte_free(pci_dev->intr_handle.intr_vec);
pci_dev->intr_handle.intr_vec = NULL;
}
+#endif
}
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
@@ -3834,6 +3859,7 @@ ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
return 0;
}
+#ifdef RTE_EAL_RX_INTR
static void
ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector)
@@ -3902,21 +3928,25 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
}
}
}
+#endif
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+#ifdef RTE_EAL_RX_INTR
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
uint32_t q_idx;
uint32_t vector_idx = 0;
+#endif
/* won't configure msix register if no mapping is done
* between intr vector and event fd */
if (!rte_intr_dp_is_en(intr_handle))
return;
+#ifdef RTE_EAL_RX_INTR
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queue use vector 0,
@@ -3927,6 +3957,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
/* Configure VF Rx queue ivar */
ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+#endif
}
/**
@@ -3937,18 +3968,21 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+#ifdef RTE_EAL_RX_INTR
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
uint32_t queue_id, vec = 0;
uint32_t mask;
uint32_t gpie;
+#endif
/* won't configure msix register if no mapping is done
* between intr vector and event fd */
if (!rte_intr_dp_is_en(intr_handle))
return;
+#ifdef RTE_EAL_RX_INTR
/* setup GPIE for MSI-x mode */
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
@@ -4000,6 +4034,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+#endif
}
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
@@ -239,7 +239,7 @@ static struct rte_eth_conf port_conf = {
},
.intr_conf = {
.lsc = 1,
- .rxq = 1, /**< rxq interrupt feature enabled */
+ .rxq = 1,
},
};
@@ -889,7 +889,7 @@ main_loop(__attribute__((unused)) void *dummy)
}
/* add into event wait list */
- if (port_conf.intr_conf.rxq && event_register(qconf) == 0)
+ if (event_register(qconf) == 0)
intr_en = 1;
else
RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't enable.\n");
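Since intr_conf.rxq exists only when RTE_EAL_RX_INTR is defined, the example now calls event_register() unconditionally; with the feature compiled out, the rte_eth_dev_rx_intr_ctl_q() stub returns -1 and the loop simply stays in polling mode. A minimal sketch of that registration step, assuming <rte_ethdev.h>/<rte_log.h> and the RTE_INTR_EVENT_ADD op introduced by this series (portid/queueid and the log type are illustrative):

	/* Sketch only: bind one Rx queue's interrupt to this thread's epoll fd.
	 * With RTE_EAL_RX_INTR compiled out the call is a stub returning -1. */
	static int
	register_rx_event(uint8_t portid, uint16_t queueid)
	{
		if (rte_eth_dev_rx_intr_ctl_q(portid, queueid,
					      RTE_EPOLL_PER_THREAD,
					      RTE_INTR_EVENT_ADD, NULL) != 0) {
			RTE_LOG(INFO, USER1,
				"RX interrupt not available, polling only\n");
			return -1;
		}
		return 0;
	}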
@@ -49,9 +49,16 @@ enum rte_intr_handle_type {
struct rte_intr_handle {
int fd; /**< file descriptor */
enum rte_intr_handle_type type; /**< handle type */
+#ifdef RTE_EAL_RX_INTR
+ /**
+ * RTE_EAL_RX_INTR will be removed in v2.2.
+ * It exists only to avoid an unannounced ABI break in v2.1;
+ * be aware of that impact before turning the feature on.
+ */
int max_intr; /**< max interrupt requested */
uint32_t nb_efd; /**< number of available efds */
int *intr_vec; /**< intr vector number array */
+#endif
};
/**
@@ -290,18 +290,26 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
irq_set = (struct vfio_irq_set *) irq_set_buf;
irq_set->argsz = len;
+#ifdef RTE_EAL_RX_INTR
if (!intr_handle->max_intr)
intr_handle->max_intr = 1;
else if (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
intr_handle->max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;
irq_set->count = intr_handle->max_intr;
+#else
+ irq_set->count = 1;
+#endif
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
irq_set->start = 0;
fd_ptr = (int *) &irq_set->data;
+#ifdef RTE_EAL_RX_INTR
memcpy(fd_ptr, intr_handle->efds, sizeof(intr_handle->efds));
fd_ptr[intr_handle->max_intr - 1] = intr_handle->fd;
+#else
+ fd_ptr[0] = intr_handle->fd;
+#endif
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
@@ -875,6 +883,7 @@ rte_eal_intr_init(void)
return -ret;
}
+#ifdef RTE_EAL_RX_INTR
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
@@ -917,6 +926,7 @@ eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
return;
} while (1);
}
+#endif
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
@@ -1054,6 +1064,7 @@ rte_epoll_ctl(int epfd, int op, int fd,
return 0;
}
+#ifdef RTE_EAL_RX_INTR
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
int op, unsigned int vec, void *data)
@@ -1165,3 +1176,4 @@ rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
intr_handle->nb_efd = 0;
intr_handle->max_intr = 0;
}
+#endif
@@ -38,6 +38,10 @@
#ifndef _RTE_LINUXAPP_INTERRUPTS_H_
#define _RTE_LINUXAPP_INTERRUPTS_H_
+#ifndef RTE_EAL_RX_INTR
+#include <rte_common.h>
+#endif
+
#define RTE_MAX_RXTX_INTR_VEC_ID 32
enum rte_intr_handle_type {
@@ -86,12 +90,19 @@ struct rte_intr_handle {
};
int fd; /**< interrupt event file descriptor */
enum rte_intr_handle_type type; /**< handle type */
+#ifdef RTE_EAL_RX_INTR
+ /**
+ * RTE_EAL_RX_INTR will be removed in v2.2.
+ * It exists only to avoid an unannounced ABI break in v2.1;
+ * be aware of that impact before turning the feature on.
+ */
uint32_t max_intr; /**< max interrupt requested */
uint32_t nb_efd; /**< number of available efds */
int efds[RTE_MAX_RXTX_INTR_VEC_ID]; /**< intr vectors/efds mapping */
struct rte_epoll_event elist[RTE_MAX_RXTX_INTR_VEC_ID];
/**< intr vector epoll event */
int *intr_vec; /**< intr vector number array */
+#endif
};
#define RTE_EPOLL_PER_THREAD -1 /**< to hint using per thread epfd */
@@ -162,9 +173,23 @@ rte_intr_tls_epfd(void);
* - On success, zero.
* - On failure, a negative value.
*/
-int
+#ifdef RTE_EAL_RX_INTR
+extern int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
int epfd, int op, unsigned int vec, void *data);
+#else
+static inline int
+rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
+ int epfd, int op, unsigned int vec, void *data)
+{
+ RTE_SET_USED(intr_handle);
+ RTE_SET_USED(epfd);
+ RTE_SET_USED(op);
+ RTE_SET_USED(vec);
+ RTE_SET_USED(data);
+ return -ENOTSUP;
+}
+#endif
/**
* It enables the fastpath event fds if it's necessary.
@@ -179,8 +204,18 @@ rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
* - On success, zero.
* - On failure, a negative value.
*/
-int
+#ifdef RTE_EAL_RX_INTR
+extern int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd);
+#else
+static inline int
+rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
+{
+ RTE_SET_USED(intr_handle);
+ RTE_SET_USED(nb_efd);
+ return 0;
+}
+#endif
/**
* It disable the fastpath event fds.
@@ -189,8 +224,17 @@ rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd);
* @param intr_handle
* Pointer to the interrupt handle.
*/
-void
+#ifdef RTE_EAL_RX_INTR
+extern void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle);
+#else
+static inline void
+rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
+{
+ RTE_SET_USED(intr_handle);
+ return;
+}
+#endif
/**
* The fastpath interrupt is enabled or not.
@@ -198,11 +242,20 @@ rte_intr_efd_disable(struct rte_intr_handle *intr_handle);
* @param intr_handle
* Pointer to the interrupt handle.
*/
+#ifdef RTE_EAL_RX_INTR
static inline int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
return !(!intr_handle->nb_efd);
}
+#else
+static inline int
+rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
+{
+ RTE_SET_USED(intr_handle);
+ return 0;
+}
+#endif
/**
* The interrupt handle instance allows other cause or not.
@@ -211,10 +264,19 @@ rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
* @param intr_handle
* Pointer to the interrupt handle.
*/
+#ifdef RTE_EAL_RX_INTR
static inline int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
return !!(intr_handle->max_intr - intr_handle->nb_efd);
}
+#else
+static inline int
+rte_intr_allow_others(struct rte_intr_handle *intr_handle)
+{
+ RTE_SET_USED(intr_handle);
+ return 1;
+}
+#endif
#endif /* _RTE_LINUXAPP_INTERRUPTS_H_ */
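With these stubs in place, callers can use the fastpath-interrupt helpers unconditionally: when RTE_EAL_RX_INTR is off, rte_intr_efd_enable() succeeds as a no-op, rte_intr_dp_is_en() reports that no per-queue event fds exist, and rte_intr_allow_others() always permits the other causes. A condensed sketch of the setup flow the igb/ixgbe start routines above follow, assuming RTE_EAL_RX_INTR is enabled (with it off the PMDs compile these blocks out entirely); the function name is illustrative and it assumes <rte_ethdev.h>, <rte_interrupts.h>, <rte_malloc.h> and <errno.h>:

	/* Sketch of the per-queue interrupt setup used in the PMD start paths;
	 * only meaningful when RTE_EAL_RX_INTR is defined. */
	static int
	setup_rx_intr(struct rte_eth_dev *dev)
	{
		struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

		if (dev->data->dev_conf.intr_conf.rxq == 0)
			return 0;

		/* one event fd per Rx queue */
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;

		if (rte_intr_dp_is_en(intr_handle) &&
		    intr_handle->intr_vec == NULL) {
			intr_handle->intr_vec = rte_zmalloc("intr_vec",
					dev->data->nb_rx_queues * sizeof(int), 0);
			if (intr_handle->intr_vec == NULL)
				return -ENOMEM;
		}

		/* program per-queue MSI-X vectors here, then enable the
		 * uio/vfio intr/eventfd mapping */
		rte_intr_enable(intr_handle);
		return 0;
	}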
@@ -3282,6 +3282,7 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
+#ifdef RTE_EAL_RX_INTR
int
rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
{
@@ -3353,6 +3354,7 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
return 0;
}
+#endif
int
rte_eth_dev_rx_intr_enable(uint8_t port_id,
@@ -830,8 +830,10 @@ struct rte_eth_fdir {
struct rte_intr_conf {
/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
uint16_t lsc;
+#ifdef RTE_EAL_RX_INTR
/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
uint16_t rxq;
+#endif
};
/**
@@ -2943,8 +2945,20 @@ int rte_eth_dev_rx_intr_disable(uint8_t port_id,
* - On success, zero.
* - On failure, a negative value.
*/
-int
+#ifdef RTE_EAL_RX_INTR
+extern int
rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
+#else
+static inline int
+rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
+{
+ RTE_SET_USED(port_id);
+ RTE_SET_USED(epfd);
+ RTE_SET_USED(op);
+ RTE_SET_USED(data);
+ return -1;
+}
+#endif
/**
* RX Interrupt control per queue.
@@ -2967,9 +2981,23 @@ rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
* - On success, zero.
* - On failure, a negative value.
*/
-int
+#ifdef RTE_EAL_RX_INTR
+extern int
rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
int epfd, int op, void *data);
+#else
+static inline int
+rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
+ int epfd, int op, void *data)
+{
+ RTE_SET_USED(port_id);
+ RTE_SET_USED(queue_id);
+ RTE_SET_USED(epfd);
+ RTE_SET_USED(op);
+ RTE_SET_USED(data);
+ return -1;
+}
+#endif
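In practice the per-queue control path is combined with rte_eth_dev_rx_intr_enable()/_disable() and the rte_epoll_wait() call introduced elsewhere in this series, so an lcore can sleep until traffic arrives, as l3fwd-power does. A minimal sketch, assuming RTE_EAL_RX_INTR is enabled and the queue was registered with RTE_INTR_EVENT_ADD (portid/queueid and the 10 ms timeout are illustrative):

	/* Sketch: sleep on the queue's Rx interrupt, then resume polling. */
	static void
	sleep_until_rx_interrupt(uint8_t portid, uint16_t queueid)
	{
		struct rte_epoll_event event;

		rte_eth_dev_rx_intr_enable(portid, queueid);
		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 10 /* ms */);
		rte_eth_dev_rx_intr_disable(portid, queueid);
	}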
/**
* Turn on the LED on the Ethernet device.