[dpdk-dev,v1,2/5] ixgbe: enable rx queue interrupts for both PF and VF

Message ID 1422438631-7853-3-git-send-email-danny.zhou@intel.com (mailing list archive)
State Superseded, archived

Commit Message

Zhou, Danny Jan. 28, 2015, 9:50 a.m. UTC
  Signed-off-by: Danny Zhou <danny.zhou@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 371 ++++++++++++++++++++++++++++++++++++
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h |   9 +
 2 files changed, 380 insertions(+)
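
For context, a minimal and entirely hypothetical sketch of how an application could consume the rx queue interrupt mode this patch wires into the ixgbe PF and VF PMDs. The ethdev-level wrapper names (rte_eth_dev_rx_queue_intr_enable/_disable) merely mirror the rx_queue_intr_enable/rx_queue_intr_disable dev_ops added below and are assumed to come from the ethdev patch of this series; wait_for_rx_interrupt() is a placeholder for whatever epoll/event mechanism the application uses.

/*
 * Hypothetical usage sketch -- the rte_eth_dev_rx_queue_intr_* wrapper names
 * are assumptions based on the dev_ops added in this patch, not APIs defined
 * by the patch itself.  Assumes the port was configured with
 * dev_conf.intr_conf.rxq = 1 before rte_eth_dev_start().
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
rx_queue_loop(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb_rx;

	for (;;) {
		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		if (nb_rx == 0) {
			/* queue idle: arm the rx interrupt, sleep until the
			 * NIC signals new descriptors, then disarm and poll */
			rte_eth_dev_rx_queue_intr_enable(port_id, queue_id);
			wait_for_rx_interrupt(port_id, queue_id); /* placeholder */
			rte_eth_dev_rx_queue_intr_disable(port_id, queue_id);
			continue;
		}
		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]); /* stand-in for real processing */
	}
}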
  

Comments

Michael Qiu Jan. 29, 2015, 3:40 a.m. UTC | #1
On 1/28/2015 5:52 PM, Danny Zhou wrote:
> Signed-off-by: Danny Zhou <danny.zhou@intel.com>
> ---
>  lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 371 ++++++++++++++++++++++++++++++++++++
>  lib/librte_pmd_ixgbe/ixgbe_ethdev.h |   9 +
>  2 files changed, 380 insertions(+)
>
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> index b341dd0..39f883a 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> @@ -60,6 +60,7 @@
>  #include <rte_atomic.h>
>  #include <rte_malloc.h>
>  #include <rte_random.h>
> +#include <rte_spinlock.h>
>  #include <rte_dev.h>
>  
>  #include "ixgbe_logs.h"
> @@ -173,6 +174,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
>  			uint16_t reta_size);
>  static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
>  static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
> +static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
>  static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
>  static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
>  static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
> @@ -186,11 +188,14 @@ static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_conf
>  /* For Virtual Function support */
>  static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
>  		struct rte_eth_dev *eth_dev);
> +static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
> +static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
>  static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
>  static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
>  static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
>  static void ixgbevf_dev_close(struct rte_eth_dev *dev);
>  static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
> +static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
>  static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
>  		struct rte_eth_stats *stats);
>  static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
> @@ -198,8 +203,15 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
>  		uint16_t vlan_id, int on);
>  static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
>  		uint16_t queue, int on);
> +static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);

^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>  static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
>  static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
> +static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
> +		void *param);
> +static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
> +static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
> +static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);

Why is static void ixgbevf_set_ivar() declared twice here? Or are the two
meant to be different?

> +static void ixgbevf_configure_msix(struct  ixgbe_hw *hw);
>  
>  /* For Eth VMDQ APIs support */
>  static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
> @@ -217,6 +229,11 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
>  static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
>  		uint8_t	rule_id);

[...]
> +static void
> +ixgbe_configure_msix(struct ixgbe_hw *hw)
> +{
> +	int queue_id;
> +	u32 mask;
> +	u32 gpie;
> +
> +	/* set GPIE for in MSI-x mode */
> +	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
> +	gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
> +		   IXGBE_GPIE_OCD;
> +	gpie |= IXGBE_GPIE_EIAME;

Since you immediately overwrite gpie with other flags, why read the register
into gpie first?

Or was the read only intended to reset the register?

I guess it should be:

+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+		IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;

This may not be correct, as I am not familiar with IXGBE.


> +	/*
> +	 * use EIAM to auto-mask when MSI-X interrupt is asserted
> +	 * this saves a register write for every interrupt
> +	 */
> +	switch (hw->mac.type) {
> +	case ixgbe_mac_82598EB:
> +		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
> +		break;
> +	case ixgbe_mac_82599EB:
> +	case ixgbe_mac_X540:
> +	default:
> +		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
> +		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
> +		break;
> +	}
> +	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
> +
> +	/*
> +	* Populate the IVAR table and set the ITR values to the
> +	* corresponding register.
> +	*/
> +	for (queue_id = 0; queue_id < VFIO_MAX_QUEUE_ID; queue_id++)
> +		ixgbe_set_ivar(hw, 0, queue_id, queue_id);
> +
> +	switch (hw->mac.type) {
> +	case ixgbe_mac_82598EB:
> +		ixgbe_set_ivar(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
> +			       VFIO_MAX_QUEUE_ID);
> +		break;
> +	case ixgbe_mac_82599EB:
> +	case ixgbe_mac_X540:
> +		ixgbe_set_ivar(hw, -1, 1, 32);

It may be better to turn those values into macros, just as above.
 
> +		break;
> +	default:
> +		break;
> +	}
> +	IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id), 1950);

Also, what does "1950" stand for here?

> +
> +	/* set up to autoclear timer, and the vectors */
> +	mask = IXGBE_EIMS_ENABLE_MASK;
> +	mask &= ~(IXGBE_EIMS_OTHER |
> +		  IXGBE_EIMS_MAILBOX |
> +		  IXGBE_EIMS_LSC);
> +
> +	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
> +}
> +
>  static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
>  	uint16_t queue_idx, uint16_t tx_rate)
>  {
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> index 1383194..328c387 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> @@ -38,6 +38,8 @@
>  #include "ixgbe/ixgbe_dcb_82598.h"
>  #include "ixgbe_bypass.h"
>  
> +#include <rte_spinlock.h>
> +
>  /* need update link, bit flag */
>  #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
>  #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
> @@ -98,6 +100,11 @@
>  #define IXGBE_5TUPLE_MAX_PRI            7
>  #define IXGBE_5TUPLE_MIN_PRI            1
>  
> +#define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf interrupt enable mask */
> +#define IXGBE_VF_MAXMSIVECTOR			1
> +/* maximum other interrupts besides rx&tx*/
> +#define IXGBE_MAX_OTHER_INTR		1
> +#define IXGBEVF_MAX_OTHER_INTR		1
>  /*
>   * Information about the fdir mode.
>   */
> @@ -116,6 +123,7 @@ struct ixgbe_hw_fdir_info {
>  struct ixgbe_interrupt {
>  	uint32_t flags;
>  	uint32_t mask;
> +	rte_spinlock_t lock;
>  };
>  
>  struct ixgbe_stat_mapping_registers {
> @@ -260,6 +268,7 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
>  		uint16_t rx_queue_id);
>  
>  int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
> +int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
>  
>  int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
>
  
Zhou, Danny Jan. 29, 2015, 5:39 a.m. UTC | #2
> -----Original Message-----
> From: Qiu, Michael
> Sent: Thursday, January 29, 2015 11:40 AM
> To: Zhou, Danny; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v1 2/5] ixgbe: enable rx queue interrupts for both PF and VF
> 
> On 1/28/2015 5:52 PM, Danny Zhou wrote:
> > Signed-off-by: Danny Zhou <danny.zhou@intel.com>
> > ---
> >  lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 371 ++++++++++++++++++++++++++++++++++++
> >  lib/librte_pmd_ixgbe/ixgbe_ethdev.h |   9 +
> >  2 files changed, 380 insertions(+)
> >
> > diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> > index b341dd0..39f883a 100644
> > --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> > +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> > @@ -60,6 +60,7 @@
> >  #include <rte_atomic.h>
> >  #include <rte_malloc.h>
> >  #include <rte_random.h>
> > +#include <rte_spinlock.h>
> >  #include <rte_dev.h>
> >
> >  #include "ixgbe_logs.h"
> > @@ -173,6 +174,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
> >  			uint16_t reta_size);
> >  static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
> >  static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
> > +static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
> >  static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
> >  static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
> >  static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
> > @@ -186,11 +188,14 @@ static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_conf
> >  /* For Virtual Function support */
> >  static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
> >  		struct rte_eth_dev *eth_dev);
> > +static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
> > +static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
> >  static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
> >  static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
> >  static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
> >  static void ixgbevf_dev_close(struct rte_eth_dev *dev);
> >  static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
> > +static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
> >  static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
> >  		struct rte_eth_stats *stats);
> >  static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
> > @@ -198,8 +203,15 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
> >  		uint16_t vlan_id, int on);
> >  static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
> >  		uint16_t queue, int on);
> > +static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
> 
> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> >  static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
> >  static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
> > +static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
> > +		void *param);
> > +static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
> > +static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
> > +static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
> 
> Why is static void ixgbevf_set_ivar() declared twice here? Or are the two
> meant to be different?
> 

Good catch.

> > +static void ixgbevf_configure_msix(struct  ixgbe_hw *hw);
> >
> >  /* For Eth VMDQ APIs support */
> >  static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
> > @@ -217,6 +229,11 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
> >  static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
> >  		uint8_t	rule_id);
> 
> [...]
> > +static void
> > +ixgbe_configure_msix(struct ixgbe_hw *hw)
> > +{
> > +	int queue_id;
> > +	u32 mask;
> > +	u32 gpie;
> > +
> > +	/* set GPIE for in MSI-x mode */
> > +	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
> > +	gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
> > +		   IXGBE_GPIE_OCD;
> > +	gpie |= IXGBE_GPIE_EIAME;
> 
> Since you immediately overwrite gpie with other flags, why read the register
> into gpie first?
> 
> Or was the read only intended to reset the register?
> 
> I guess it should be:
> 
> +	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
> +	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
> +		IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
> 
> This may not be correct, as I am not familiar with IXGBE.
> 
> 
Accepted (see the sketch after this message).

> > +	/*
> > +	 * use EIAM to auto-mask when MSI-X interrupt is asserted
> > +	 * this saves a register write for every interrupt
> > +	 */
> > +	switch (hw->mac.type) {
> > +	case ixgbe_mac_82598EB:
> > +		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
> > +		break;
> > +	case ixgbe_mac_82599EB:
> > +	case ixgbe_mac_X540:
> > +	default:
> > +		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
> > +		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
> > +		break;
> > +	}
> > +	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
> > +
> > +	/*
> > +	* Populate the IVAR table and set the ITR values to the
> > +	* corresponding register.
> > +	*/
> > +	for (queue_id = 0; queue_id < VFIO_MAX_QUEUE_ID; queue_id++)
> > +		ixgbe_set_ivar(hw, 0, queue_id, queue_id);
> > +
> > +	switch (hw->mac.type) {
> > +	case ixgbe_mac_82598EB:
> > +		ixgbe_set_ivar(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
> > +			       VFIO_MAX_QUEUE_ID);
> > +		break;
> > +	case ixgbe_mac_82599EB:
> > +	case ixgbe_mac_X540:
> > +		ixgbe_set_ivar(hw, -1, 1, 32);
> 
> It may be better to turn those values into macros, just as above.
> 

To be fixed in V2.

> > +		break;
> > +	default:
> > +		break;
> > +	}
> > +	IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id), 1950);
> 
> Also, what does "1950" stand for here?
> 

It is an empirically chosen interrupt throttle value used for the interrupt mode.

> > +
> > +	/* set up to autoclear timer, and the vectors */
> > +	mask = IXGBE_EIMS_ENABLE_MASK;
> > +	mask &= ~(IXGBE_EIMS_OTHER |
> > +		  IXGBE_EIMS_MAILBOX |
> > +		  IXGBE_EIMS_LSC);
> > +
> > +	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
> > +}
> > +
> >  static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
> >  	uint16_t queue_idx, uint16_t tx_rate)
> >  {
> > diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> > index 1383194..328c387 100644
> > --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> > +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> > @@ -38,6 +38,8 @@
> >  #include "ixgbe/ixgbe_dcb_82598.h"
> >  #include "ixgbe_bypass.h"
> >
> > +#include <rte_spinlock.h>
> > +
> >  /* need update link, bit flag */
> >  #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
> >  #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
> > @@ -98,6 +100,11 @@
> >  #define IXGBE_5TUPLE_MAX_PRI            7
> >  #define IXGBE_5TUPLE_MIN_PRI            1
> >
> > +#define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf interrupt enable mask */
> > +#define IXGBE_VF_MAXMSIVECTOR			1
> > +/* maximum other interrupts besides rx&tx*/
> > +#define IXGBE_MAX_OTHER_INTR		1
> > +#define IXGBEVF_MAX_OTHER_INTR		1
> >  /*
> >   * Information about the fdir mode.
> >   */
> > @@ -116,6 +123,7 @@ struct ixgbe_hw_fdir_info {
> >  struct ixgbe_interrupt {
> >  	uint32_t flags;
> >  	uint32_t mask;
> > +	rte_spinlock_t lock;
> >  };
> >
> >  struct ixgbe_stat_mapping_registers {
> > @@ -260,6 +268,7 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
> >  		uint16_t rx_queue_id);
> >
> >  int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
> > +int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
> >
> >  int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
> >
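
Since the read-modify-write suggestion above was accepted, here is a consolidated sketch of how the start of ixgbe_configure_msix() could look once that comment is addressed; it only restates the accepted review comment and is not the actual v2 code.

static void
ixgbe_configure_msix(struct ixgbe_hw *hw)
{
	u32 gpie;

	/*
	 * read-modify-write GPIE: preserve whatever is already set and add
	 * MSI-X mode, PBA support, OCD and EIAM auto-mask in one step
	 */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
		IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;

	/* ... EIAM auto-mask setup, IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie),
	 * IVAR/EITR programming and EIAC setup as in the patch below ... */
}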
  
Michael Qiu Jan. 29, 2015, 10:13 a.m. UTC | #3
On 1/29/2015 1:39 PM, Zhou, Danny wrote:
>
>> -----Original Message-----
>> From: Qiu, Michael
>> Sent: Thursday, January 29, 2015 11:40 AM
>> To: Zhou, Danny; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH v1 2/5] ixgbe: enable rx queue interrupts for both PF and VF
>>
>> On 1/28/2015 5:52 PM, Danny Zhou wrote:
>>> Signed-off-by: Danny Zhou <danny.zhou@intel.com>
>>> ---
>>>  lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 371 ++++++++++++++++++++++++++++++++++++
>>>  lib/librte_pmd_ixgbe/ixgbe_ethdev.h |   9 +
>>>  2 files changed, 380 insertions(+)
>>>
>>> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
>>> index b341dd0..39f883a 100644
>>> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
>>> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
>>> @@ -60,6 +60,7 @@
>>>  #include <rte_atomic.h>
>>>  #include <rte_malloc.h>
>>>  #include <rte_random.h>
>>> +#include <rte_spinlock.h>
>>>  #include <rte_dev.h>
>>>
>>>  #include "ixgbe_logs.h"
>>> @@ -173,6 +174,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
>>>  			uint16_t reta_size);
>>>  static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
>>>  static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
>>> +static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
>>>  static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
>>>  static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
>>>  static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
>>> @@ -186,11 +188,14 @@ static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_conf
>>>  /* For Virtual Function support */
>>>  static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
>>>  		struct rte_eth_dev *eth_dev);
>>> +static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
>>> +static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
>>>  static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
>>>  static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
>>>  static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
>>>  static void ixgbevf_dev_close(struct rte_eth_dev *dev);
>>>  static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
>>> +static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
>>>  static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
>>>  		struct rte_eth_stats *stats);
>>>  static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
>>> @@ -198,8 +203,15 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
>>>  		uint16_t vlan_id, int on);
>>>  static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
>>>  		uint16_t queue, int on);
>>> +static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
>> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>>  static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
>>>  static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
>>> +static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
>>> +		void *param);
>>> +static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
>>> +static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
>>> +static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
>> Why is static void ixgbevf_set_ivar() declared twice here? Or are the two
>> meant to be different?
>>
> Good catch.
>
>>> +static void ixgbevf_configure_msix(struct  ixgbe_hw *hw);
>>>
>>>  /* For Eth VMDQ APIs support */
>>>  static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
>>> @@ -217,6 +229,11 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
>>>  static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
>>>  		uint8_t	rule_id);
>> [...]
>>> +static void
>>> +ixgbe_configure_msix(struct ixgbe_hw *hw)
>>> +{
>>> +	int queue_id;
>>> +	u32 mask;
>>> +	u32 gpie;
>>> +
>>> +	/* set GPIE for in MSI-x mode */
>>> +	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
>>> +	gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
>>> +		   IXGBE_GPIE_OCD;
>>> +	gpie |= IXGBE_GPIE_EIAME;
>> Since you immediately overwrite gpie with other flags, why read the register
>> into gpie first?
>>
>> Or was the read only intended to reset the register?
>>
>> I guess it should be:
>>
>> +	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
>> +	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
>> +		IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
>>
>> This may not be correct, as I am not familiar with IXGBE.
>>
>>
> Accepted. 
>
>>> +	/*
>>> +	 * use EIAM to auto-mask when MSI-X interrupt is asserted
>>> +	 * this saves a register write for every interrupt
>>> +	 */
>>> +	switch (hw->mac.type) {
>>> +	case ixgbe_mac_82598EB:
>>> +		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
>>> +		break;
>>> +	case ixgbe_mac_82599EB:
>>> +	case ixgbe_mac_X540:
>>> +	default:
>>> +		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
>>> +		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
>>> +		break;
>>> +	}
>>> +	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
>>> +
>>> +	/*
>>> +	* Populate the IVAR table and set the ITR values to the
>>> +	* corresponding register.
>>> +	*/
>>> +	for (queue_id = 0; queue_id < VFIO_MAX_QUEUE_ID; queue_id++)
>>> +		ixgbe_set_ivar(hw, 0, queue_id, queue_id);
>>> +
>>> +	switch (hw->mac.type) {
>>> +	case ixgbe_mac_82598EB:
>>> +		ixgbe_set_ivar(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
>>> +			       VFIO_MAX_QUEUE_ID);
>>> +		break;
>>> +	case ixgbe_mac_82599EB:
>>> +	case ixgbe_mac_X540:
>>> +		ixgbe_set_ivar(hw, -1, 1, 32);
>> It may be better to turn those values into macros, just as above.
>>
> To be fixed in V2.
>
>>> +		break;
>>> +	default:
>>> +		break;
>>> +	}
>>> +	IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id), 1950);
>> Also, what does "1950" stand for here?
>>
> It is an empirically chosen interrupt throttle value used for the interrupt mode.

Then I think it would be better to define it as a macro and briefly document
the value (a sketch of this appears after this message).

Thanks,
Michael
>>> +
>>> +	/* set up to autoclear timer, and the vectors */
>>> +	mask = IXGBE_EIMS_ENABLE_MASK;
>>> +	mask &= ~(IXGBE_EIMS_OTHER |
>>> +		  IXGBE_EIMS_MAILBOX |
>>> +		  IXGBE_EIMS_LSC);
>>> +
>>> +	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
>>> +}
>>> +
>>>  static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
>>>  	uint16_t queue_idx, uint16_t tx_rate)
>>>  {
>>> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
>>> index 1383194..328c387 100644
>>> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
>>> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
>>> @@ -38,6 +38,8 @@
>>>  #include "ixgbe/ixgbe_dcb_82598.h"
>>>  #include "ixgbe_bypass.h"
>>>
>>> +#include <rte_spinlock.h>
>>> +
>>>  /* need update link, bit flag */
>>>  #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
>>>  #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
>>> @@ -98,6 +100,11 @@
>>>  #define IXGBE_5TUPLE_MAX_PRI            7
>>>  #define IXGBE_5TUPLE_MIN_PRI            1
>>>
>>> +#define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf interrupt enable mask */
>>> +#define IXGBE_VF_MAXMSIVECTOR			1
>>> +/* maximum other interrupts besides rx&tx*/
>>> +#define IXGBE_MAX_OTHER_INTR		1
>>> +#define IXGBEVF_MAX_OTHER_INTR		1
>>>  /*
>>>   * Information about the fdir mode.
>>>   */
>>> @@ -116,6 +123,7 @@ struct ixgbe_hw_fdir_info {
>>>  struct ixgbe_interrupt {
>>>  	uint32_t flags;
>>>  	uint32_t mask;
>>> +	rte_spinlock_t lock;
>>>  };
>>>
>>>  struct ixgbe_stat_mapping_registers {
>>> @@ -260,6 +268,7 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
>>>  		uint16_t rx_queue_id);
>>>
>>>  int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
>>> +int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
>>>
>>>  int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
>>>
>
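
Picking up the two suggestions above about the magic numbers in ixgbe_configure_msix() (the "other causes" vector 32 and the EITR value 1950), a hedged sketch of how they might be expressed as macros in a v2; the macro names and wording are illustrative assumptions, not part of the posted patch.

/*
 * Illustrative macro names only -- assumptions for a possible v2,
 * not defined by this patch.
 */

/* MSI-X vector reserved for non-queue ("other") causes on 82599/X540 */
#define IXGBE_MISC_VEC_ID               32

/* empirically chosen EITR interrupt throttle value (see discussion above) */
#define IXGBE_EITR_INTERVAL_DEFAULT     1950

/*
 * Intended use inside ixgbe_configure_msix():
 *
 *	ixgbe_set_ivar(hw, -1, 1, IXGBE_MISC_VEC_ID);
 *	IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id), IXGBE_EITR_INTERVAL_DEFAULT);
 */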
  

Patch

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index b341dd0..39f883a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -60,6 +60,7 @@ 
 #include <rte_atomic.h>
 #include <rte_malloc.h>
 #include <rte_random.h>
+#include <rte_spinlock.h>
 #include <rte_dev.h>
 
 #include "ixgbe_logs.h"
@@ -173,6 +174,7 @@  static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -186,11 +188,14 @@  static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_conf
 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
 		struct rte_eth_dev *eth_dev);
+static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
 		struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -198,8 +203,15 @@  static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
 		uint16_t queue, int on);
+static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+		void *param);
+static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
+static void ixgbevf_configure_msix(struct  ixgbe_hw *hw);
 
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
@@ -217,6 +229,11 @@  static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
 		uint8_t	rule_id);
 
+static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+static void ixgbe_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
+static void ixgbe_configure_msix(struct  ixgbe_hw *hw);
+
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		uint16_t queue_idx, uint16_t tx_rate);
 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
@@ -338,6 +355,8 @@  static struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.tx_queue_start	      = ixgbe_dev_tx_queue_start,
 	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
 	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
 	.rx_queue_release     = ixgbe_dev_rx_queue_release,
 	.rx_queue_count       = ixgbe_dev_rx_queue_count,
 	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
@@ -412,8 +431,11 @@  static struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.vlan_offload_set     = ixgbevf_vlan_offload_set,
 	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
 	.rx_queue_release     = ixgbe_dev_rx_queue_release,
+	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
 	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
 	.tx_queue_release     = ixgbe_dev_tx_queue_release,
+	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
 	.mac_addr_add         = ixgbevf_add_mac_addr,
 	.mac_addr_remove      = ixgbevf_remove_mac_addr,
 };
@@ -908,6 +930,9 @@  eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 			eth_dev->data->port_id, pci_dev->id.vendor_id,
 			pci_dev->id.device_id);
 
+	/* set the maximum number of interrupt vectors to request via VFIO */
+	pci_dev->intr_handle.max_intr = hw->mac.max_rx_queues + IXGBE_MAX_OTHER_INTR;
+
 	rte_intr_callback_register(&(pci_dev->intr_handle),
 		ixgbe_dev_interrupt_handler, (void *)eth_dev);
 
@@ -1084,6 +1109,14 @@  eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 			return (-EIO);
 	}
 
+	/* set the maximum number of interrupt vectors to request via VFIO */
+	pci_dev->intr_handle.max_intr = hw->mac.max_rx_queues + IXGBEVF_MAX_OTHER_INTR;
+
+	rte_intr_callback_register(&(pci_dev->intr_handle),
+		ixgbevf_dev_interrupt_handler, (void *)eth_dev);
+
+	rte_intr_enable(&(pci_dev->intr_handle));
+
 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
@@ -1485,6 +1518,9 @@  ixgbe_dev_start(struct rte_eth_dev *dev)
 	/* configure PF module if SRIOV enabled */
 	ixgbe_pf_host_configure(dev);
 
+	/* configure MSI-X so the lcore can sleep until an rx interrupt arrives */
+	ixgbe_configure_msix(hw);
+
 	/* initialize transmission unit */
 	ixgbe_dev_tx_init(dev);
 
@@ -1560,6 +1596,10 @@  skip_link_setup:
 	if (dev->data->dev_conf.intr_conf.lsc != 0)
 		ixgbe_dev_lsc_interrupt_setup(dev);
 
+	/* check if rxq interrupt is enabled */
+	if (dev->data->dev_conf.intr_conf.rxq != 0)
+		ixgbe_dev_rxq_interrupt_setup(dev);
+
 	/* resume enabled intr since hw reset */
 	ixgbe_enable_intr(dev);
 
@@ -2221,6 +2261,29 @@  ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once, during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_RTX_QUEUE;
+	rte_spinlock_init(&intr->lock);
+
+	return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -2258,6 +2321,30 @@  ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+	uint32_t eicr;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	/* clear all cause mask */
+	ixgbevf_intr_disable(hw);
+
+	/* read-on-clear nic registers here */
+	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+	PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+	intr->flags = 0;
+
+	/* set flag for async link update */
+	if (eicr & IXGBE_EICR_LSC)
+		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+	return 0;
+}
+
 /**
  * It gets and then prints the link status.
  *
@@ -2353,6 +2440,18 @@  ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_DRV_LOG(DEBUG, "enable intr immediately");
+	ixgbevf_intr_enable(hw);
+	rte_intr_enable(&(dev->pci_dev->intr_handle));
+	return 0;
+}
+
 /**
  * Interrupt handler which shall be registered for alarm callback for delayed
  * handling specific interrupt to wait for the stable nic state. As the
@@ -2414,6 +2513,15 @@  ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbe_dev_interrupt_action(dev);
 }
 
+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+							void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	ixgbevf_dev_interrupt_get_status(dev);
+	ixgbevf_dev_interrupt_action(dev);
+}
+
 static int
 ixgbe_dev_led_on(struct rte_eth_dev *dev)
 {
@@ -2912,6 +3020,19 @@  ixgbevf_intr_disable(struct ixgbe_hw *hw)
 	IXGBE_WRITE_FLUSH(hw);
 }
 
+static void
+ixgbevf_intr_enable(struct ixgbe_hw *hw)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	/* VF: enable interrupt auto-mask, auto-clear and unmask causes */
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
 static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -2974,6 +3095,11 @@  ixgbevf_dev_start(struct rte_eth_dev *dev)
 
 	ixgbevf_dev_rxtx_start(dev);
 
+	ixgbevf_configure_msix(hw);
+
+	/* Re-enable interrupt for VF */
+	ixgbevf_intr_enable(hw);
+
 	return 0;
 }
 
@@ -3511,6 +3637,251 @@  ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 	return 0;
 }
 
+
+static int
+ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	uint32_t mask;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	rte_spinlock_lock(&(intr->lock));
+	mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+	mask |= (1 << queue_id);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+	rte_spinlock_unlock(&(intr->lock));
+
+	return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	uint32_t mask;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	rte_spinlock_lock(&(intr->lock));
+	mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+	mask &= ~(1 << queue_id);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+	rte_spinlock_unlock(&(intr->lock));
+
+	return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	u32 mask;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	rte_spinlock_lock(&(intr->lock));
+	if (queue_id < 16) {
+		ixgbe_disable_intr(hw);
+		intr->mask |= (1 << queue_id);
+		ixgbe_enable_intr(dev);
+	} else if (queue_id < 32) {
+		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+		mask &= (1 << queue_id);
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+	} else if (queue_id < 64) {
+		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+		mask &= (1 << (queue_id - 32));
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+	}
+	rte_spinlock_unlock(&(intr->lock));
+
+	return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	u32 mask;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	rte_spinlock_lock(&(intr->lock));
+	if (queue_id < 16) {
+		ixgbe_disable_intr(hw);
+		intr->mask &= ~(1 << queue_id);
+		ixgbe_enable_intr(dev);
+	} else if (queue_id < 32) {
+		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+		mask &= ~(1 << queue_id);
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+	} else if (queue_id < 64) {
+		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+		mask &= ~(1 << (queue_id - 32));
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+	}
+	rte_spinlock_unlock(&(intr->lock));
+
+	return 0;
+}
+
+static void
+ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction,
+			u8 queue, u8 msix_vector)
+{
+	u32 ivar, index;
+	if (direction == -1) {
+		/* other causes */
+		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+		ivar &= ~0xFF;
+		ivar |= msix_vector;
+		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+	} else {
+		/* tx or rx cause */
+		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+		index = ((16 * (queue & 1)) + (8 * direction));
+		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+		ivar &= ~(0xFF << index);
+		ivar |= (msix_vector << index);
+		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+	}
+}
+
+/**
+ * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @hw: pointer to ixgbe_hw struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ */
+static void
+ixgbe_set_ivar(struct ixgbe_hw *hw, s8 direction,
+			   u8 queue, u8 msix_vector)
+{
+	u32 ivar, index;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+		if (direction == -1)
+			direction = 0;
+		index = (((direction * 64) + queue) >> 2) & 0x1F;
+		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+		ivar &= ~(0xFF << (8 * (queue & 0x3)));
+		ivar |= (msix_vector << (8 * (queue & 0x3)));
+		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		if (direction == -1) {
+			/* other causes */
+			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+			index = ((queue & 1) * 8);
+			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+			ivar &= ~(0xFF << index);
+			ivar |= (msix_vector << index);
+			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+			break;
+		} else {
+			/* tx or rx causes */
+			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+			index = ((16 * (queue & 1)) + (8 * direction));
+			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+			ivar &= ~(0xFF << index);
+			ivar |= (msix_vector << index);
+			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
+			break;
+		}
+	default:
+		break;
+	}
+}
+
+
+static void
+ixgbevf_configure_msix(struct ixgbe_hw *hw)
+{
+	u32 queue_idx, vector_idx;
+	/* Configure all RX queues of VF */
+	for (vector_idx = 0; vector_idx < IXGBE_VF_MAXMSIVECTOR; vector_idx++) {
+		for (queue_idx = 0; queue_idx < (hw->mac.max_rx_queues - 1); queue_idx++)
+			ixgbevf_set_ivar(hw, 0, queue_idx, vector_idx);
+	}
+
+	/* Configure VF other causes ivar */
+	ixgbevf_set_ivar(hw, -1, 1, vector_idx);
+}
+
+/**
+ * ixgbe_configure_msix - Configure MSI-X hardware
+ * @hw: board private structure
+ * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ */
+static void
+ixgbe_configure_msix(struct ixgbe_hw *hw)
+{
+	int queue_id;
+	u32 mask;
+	u32 gpie;
+
+	/* set GPIE for in MSI-x mode */
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+		   IXGBE_GPIE_OCD;
+	gpie |= IXGBE_GPIE_EIAME;
+	/*
+	 * use EIAM to auto-mask when MSI-X interrupt is asserted
+	 * this saves a register write for every interrupt
+	 */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	default:
+		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+		break;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+	/*
+	* Populate the IVAR table and set the ITR values to the
+	* corresponding register.
+	*/
+	for (queue_id = 0; queue_id < VFIO_MAX_QUEUE_ID; queue_id++)
+		ixgbe_set_ivar(hw, 0, queue_id, queue_id);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ixgbe_set_ivar(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+			       VFIO_MAX_QUEUE_ID);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		ixgbe_set_ivar(hw, -1, 1, 32);
+		break;
+	default:
+		break;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id), 1950);
+
+	/* set up to autoclear timer, and the vectors */
+	mask = IXGBE_EIMS_ENABLE_MASK;
+	mask &= ~(IXGBE_EIMS_OTHER |
+		  IXGBE_EIMS_MAILBOX |
+		  IXGBE_EIMS_LSC);
+
+	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+}
+
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 	uint16_t queue_idx, uint16_t tx_rate)
 {
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index 1383194..328c387 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -38,6 +38,8 @@ 
 #include "ixgbe/ixgbe_dcb_82598.h"
 #include "ixgbe_bypass.h"
 
+#include <rte_spinlock.h>
+
 /* need update link, bit flag */
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
 #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
@@ -98,6 +100,11 @@ 
 #define IXGBE_5TUPLE_MAX_PRI            7
 #define IXGBE_5TUPLE_MIN_PRI            1
 
+#define IXGBE_VF_IRQ_ENABLE_MASK        3          /* vf interrupt enable mask */
+#define IXGBE_VF_MAXMSIVECTOR			1
+/* maximum other interrupts besides rx&tx*/
+#define IXGBE_MAX_OTHER_INTR		1
+#define IXGBEVF_MAX_OTHER_INTR		1
 /*
  * Information about the fdir mode.
  */
@@ -116,6 +123,7 @@  struct ixgbe_hw_fdir_info {
 struct ixgbe_interrupt {
 	uint32_t flags;
 	uint32_t mask;
+	rte_spinlock_t lock;
 };
 
 struct ixgbe_stat_mapping_registers {
@@ -260,6 +268,7 @@  uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
 		uint16_t rx_queue_id);
 
 int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
 
 int ixgbe_dev_rx_init(struct rte_eth_dev *dev);