diff mbox

[dpdk-dev,5/5] ixgbe: rename igb_* to ixgbe_*

Message ID 1425695004-29605-6-git-send-email-stephen@networkplumber.org (mailing list archive)
State Accepted, archived
Headers show

Commit Message

Stephen Hemminger March 7, 2015, 2:23 a.m. UTC
To avoid any possible confusion or breakage, rename all the structures
of the ixgbe driver to use ixgbe_ rather than igb_, because igb is a
different driver.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c   |   2 +-
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 124 +++++++++++++++++-----------------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h     |  26 +++----
 lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c |  52 +++++++-------
 4 files changed, 102 insertions(+), 102 deletions(-)

Comments

Bruce Richardson March 9, 2015, 1:49 p.m. UTC | #1
On Fri, Mar 06, 2015 at 06:23:24PM -0800, Stephen Hemminger wrote:
> To avoid any possible confusion or breakage, rename all the structures
> of ixgbe driver to use ixgbe_ rather than igb_ because igb is a
> different driver.
> 
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Apart from one small fix to a call to set_tx_function() that belongs in patch
4, this looks ok.

Acked-by: Bruce Richardson <bruce.richardson@intel.com>

> ---
>  lib/librte_pmd_ixgbe/ixgbe_ethdev.c   |   2 +-
>  lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 124 +++++++++++++++++-----------------
>  lib/librte_pmd_ixgbe/ixgbe_rxtx.h     |  26 +++----
>  lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c |  52 +++++++-------
>  4 files changed, 102 insertions(+), 102 deletions(-)
> 
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> index e1504f4..5473858 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> @@ -748,7 +748,7 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
>  	 * RX and TX function.
>  	 */
>  	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
> -		struct igb_tx_queue *txq;
> +		struct ixgbe_tx_queue *txq;
>  		/* TX queue function in primary, set by last queue initialized
>  		 * Tx queue may not initialized by primary process */
>  		if (eth_dev->data->tx_queues) {
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> index c5ba687..1848a13 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> @@ -122,9 +122,9 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
>   * Return the total number of buffers freed.
>   */
>  static inline int __attribute__((always_inline))
> -ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
> +ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
>  {
> -	struct igb_tx_entry *txep;
> +	struct ixgbe_tx_entry *txep;
>  	uint32_t status;
>  	int i;
>  
> @@ -208,11 +208,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
>   * Copy mbuf pointers to the S/W ring.
>   */
>  static inline void
> -ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
> +ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
>  		      uint16_t nb_pkts)
>  {
>  	volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
> -	struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
> +	struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
>  	const int N_PER_LOOP = 4;
>  	const int N_PER_LOOP_MASK = N_PER_LOOP-1;
>  	int mainpart, leftover;
> @@ -244,7 +244,7 @@ static inline uint16_t
>  tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  	     uint16_t nb_pkts)
>  {
> -	struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
> +	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
>  	volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
>  	uint16_t n = 0;
>  
> @@ -352,7 +352,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
>  }
>  
>  static inline void
> -ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
> +ixgbe_set_xmit_ctx(struct ixgbe_tx_queue* txq,
>  		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
>  		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
>  {
> @@ -442,7 +442,7 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
>   * or create a new context descriptor.
>   */
>  static inline uint32_t
> -what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
> +what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
>  		union ixgbe_tx_offload tx_offload)
>  {
>  	/* If match with the current used context */
> @@ -498,9 +498,9 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
>  
>  /* Reset transmit descriptors after they have been used */
>  static inline int
> -ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
> +ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
>  {
> -	struct igb_tx_entry *sw_ring = txq->sw_ring;
> +	struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
>  	volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
>  	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
>  	uint16_t nb_tx_desc = txq->nb_tx_desc;
> @@ -559,9 +559,9 @@ uint16_t
>  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		uint16_t nb_pkts)
>  {
> -	struct igb_tx_queue *txq;
> -	struct igb_tx_entry *sw_ring;
> -	struct igb_tx_entry *txe, *txn;
> +	struct ixgbe_tx_queue *txq;
> +	struct ixgbe_tx_entry *sw_ring;
> +	struct ixgbe_tx_entry *txe, *txn;
>  	volatile union ixgbe_adv_tx_desc *txr;
>  	volatile union ixgbe_adv_tx_desc *txd;
>  	struct rte_mbuf     *tx_pkt;
> @@ -938,10 +938,10 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
>  #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
>  #endif
>  static inline int
> -ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
> +ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
>  {
>  	volatile union ixgbe_adv_rx_desc *rxdp;
> -	struct igb_rx_entry *rxep;
> +	struct ixgbe_rx_entry *rxep;
>  	struct rte_mbuf *mb;
>  	uint16_t pkt_len;
>  	uint64_t pkt_flags;
> @@ -1022,10 +1022,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
>  }
>  
>  static inline int
> -ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
> +ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
>  {
>  	volatile union ixgbe_adv_rx_desc *rxdp;
> -	struct igb_rx_entry *rxep;
> +	struct ixgbe_rx_entry *rxep;
>  	struct rte_mbuf *mb;
>  	uint16_t alloc_idx;
>  	uint64_t dma_addr;
> @@ -1071,7 +1071,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
>  }
>  
>  static inline uint16_t
> -ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
> +ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>  			 uint16_t nb_pkts)
>  {
>  	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
> @@ -1095,7 +1095,7 @@ static inline uint16_t
>  rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  	     uint16_t nb_pkts)
>  {
> -	struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
> +	struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
>  	uint16_t nb_rx = 0;
>  
>  	/* Any previously recv'd pkts will be returned from the Rx stage */
> @@ -1177,11 +1177,11 @@ uint16_t
>  ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  		uint16_t nb_pkts)
>  {
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_rx_queue *rxq;
>  	volatile union ixgbe_adv_rx_desc *rx_ring;
>  	volatile union ixgbe_adv_rx_desc *rxdp;
> -	struct igb_rx_entry *sw_ring;
> -	struct igb_rx_entry *rxe;
> +	struct ixgbe_rx_entry *sw_ring;
> +	struct ixgbe_rx_entry *rxe;
>  	struct rte_mbuf *rxm;
>  	struct rte_mbuf *nmb;
>  	union ixgbe_adv_rx_desc rxd;
> @@ -1359,11 +1359,11 @@ uint16_t
>  ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  			  uint16_t nb_pkts)
>  {
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_rx_queue *rxq;
>  	volatile union ixgbe_adv_rx_desc *rx_ring;
>  	volatile union ixgbe_adv_rx_desc *rxdp;
> -	struct igb_rx_entry *sw_ring;
> -	struct igb_rx_entry *rxe;
> +	struct ixgbe_rx_entry *sw_ring;
> +	struct ixgbe_rx_entry *rxe;
>  	struct rte_mbuf *first_seg;
>  	struct rte_mbuf *last_seg;
>  	struct rte_mbuf *rxm;
> @@ -1675,7 +1675,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
>  }
>  
>  static void
> -ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
> +ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
>  {
>  	unsigned i;
>  
> @@ -1690,7 +1690,7 @@ ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
>  }
>  
>  static void
> -ixgbe_tx_free_swring(struct igb_tx_queue *txq)
> +ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
>  {
>  	if (txq != NULL &&
>  	    txq->sw_ring != NULL)
> @@ -1698,7 +1698,7 @@ ixgbe_tx_free_swring(struct igb_tx_queue *txq)
>  }
>  
>  static void
> -ixgbe_tx_queue_release(struct igb_tx_queue *txq)
> +ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
>  {
>  	if (txq != NULL && txq->ops != NULL) {
>  		txq->ops->release_mbufs(txq);
> @@ -1713,13 +1713,13 @@ ixgbe_dev_tx_queue_release(void *txq)
>  	ixgbe_tx_queue_release(txq);
>  }
>  
> -/* (Re)set dynamic igb_tx_queue fields to defaults */
> +/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
>  static void
> -ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
> +ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
>  {
>  	static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
>  			.buffer_addr = 0}};
> -	struct igb_tx_entry *txe = txq->sw_ring;
> +	struct ixgbe_tx_entry *txe = txq->sw_ring;
>  	uint16_t prev, i;
>  
>  	/* Zero out HW ring memory */
> @@ -1765,7 +1765,7 @@ static const struct ixgbe_txq_ops def_txq_ops = {
>   * in dev_init by secondary process when attaching to an existing ethdev.
>   */
>  void
> -ixgbe_set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)
> +ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
>  {
>  	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
>  	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
> @@ -1802,7 +1802,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  			 const struct rte_eth_txconf *tx_conf)
>  {
>  	const struct rte_memzone *tz;
> -	struct igb_tx_queue *txq;
> +	struct ixgbe_tx_queue *txq;
>  	struct ixgbe_hw     *hw;
>  	uint16_t tx_rs_thresh, tx_free_thresh;
>  
> @@ -1899,7 +1899,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  	}
>  
>  	/* First allocate the tx queue data structure */
> -	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
> +	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
>  				 RTE_CACHE_LINE_SIZE, socket_id);
>  	if (txq == NULL)
>  		return (-ENOMEM);
> @@ -1948,7 +1948,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  
>  	/* Allocate software ring */
>  	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
> -				sizeof(struct igb_tx_entry) * nb_desc,
> +				sizeof(struct ixgbe_tx_entry) * nb_desc,
>  				RTE_CACHE_LINE_SIZE, socket_id);
>  	if (txq->sw_ring == NULL) {
>  		ixgbe_tx_queue_release(txq);
> @@ -1958,7 +1958,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
>  
>  	/* set up vector or scalar TX function as appropriate */
> -	set_tx_function(dev, txq);
> +	ixgbe_set_tx_function(dev, txq);
>  
>  	txq->ops->reset(txq);
>  
> @@ -1969,7 +1969,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  }
>  
>  static void
> -ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
> +ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
>  {
>  	unsigned i;
>  
> @@ -1994,7 +1994,7 @@ ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
>  }
>  
>  static void
> -ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
> +ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
>  {
>  	if (rxq != NULL) {
>  		ixgbe_rx_queue_release_mbufs(rxq);
> @@ -2019,9 +2019,9 @@ ixgbe_dev_rx_queue_release(void *rxq)
>   */
>  static inline int
>  #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
> -check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
> +check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
>  #else
> -check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
> +check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
>  #endif
>  {
>  	int ret = 0;
> @@ -2071,9 +2071,9 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
>  	return ret;
>  }
>  
> -/* Reset dynamic igb_rx_queue fields back to defaults */
> +/* Reset dynamic ixgbe_rx_queue fields back to defaults */
>  static void
> -ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
> +ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
>  {
>  	static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
>  			.pkt_addr = 0}};
> @@ -2137,7 +2137,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
>  			 struct rte_mempool *mp)
>  {
>  	const struct rte_memzone *rz;
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_rx_queue *rxq;
>  	struct ixgbe_hw     *hw;
>  	int use_def_burst_func = 1;
>  	uint16_t len;
> @@ -2163,7 +2163,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
>  	}
>  
>  	/* First allocate the rx queue data structure */
> -	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
> +	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
>  				 RTE_CACHE_LINE_SIZE, socket_id);
>  	if (rxq == NULL)
>  		return (-ENOMEM);
> @@ -2230,7 +2230,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
>  	len = nb_desc;
>  #endif
>  	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
> -					  sizeof(struct igb_rx_entry) * len,
> +					  sizeof(struct ixgbe_rx_entry) * len,
>  					  RTE_CACHE_LINE_SIZE, socket_id);
>  	if (rxq->sw_ring == NULL) {
>  		ixgbe_rx_queue_release(rxq);
> @@ -2284,7 +2284,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
>  {
>  #define IXGBE_RXQ_SCAN_INTERVAL 4
>  	volatile union ixgbe_adv_rx_desc *rxdp;
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_rx_queue *rxq;
>  	uint32_t desc = 0;
>  
>  	if (rx_queue_id >= dev->data->nb_rx_queues) {
> @@ -2311,7 +2311,7 @@ int
>  ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
>  {
>  	volatile union ixgbe_adv_rx_desc *rxdp;
> -	struct igb_rx_queue *rxq = rx_queue;
> +	struct ixgbe_rx_queue *rxq = rx_queue;
>  	uint32_t desc;
>  
>  	if (unlikely(offset >= rxq->nb_rx_desc))
> @@ -2332,7 +2332,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
>  	PMD_INIT_FUNC_TRACE();
>  
>  	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		struct igb_tx_queue *txq = dev->data->tx_queues[i];
> +		struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
>  		if (txq != NULL) {
>  			txq->ops->release_mbufs(txq);
>  			txq->ops->reset(txq);
> @@ -2340,7 +2340,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
>  	}
>  
>  	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		struct igb_rx_queue *rxq = dev->data->rx_queues[i];
> +		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
>  		if (rxq != NULL) {
>  			ixgbe_rx_queue_release_mbufs(rxq);
>  			ixgbe_reset_rx_queue(rxq);
> @@ -3296,9 +3296,9 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
>  }
>  
>  static int
> -ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
> +ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
>  {
> -	struct igb_rx_entry *rxe = rxq->sw_ring;
> +	struct ixgbe_rx_entry *rxe = rxq->sw_ring;
>  	uint64_t dma_addr;
>  	unsigned i;
>  
> @@ -3512,7 +3512,7 @@ int
>  ixgbe_dev_rx_init(struct rte_eth_dev *dev)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_rx_queue *rxq;
>  	struct rte_pktmbuf_pool_private *mbp_priv;
>  	uint64_t bus_addr;
>  	uint32_t rxctrl;
> @@ -3696,7 +3696,7 @@ void
>  ixgbe_dev_tx_init(struct rte_eth_dev *dev)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_tx_queue *txq;
> +	struct ixgbe_tx_queue *txq;
>  	uint64_t bus_addr;
>  	uint32_t hlreg0;
>  	uint32_t txctrl;
> @@ -3792,8 +3792,8 @@ int
>  ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_tx_queue *txq;
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_tx_queue *txq;
> +	struct ixgbe_rx_queue *rxq;
>  	uint32_t txdctl;
>  	uint32_t dmatxctl;
>  	uint32_t rxctrl;
> @@ -3859,7 +3859,7 @@ int
>  ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_rx_queue *rxq;
>  	uint32_t rxdctl;
>  	int poll_ms;
>  
> @@ -3904,7 +3904,7 @@ int
>  ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_rx_queue *rxq;
>  	uint32_t rxdctl;
>  	int poll_ms;
>  
> @@ -3946,7 +3946,7 @@ int
>  ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_tx_queue *txq;
> +	struct ixgbe_tx_queue *txq;
>  	uint32_t txdctl;
>  	int poll_ms;
>  
> @@ -3987,7 +3987,7 @@ int
>  ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_tx_queue *txq;
> +	struct ixgbe_tx_queue *txq;
>  	uint32_t txdctl;
>  	uint32_t txtdh, txtdt;
>  	int poll_ms;
> @@ -4047,7 +4047,7 @@ int
>  ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_rx_queue *rxq;
>  	struct rte_pktmbuf_pool_private *mbp_priv;
>  	uint64_t bus_addr;
>  	uint32_t srrctl, psrtype = 0;
> @@ -4190,7 +4190,7 @@ void
>  ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_tx_queue *txq;
> +	struct ixgbe_tx_queue *txq;
>  	uint64_t bus_addr;
>  	uint32_t txctrl;
>  	uint16_t i;
> @@ -4231,8 +4231,8 @@ void
>  ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
>  {
>  	struct ixgbe_hw     *hw;
> -	struct igb_tx_queue *txq;
> -	struct igb_rx_queue *rxq;
> +	struct ixgbe_tx_queue *txq;
> +	struct ixgbe_rx_queue *rxq;
>  	uint32_t txdctl;
>  	uint32_t rxdctl;
>  	uint16_t i;
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
> index 42d59f9..4cc1d6c 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
> @@ -75,14 +75,14 @@
>  /**
>   * Structure associated with each descriptor of the RX ring of a RX queue.
>   */
> -struct igb_rx_entry {
> +struct ixgbe_rx_entry {
>  	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
>  };
>  
>  /**
>   * Structure associated with each descriptor of the TX ring of a TX queue.
>   */
> -struct igb_tx_entry {
> +struct ixgbe_tx_entry {
>  	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
>  	uint16_t next_id; /**< Index of next descriptor in ring. */
>  	uint16_t last_id; /**< Index of last scattered descriptor. */
> @@ -91,20 +91,20 @@ struct igb_tx_entry {
>  /**
>   * Structure associated with each descriptor of the TX ring of a TX queue.
>   */
> -struct igb_tx_entry_v {
> +struct ixgbe_tx_entry_v {
>  	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
>  };
>  
>  /**
>   * Structure associated with each RX queue.
>   */
> -struct igb_rx_queue {
> +struct ixgbe_rx_queue {
>  	struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
>  	volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
>  	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
>  	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
>  	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
> -	struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
> +	struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
>  	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
>  	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
>  	uint64_t            mbuf_initializer; /**< value to init mbufs */
> @@ -182,11 +182,11 @@ struct ixgbe_advctx_info {
>  /**
>   * Structure associated with each TX queue.
>   */
> -struct igb_tx_queue {
> +struct ixgbe_tx_queue {
>  	/** TX ring virtual address. */
>  	volatile union ixgbe_adv_tx_desc *tx_ring;
>  	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
> -	struct igb_tx_entry *sw_ring;      /**< virtual address of SW ring. */
> +	struct ixgbe_tx_entry *sw_ring;      /**< virtual address of SW ring. */
>  	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
>  	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
>  	uint16_t            tx_tail;       /**< current value of TDT reg. */
> @@ -216,9 +216,9 @@ struct igb_tx_queue {
>  };
>  
>  struct ixgbe_txq_ops {
> -	void (*release_mbufs)(struct igb_tx_queue *txq);
> -	void (*free_swring)(struct igb_tx_queue *txq);
> -	void (*reset)(struct igb_tx_queue *txq);
> +	void (*release_mbufs)(struct ixgbe_tx_queue *txq);
> +	void (*free_swring)(struct ixgbe_tx_queue *txq);
> +	void (*reset)(struct ixgbe_tx_queue *txq);
>  };
>  
>  /*
> @@ -253,7 +253,7 @@ struct ixgbe_txq_ops {
>   * the queue parameters. Used in tx_queue_setup by primary process and then
>   * in dev_init by secondary process when attaching to an existing ethdev.
>   */
> -void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);
> +void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
>  
>  #ifdef RTE_IXGBE_INC_VECTOR
>  uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> @@ -262,8 +262,8 @@ uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
>  		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
>  uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		uint16_t nb_pkts);
> -int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
> -int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
> +int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
> +int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
>  int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
>  #endif
>  
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
> index 11e9f12..9d8fa8d 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
> @@ -45,12 +45,12 @@
>  #endif
>  
>  static inline void
> -ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
> +ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
>  {
>  	int i;
>  	uint16_t rx_id;
>  	volatile union ixgbe_adv_rx_desc *rxdp;
> -	struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
> +	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
>  	struct rte_mbuf *mb0, *mb1;
>  	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
>  			RTE_PKTMBUF_HEADROOM);
> @@ -187,11 +187,11 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
>   * - don't support ol_flags for rss and csum err
>   */
>  static inline uint16_t
> -_recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
> +_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>  		uint16_t nb_pkts, uint8_t *split_packet)
>  {
>  	volatile union ixgbe_adv_rx_desc *rxdp;
> -	struct igb_rx_entry *sw_ring;
> +	struct ixgbe_rx_entry *sw_ring;
>  	uint16_t nb_pkts_recd;
>  	int pos;
>  	uint64_t var;
> @@ -396,7 +396,7 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
>  }
>  
>  static inline uint16_t
> -reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,
> +reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
>  		uint16_t nb_bufs, uint8_t *split_flags)
>  {
>  	struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
> @@ -468,7 +468,7 @@ uint16_t
>  ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
>  		uint16_t nb_pkts)
>  {
> -	struct igb_rx_queue *rxq = rx_queue;
> +	struct ixgbe_rx_queue *rxq = rx_queue;
>  	uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
>  
>  	/* get some new buffers */
> @@ -517,9 +517,9 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
>  }
>  
>  static inline int __attribute__((always_inline))
> -ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
> +ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
>  {
> -	struct igb_tx_entry_v *txep;
> +	struct ixgbe_tx_entry_v *txep;
>  	uint32_t status;
>  	uint32_t n;
>  	uint32_t i;
> @@ -537,7 +537,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
>  	 * first buffer to free from S/W ring is at index
>  	 * tx_next_dd - (tx_rs_thresh-1)
>  	 */
> -	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
> +	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
>  			(n - 1)];
>  	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
>  	if (likely(m != NULL)) {
> @@ -575,7 +575,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
>  }
>  
>  static inline void __attribute__((always_inline))
> -tx_backlog_entry(struct igb_tx_entry_v *txep,
> +tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
>  		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  {
>  	int i;
> @@ -587,9 +587,9 @@ uint16_t
>  ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		       uint16_t nb_pkts)
>  {
> -	struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
> +	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
>  	volatile union ixgbe_adv_tx_desc *txdp;
> -	struct igb_tx_entry_v *txep;
> +	struct ixgbe_tx_entry_v *txep;
>  	uint16_t n, nb_commit, tx_id;
>  	uint64_t flags = DCMD_DTYP_FLAGS;
>  	uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
> @@ -607,7 +607,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>  
>  	tx_id = txq->tx_tail;
>  	txdp = &txq->tx_ring[tx_id];
> -	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];
> +	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];
>  
>  	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
>  
> @@ -628,7 +628,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>  
>  		/* avoid reach the end of ring */
>  		txdp = &(txq->tx_ring[tx_id]);
> -		txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
> +		txep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);
>  	}
>  
>  	tx_backlog_entry(txep, tx_pkts, nb_commit);
> @@ -651,10 +651,10 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>  }
>  
>  static void
> -ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
> +ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
>  {
>  	unsigned i;
> -	struct igb_tx_entry_v *txe;
> +	struct ixgbe_tx_entry_v *txe;
>  	uint16_t nb_free, max_desc;
>  
>  	if (txq->sw_ring != NULL) {
> @@ -664,36 +664,36 @@ ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
>  		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
>  		     nb_free < max_desc && i != txq->tx_tail;
>  		     i = (i + 1) & max_desc) {
> -			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
> +			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
>  			if (txe->mbuf != NULL)
>  				rte_pktmbuf_free_seg(txe->mbuf);
>  		}
>  		/* reset tx_entry */
>  		for (i = 0; i < txq->nb_tx_desc; i++) {
> -			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
> +			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
>  			txe->mbuf = NULL;
>  		}
>  	}
>  }
>  
>  static void
> -ixgbe_tx_free_swring(struct igb_tx_queue *txq)
> +ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
>  {
>  	if (txq == NULL)
>  		return;
>  
>  	if (txq->sw_ring != NULL) {
> -		rte_free((struct igb_rx_entry *)txq->sw_ring - 1);
> +		rte_free((struct ixgbe_rx_entry *)txq->sw_ring - 1);
>  		txq->sw_ring = NULL;
>  	}
>  }
>  
>  static void
> -ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
> +ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
>  {
>  	static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
>  			.buffer_addr = 0} };
> -	struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
> +	struct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;
>  	uint16_t i;
>  
>  	/* Zero out HW ring memory */
> @@ -730,7 +730,7 @@ static const struct ixgbe_txq_ops vec_txq_ops = {
>  };
>  
>  int
> -ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
> +ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
>  {
>  	uintptr_t p;
>  	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
> @@ -747,14 +747,14 @@ ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
>  	return 0;
>  }
>  
> -int ixgbe_txq_vec_setup(struct igb_tx_queue *txq)
> +int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
>  {
>  	if (txq->sw_ring == NULL)
>  		return -1;
>  
>  	/* leave the first one for overflow */
> -	txq->sw_ring = (struct igb_tx_entry *)
> -		((struct igb_tx_entry_v *)txq->sw_ring + 1);
> +	txq->sw_ring = (struct ixgbe_tx_entry *)
> +		((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);
>  	txq->ops = &vec_txq_ops;
>  
>  	return 0;
> -- 
> 2.1.4
>
Ouyang Changchun March 10, 2015, 5:14 a.m. UTC | #2
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen
> Hemminger
> Sent: Saturday, March 7, 2015 10:23 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 5/5] ixgbe: rename igb_* to ixgbe_*
> 
> To avoid any possible confusion or breakage, rename all the structures of
> ixgbe driver to use ixgbe_ rather than igb_ because igb is a different driver.
> 
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>

Acked-by: Changchun Ouyang <changchun.ouyang@intel.com>
diff mbox

Patch

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index e1504f4..5473858 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -748,7 +748,7 @@  eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	 * RX and TX function.
 	 */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-		struct igb_tx_queue *txq;
+		struct ixgbe_tx_queue *txq;
 		/* TX queue function in primary, set by last queue initialized
 		 * Tx queue may not initialized by primary process */
 		if (eth_dev->data->tx_queues) {
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index c5ba687..1848a13 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -122,9 +122,9 @@  rte_rxmbuf_alloc(struct rte_mempool *mp)
  * Return the total number of buffers freed.
  */
 static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 {
-	struct igb_tx_entry *txep;
+	struct ixgbe_tx_entry *txep;
 	uint32_t status;
 	int i;
 
@@ -208,11 +208,11 @@  tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
  * Copy mbuf pointers to the S/W ring.
  */
 static inline void
-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
 		      uint16_t nb_pkts)
 {
 	volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
-	struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+	struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
 	const int N_PER_LOOP = 4;
 	const int N_PER_LOOP_MASK = N_PER_LOOP-1;
 	int mainpart, leftover;
@@ -244,7 +244,7 @@  static inline uint16_t
 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	     uint16_t nb_pkts)
 {
-	struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
 	volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
 	uint16_t n = 0;
 
@@ -352,7 +352,7 @@  ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static inline void
-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue* txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
 		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
 {
@@ -442,7 +442,7 @@  ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
 		union ixgbe_tx_offload tx_offload)
 {
 	/* If match with the current used context */
@@ -498,9 +498,9 @@  tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 
 /* Reset transmit descriptors after they have been used */
 static inline int
-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 {
-	struct igb_tx_entry *sw_ring = txq->sw_ring;
+	struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
 	volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
 	uint16_t nb_tx_desc = txq->nb_tx_desc;
@@ -559,9 +559,9 @@  uint16_t
 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts)
 {
-	struct igb_tx_queue *txq;
-	struct igb_tx_entry *sw_ring;
-	struct igb_tx_entry *txe, *txn;
+	struct ixgbe_tx_queue *txq;
+	struct ixgbe_tx_entry *sw_ring;
+	struct ixgbe_tx_entry *txe, *txn;
 	volatile union ixgbe_adv_tx_desc *txr;
 	volatile union ixgbe_adv_tx_desc *txd;
 	struct rte_mbuf     *tx_pkt;
@@ -938,10 +938,10 @@  rx_desc_error_to_pkt_flags(uint32_t rx_status)
 #error "PMD IXGBE: LOOK_AHEAD must be 8\n"
 #endif
 static inline int
-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 {
 	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct igb_rx_entry *rxep;
+	struct ixgbe_rx_entry *rxep;
 	struct rte_mbuf *mb;
 	uint16_t pkt_len;
 	uint64_t pkt_flags;
@@ -1022,10 +1022,10 @@  ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 }
 
 static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
 {
 	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct igb_rx_entry *rxep;
+	struct ixgbe_rx_entry *rxep;
 	struct rte_mbuf *mb;
 	uint16_t alloc_idx;
 	uint64_t dma_addr;
@@ -1071,7 +1071,7 @@  ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 }
 
 static inline uint16_t
-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			 uint16_t nb_pkts)
 {
 	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
@@ -1095,7 +1095,7 @@  static inline uint16_t
 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	     uint16_t nb_pkts)
 {
-	struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+	struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
 	uint16_t nb_rx = 0;
 
 	/* Any previously recv'd pkts will be returned from the Rx stage */
@@ -1177,11 +1177,11 @@  uint16_t
 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
-	struct igb_rx_queue *rxq;
+	struct ixgbe_rx_queue *rxq;
 	volatile union ixgbe_adv_rx_desc *rx_ring;
 	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct igb_rx_entry *sw_ring;
-	struct igb_rx_entry *rxe;
+	struct ixgbe_rx_entry *sw_ring;
+	struct ixgbe_rx_entry *rxe;
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *nmb;
 	union ixgbe_adv_rx_desc rxd;
@@ -1359,11 +1359,11 @@  uint16_t
 ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			  uint16_t nb_pkts)
 {
-	struct igb_rx_queue *rxq;
+	struct ixgbe_rx_queue *rxq;
 	volatile union ixgbe_adv_rx_desc *rx_ring;
 	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct igb_rx_entry *sw_ring;
-	struct igb_rx_entry *rxe;
+	struct ixgbe_rx_entry *sw_ring;
+	struct ixgbe_rx_entry *rxe;
 	struct rte_mbuf *first_seg;
 	struct rte_mbuf *last_seg;
 	struct rte_mbuf *rxm;
@@ -1675,7 +1675,7 @@  ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 }
 
 static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 {
 	unsigned i;
 
@@ -1690,7 +1690,7 @@  ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 }
 
 static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 {
 	if (txq != NULL &&
 	    txq->sw_ring != NULL)
@@ -1698,7 +1698,7 @@  ixgbe_tx_free_swring(struct igb_tx_queue *txq)
 }
 
 static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
 {
 	if (txq != NULL && txq->ops != NULL) {
 		txq->ops->release_mbufs(txq);
@@ -1713,13 +1713,13 @@  ixgbe_dev_tx_queue_release(void *txq)
 	ixgbe_tx_queue_release(txq);
 }
 
-/* (Re)set dynamic igb_tx_queue fields to defaults */
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
 static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 {
 	static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
 			.buffer_addr = 0}};
-	struct igb_tx_entry *txe = txq->sw_ring;
+	struct ixgbe_tx_entry *txe = txq->sw_ring;
 	uint16_t prev, i;
 
 	/* Zero out HW ring memory */
@@ -1765,7 +1765,7 @@  static const struct ixgbe_txq_ops def_txq_ops = {
  * in dev_init by secondary process when attaching to an existing ethdev.
  */
 void
-ixgbe_set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
@@ -1802,7 +1802,7 @@  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 const struct rte_eth_txconf *tx_conf)
 {
 	const struct rte_memzone *tz;
-	struct igb_tx_queue *txq;
+	struct ixgbe_tx_queue *txq;
 	struct ixgbe_hw     *hw;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 
@@ -1899,7 +1899,7 @@  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	/* First allocate the tx queue data structure */
-	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
+	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (txq == NULL)
 		return (-ENOMEM);
@@ -1948,7 +1948,7 @@  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	/* Allocate software ring */
 	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
-				sizeof(struct igb_tx_entry) * nb_desc,
+				sizeof(struct ixgbe_tx_entry) * nb_desc,
 				RTE_CACHE_LINE_SIZE, socket_id);
 	if (txq->sw_ring == NULL) {
 		ixgbe_tx_queue_release(txq);
@@ -1958,7 +1958,7 @@  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	/* set up vector or scalar TX function as appropriate */
-	set_tx_function(dev, txq);
+	ixgbe_set_tx_function(dev, txq);
 
 	txq->ops->reset(txq);
 
@@ -1969,7 +1969,7 @@  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 {
 	unsigned i;
 
@@ -1994,7 +1994,7 @@  ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
 }
 
 static void
-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
 {
 	if (rxq != NULL) {
 		ixgbe_rx_queue_release_mbufs(rxq);
@@ -2019,9 +2019,9 @@  ixgbe_dev_rx_queue_release(void *rxq)
  */
 static inline int
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
 #else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 #endif
 {
 	int ret = 0;
@@ -2071,9 +2071,9 @@  check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
 	return ret;
 }
 
-/* Reset dynamic igb_rx_queue fields back to defaults */
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
 static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
 {
 	static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
 			.pkt_addr = 0}};
@@ -2137,7 +2137,7 @@  ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 struct rte_mempool *mp)
 {
 	const struct rte_memzone *rz;
-	struct igb_rx_queue *rxq;
+	struct ixgbe_rx_queue *rxq;
 	struct ixgbe_hw     *hw;
 	int use_def_burst_func = 1;
 	uint16_t len;
@@ -2163,7 +2163,7 @@  ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	/* First allocate the rx queue data structure */
-	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
+	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (rxq == NULL)
 		return (-ENOMEM);
@@ -2230,7 +2230,7 @@  ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	len = nb_desc;
 #endif
 	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
-					  sizeof(struct igb_rx_entry) * len,
+					  sizeof(struct ixgbe_rx_entry) * len,
 					  RTE_CACHE_LINE_SIZE, socket_id);
 	if (rxq->sw_ring == NULL) {
 		ixgbe_rx_queue_release(rxq);
@@ -2284,7 +2284,7 @@  ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 #define IXGBE_RXQ_SCAN_INTERVAL 4
 	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct igb_rx_queue *rxq;
+	struct ixgbe_rx_queue *rxq;
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
@@ -2311,7 +2311,7 @@  int
 ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 {
 	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct igb_rx_queue *rxq = rx_queue;
+	struct ixgbe_rx_queue *rxq = rx_queue;
 	uint32_t desc;
 
 	if (unlikely(offset >= rxq->nb_rx_desc))
@@ -2332,7 +2332,7 @@  ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		struct igb_tx_queue *txq = dev->data->tx_queues[i];
+		struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
 		if (txq != NULL) {
 			txq->ops->release_mbufs(txq);
 			txq->ops->reset(txq);
@@ -2340,7 +2340,7 @@  ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 	}
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
 		if (rxq != NULL) {
 			ixgbe_rx_queue_release_mbufs(rxq);
 			ixgbe_reset_rx_queue(rxq);
@@ -3296,9 +3296,9 @@  ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
 }
 
 static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
 {
-	struct igb_rx_entry *rxe = rxq->sw_ring;
+	struct ixgbe_rx_entry *rxe = rxq->sw_ring;
 	uint64_t dma_addr;
 	unsigned i;
 
@@ -3512,7 +3512,7 @@  int
 ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_rx_queue *rxq;
+	struct ixgbe_rx_queue *rxq;
 	struct rte_pktmbuf_pool_private *mbp_priv;
 	uint64_t bus_addr;
 	uint32_t rxctrl;
@@ -3696,7 +3696,7 @@  void
 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_tx_queue *txq;
+	struct ixgbe_tx_queue *txq;
 	uint64_t bus_addr;
 	uint32_t hlreg0;
 	uint32_t txctrl;
@@ -3792,8 +3792,8 @@  int
 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_tx_queue *txq;
-	struct igb_rx_queue *rxq;
+	struct ixgbe_tx_queue *txq;
+	struct ixgbe_rx_queue *rxq;
 	uint32_t txdctl;
 	uint32_t dmatxctl;
 	uint32_t rxctrl;
@@ -3859,7 +3859,7 @@  int
 ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_rx_queue *rxq;
+	struct ixgbe_rx_queue *rxq;
 	uint32_t rxdctl;
 	int poll_ms;
 
@@ -3904,7 +3904,7 @@  int
 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_rx_queue *rxq;
+	struct ixgbe_rx_queue *rxq;
 	uint32_t rxdctl;
 	int poll_ms;
 
@@ -3946,7 +3946,7 @@  int
 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_tx_queue *txq;
+	struct ixgbe_tx_queue *txq;
 	uint32_t txdctl;
 	int poll_ms;
 
@@ -3987,7 +3987,7 @@  int
 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_tx_queue *txq;
+	struct ixgbe_tx_queue *txq;
 	uint32_t txdctl;
 	uint32_t txtdh, txtdt;
 	int poll_ms;
@@ -4047,7 +4047,7 @@  int
 ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_rx_queue *rxq;
+	struct ixgbe_rx_queue *rxq;
 	struct rte_pktmbuf_pool_private *mbp_priv;
 	uint64_t bus_addr;
 	uint32_t srrctl, psrtype = 0;
@@ -4190,7 +4190,7 @@  void
 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_tx_queue *txq;
+	struct ixgbe_tx_queue *txq;
 	uint64_t bus_addr;
 	uint32_t txctrl;
 	uint16_t i;
@@ -4231,8 +4231,8 @@  void
 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct igb_tx_queue *txq;
-	struct igb_rx_queue *rxq;
+	struct ixgbe_tx_queue *txq;
+	struct ixgbe_rx_queue *rxq;
 	uint32_t txdctl;
 	uint32_t rxdctl;
 	uint16_t i;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 42d59f9..4cc1d6c 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -75,14 +75,14 @@ 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
-struct igb_rx_entry {
+struct ixgbe_rx_entry {
 	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
 };
 
 /**
  * Structure associated with each descriptor of the TX ring of a TX queue.
  */
-struct igb_tx_entry {
+struct ixgbe_tx_entry {
 	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
 	uint16_t next_id; /**< Index of next descriptor in ring. */
 	uint16_t last_id; /**< Index of last scattered descriptor. */
@@ -91,20 +91,20 @@  struct igb_tx_entry {
 /**
  * Structure associated with each descriptor of the TX ring of a TX queue.
  */
-struct igb_tx_entry_v {
+struct ixgbe_tx_entry_v {
 	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
 };
 
 /**
  * Structure associated with each RX queue.
  */
-struct igb_rx_queue {
+struct ixgbe_rx_queue {
 	struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
 	volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
 	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
 	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
 	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
-	struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
+	struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
 	uint64_t            mbuf_initializer; /**< value to init mbufs */
@@ -182,11 +182,11 @@  struct ixgbe_advctx_info {
 /**
  * Structure associated with each TX queue.
  */
-struct igb_tx_queue {
+struct ixgbe_tx_queue {
 	/** TX ring virtual address. */
 	volatile union ixgbe_adv_tx_desc *tx_ring;
 	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
-	struct igb_tx_entry *sw_ring;      /**< virtual address of SW ring. */
+	struct ixgbe_tx_entry *sw_ring;      /**< virtual address of SW ring. */
 	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
 	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
 	uint16_t            tx_tail;       /**< current value of TDT reg. */
@@ -216,9 +216,9 @@  struct igb_tx_queue {
 };
 
 struct ixgbe_txq_ops {
-	void (*release_mbufs)(struct igb_tx_queue *txq);
-	void (*free_swring)(struct igb_tx_queue *txq);
-	void (*reset)(struct igb_tx_queue *txq);
+	void (*release_mbufs)(struct ixgbe_tx_queue *txq);
+	void (*free_swring)(struct ixgbe_tx_queue *txq);
+	void (*reset)(struct ixgbe_tx_queue *txq);
 };
 
 /*
@@ -253,7 +253,7 @@  struct ixgbe_txq_ops {
  * the queue parameters. Used in tx_queue_setup by primary process and then
  * in dev_init by secondary process when attaching to an existing ethdev.
  */
-void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);
+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
 
 #ifdef RTE_IXGBE_INC_VECTOR
 uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -262,8 +262,8 @@  uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
-int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
 int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
 #endif
 
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
index 11e9f12..9d8fa8d 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
@@ -45,12 +45,12 @@ 
 #endif
 
 static inline void
-ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 {
 	int i;
 	uint16_t rx_id;
 	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
 	struct rte_mbuf *mb0, *mb1;
 	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
 			RTE_PKTMBUF_HEADROOM);
@@ -187,11 +187,11 @@  desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
  * - don't support ol_flags for rss and csum err
  */
 static inline uint16_t
-_recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts, uint8_t *split_packet)
 {
 	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct igb_rx_entry *sw_ring;
+	struct ixgbe_rx_entry *sw_ring;
 	uint16_t nb_pkts_recd;
 	int pos;
 	uint64_t var;
@@ -396,7 +396,7 @@  ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 static inline uint16_t
-reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 		uint16_t nb_bufs, uint8_t *split_flags)
 {
 	struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
@@ -468,7 +468,7 @@  uint16_t
 ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
-	struct igb_rx_queue *rxq = rx_queue;
+	struct ixgbe_rx_queue *rxq = rx_queue;
 	uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
 
 	/* get some new buffers */
@@ -517,9 +517,9 @@  vtx(volatile union ixgbe_adv_tx_desc *txdp,
 }
 
 static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 {
-	struct igb_tx_entry_v *txep;
+	struct ixgbe_tx_entry_v *txep;
 	uint32_t status;
 	uint32_t n;
 	uint32_t i;
@@ -537,7 +537,7 @@  ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
 	 * first buffer to free from S/W ring is at index
 	 * tx_next_dd - (tx_rs_thresh-1)
 	 */
-	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
+	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
 			(n - 1)];
 	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
 	if (likely(m != NULL)) {
@@ -575,7 +575,7 @@  ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
 }
 
 static inline void __attribute__((always_inline))
-tx_backlog_entry(struct igb_tx_entry_v *txep,
+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	int i;
@@ -587,9 +587,9 @@  uint16_t
 ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		       uint16_t nb_pkts)
 {
-	struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
 	volatile union ixgbe_adv_tx_desc *txdp;
-	struct igb_tx_entry_v *txep;
+	struct ixgbe_tx_entry_v *txep;
 	uint16_t n, nb_commit, tx_id;
 	uint64_t flags = DCMD_DTYP_FLAGS;
 	uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
@@ -607,7 +607,7 @@  ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = txq->tx_tail;
 	txdp = &txq->tx_ring[tx_id];
-	txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];
+	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
@@ -628,7 +628,7 @@  ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		/* avoid reach the end of ring */
 		txdp = &(txq->tx_ring[tx_id]);
-		txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
+		txep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);
 	}
 
 	tx_backlog_entry(txep, tx_pkts, nb_commit);
@@ -651,10 +651,10 @@  ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 {
 	unsigned i;
-	struct igb_tx_entry_v *txe;
+	struct ixgbe_tx_entry_v *txe;
 	uint16_t nb_free, max_desc;
 
 	if (txq->sw_ring != NULL) {
@@ -664,36 +664,36 @@  ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
 		     nb_free < max_desc && i != txq->tx_tail;
 		     i = (i + 1) & max_desc) {
-			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
+			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
 			if (txe->mbuf != NULL)
 				rte_pktmbuf_free_seg(txe->mbuf);
 		}
 		/* reset tx_entry */
 		for (i = 0; i < txq->nb_tx_desc; i++) {
-			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
+			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
 			txe->mbuf = NULL;
 		}
 	}
 }
 
 static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 {
 	if (txq == NULL)
 		return;
 
 	if (txq->sw_ring != NULL) {
-		rte_free((struct igb_rx_entry *)txq->sw_ring - 1);
+		rte_free((struct ixgbe_rx_entry *)txq->sw_ring - 1);
 		txq->sw_ring = NULL;
 	}
 }
 
 static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 {
 	static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
 			.buffer_addr = 0} };
-	struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
+	struct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;
 	uint16_t i;
 
 	/* Zero out HW ring memory */
@@ -730,7 +730,7 @@  static const struct ixgbe_txq_ops vec_txq_ops = {
 };
 
 int
-ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
 {
 	uintptr_t p;
 	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
@@ -747,14 +747,14 @@  ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
 	return 0;
 }
 
-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq)
+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
 {
 	if (txq->sw_ring == NULL)
 		return -1;
 
 	/* leave the first one for overflow */
-	txq->sw_ring = (struct igb_tx_entry *)
-		((struct igb_tx_entry_v *)txq->sw_ring + 1);
+	txq->sw_ring = (struct ixgbe_tx_entry *)
+		((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);
 	txq->ops = &vec_txq_ops;
 
 	return 0;