[v2,09/22] net/ixgbe: use common Tx queue structure

Message ID 20241203164132.2686558-10-bruce.richardson@intel.com (mailing list archive)
State Superseded
Delegated to: Thomas Monjalon
Series Reduce code duplication across Intel NIC drivers

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Bruce Richardson Dec. 3, 2024, 4:41 p.m. UTC
Merge the additional fields used by the ixgbe driver into the common Tx
queue structure, then convert the driver over to using it.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/_common_intel/tx.h                | 14 +++-
 drivers/net/ixgbe/ixgbe_ethdev.c              |  4 +-
 .../ixgbe/ixgbe_recycle_mbufs_vec_common.c    |  2 +-
 drivers/net/ixgbe/ixgbe_rxtx.c                | 64 +++++++++----------
 drivers/net/ixgbe/ixgbe_rxtx.h                | 56 ++--------------
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     | 26 ++++----
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c       | 14 ++--
 drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c        | 14 ++--
 8 files changed, 80 insertions(+), 114 deletions(-)
  

Patch

diff --git a/drivers/net/_common_intel/tx.h b/drivers/net/_common_intel/tx.h
index c4a1a0c816..51ae3b051d 100644
--- a/drivers/net/_common_intel/tx.h
+++ b/drivers/net/_common_intel/tx.h
@@ -34,9 +34,13 @@  struct ci_tx_queue {
 		volatile struct i40e_tx_desc *i40e_tx_ring;
 		volatile struct iavf_tx_desc *iavf_tx_ring;
 		volatile struct ice_tx_desc *ice_tx_ring;
+		volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
 	};
 	volatile uint8_t *qtx_tail;               /* register address of tail */
-	struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
+	union {
+		struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
+		struct ci_tx_entry_vec *sw_ring_vec;
+	};
 	rte_iova_t tx_ring_dma;        /* TX ring DMA address */
 	uint16_t nb_tx_desc;           /* number of TX descriptors */
 	uint16_t tx_tail; /* current value of tail register */
@@ -87,6 +91,14 @@  struct ci_tx_queue {
 			uint8_t tc;
 			bool use_ctx;  /* with ctx info, each pkt needs two descriptors */
 		};
+		struct { /* ixgbe specific values */
+			const struct ixgbe_txq_ops *ops;
+			struct ixgbe_advctx_info *ctx_cache;
+			uint32_t ctx_curr;
+#ifdef RTE_LIB_SECURITY
+			uint8_t using_ipsec;  /**< indicates that IPsec TX feature is in use */
+#endif
+		};
 	};
 };
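
The anonymous unions above are the crux of the series: one queue structure
carries every driver's descriptor ring pointer and software ring at no size
cost, because all members of a union alias the same storage. A minimal
standalone sketch of the idea, using simplified stand-in descriptor types
rather than the real DPDK definitions:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* simplified stand-ins for the real per-driver descriptor layouts */
struct i40e_desc_sketch { uint64_t buffer_addr; uint64_t cmd_type_offset_bsz; };
union ixgbe_desc_sketch { struct { uint64_t buffer_addr; uint64_t cmd_type_len; } read; };

struct ci_tx_queue_sketch {
	/* one pointer's worth of storage, viewed through whichever
	 * descriptor type the owning driver uses */
	union {
		volatile struct i40e_desc_sketch *i40e_tx_ring;
		volatile union ixgbe_desc_sketch *ixgbe_tx_ring;
	};
};

int main(void)
{
	struct ci_tx_queue_sketch q = { .i40e_tx_ring = NULL };
	q.ixgbe_tx_ring = NULL; /* same storage, different typed view */
	/* holds on mainstream ABIs: the union adds no per-driver overhead */
	assert(sizeof(q) == sizeof(void *));
	return 0;
}

Because only the pointer is unioned, each driver's hot path still
dereferences its own descriptor layout directly, with no casts at the call
sites.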
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bee97d191..5f18fbaad5 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1118,7 +1118,7 @@  eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	 * RX and TX function.
 	 */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-		struct ixgbe_tx_queue *txq;
+		struct ci_tx_queue *txq;
 		/* TX queue function in primary, set by last queue initialized
 		 * Tx queue may not initialized by primary process
 		 */
@@ -1623,7 +1623,7 @@  eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	 * RX function
 	 */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-		struct ixgbe_tx_queue *txq;
+		struct ci_tx_queue *txq;
 		/* TX queue function in primary, set by last queue initialized
 		 * Tx queue may not initialized by primary process
 		 */
diff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
index a878db3150..3fd05ed5eb 100644
--- a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
+++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
@@ -51,7 +51,7 @@  uint16_t
 ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
 		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
 {
-	struct ixgbe_tx_queue *txq = tx_queue;
+	struct ci_tx_queue *txq = tx_queue;
 	struct ci_tx_entry *txep;
 	struct rte_mbuf **rxep;
 	int i, n;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2ca26cd132..344ef85685 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -98,7 +98,7 @@ 
  * Return the total number of buffers freed.
  */
 static __rte_always_inline int
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ci_tx_queue *txq)
 {
 	struct ci_tx_entry *txep;
 	uint32_t status;
@@ -195,7 +195,7 @@  tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
  * Copy mbuf pointers to the S/W ring.
  */
 static inline void
-ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts,
 		      uint16_t nb_pkts)
 {
 	volatile union ixgbe_adv_tx_desc *txdp = &txq->ixgbe_tx_ring[txq->tx_tail];
@@ -231,7 +231,7 @@  static inline uint16_t
 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	     uint16_t nb_pkts)
 {
-	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+	struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 	volatile union ixgbe_adv_tx_desc *tx_r = txq->ixgbe_tx_ring;
 	uint16_t n = 0;
 
@@ -344,7 +344,7 @@  ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		    uint16_t nb_pkts)
 {
 	uint16_t nb_tx = 0;
-	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+	struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
 	while (nb_pkts) {
 		uint16_t ret, num;
@@ -362,7 +362,7 @@  ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static inline void
-ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
+ixgbe_set_xmit_ctx(struct ci_tx_queue *txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
 		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
 		__rte_unused uint64_t *mdata)
@@ -493,7 +493,7 @@  ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ci_tx_queue *txq, uint64_t flags,
 		   union ixgbe_tx_offload tx_offload)
 {
 	/* If match with the current used context */
@@ -561,7 +561,7 @@  tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 
 /* Reset transmit descriptors after they have been used */
 static inline int
-ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ci_tx_queue *txq)
 {
 	struct ci_tx_entry *sw_ring = txq->sw_ring;
 	volatile union ixgbe_adv_tx_desc *txr = txq->ixgbe_tx_ring;
@@ -623,7 +623,7 @@  uint16_t
 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts)
 {
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 	struct ci_tx_entry *sw_ring;
 	struct ci_tx_entry *txe, *txn;
 	volatile union ixgbe_adv_tx_desc *txr;
@@ -963,7 +963,7 @@  ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	int i, ret;
 	uint64_t ol_flags;
 	struct rte_mbuf *m;
-	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+	struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
 	for (i = 0; i < nb_pkts; i++) {
 		m = tx_pkts[i];
@@ -2335,7 +2335,7 @@  ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
  **********************************************************************/
 
 static void __rte_cold
-ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ci_tx_queue *txq)
 {
 	unsigned i;
 
@@ -2350,7 +2350,7 @@  ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 }
 
 static int
-ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
+ixgbe_tx_done_cleanup_full(struct ci_tx_queue *txq, uint32_t free_cnt)
 {
 	struct ci_tx_entry *swr_ring = txq->sw_ring;
 	uint16_t i, tx_last, tx_id;
@@ -2408,7 +2408,7 @@  ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
 }
 
 static int
-ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq,
+ixgbe_tx_done_cleanup_simple(struct ci_tx_queue *txq,
 			uint32_t free_cnt)
 {
 	int i, n, cnt;
@@ -2432,7 +2432,7 @@  ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq,
 }
 
 static int
-ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused,
+ixgbe_tx_done_cleanup_vec(struct ci_tx_queue *txq __rte_unused,
 			uint32_t free_cnt __rte_unused)
 {
 	return -ENOTSUP;
@@ -2441,7 +2441,7 @@  ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused,
 int
 ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
 {
-	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+	struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 	if (txq->offloads == 0 &&
 #ifdef RTE_LIB_SECURITY
 			!(txq->using_ipsec) &&
@@ -2450,7 +2450,7 @@  ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
 				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
 				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
-					txq->sw_ring_v != NULL)) {
+					txq->sw_ring_vec != NULL)) {
 			return ixgbe_tx_done_cleanup_vec(txq, free_cnt);
 		} else {
 			return ixgbe_tx_done_cleanup_simple(txq, free_cnt);
@@ -2461,7 +2461,7 @@  ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
 }
 
 static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ci_tx_queue *txq)
 {
 	if (txq != NULL &&
 	    txq->sw_ring != NULL)
@@ -2469,7 +2469,7 @@  ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 }
 
 static void __rte_cold
-ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release(struct ci_tx_queue *txq)
 {
 	if (txq != NULL && txq->ops != NULL) {
 		txq->ops->release_mbufs(txq);
@@ -2487,7 +2487,7 @@  ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 
 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
 static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ci_tx_queue *txq)
 {
 	static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
 	struct ci_tx_entry *txe = txq->sw_ring;
@@ -2536,7 +2536,7 @@  static const struct ixgbe_txq_ops def_txq_ops = {
  * in dev_init by secondary process when attaching to an existing ethdev.
  */
 void __rte_cold
-ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ci_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if ((txq->offloads == 0) &&
@@ -2618,7 +2618,7 @@  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 const struct rte_eth_txconf *tx_conf)
 {
 	const struct rte_memzone *tz;
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 	struct ixgbe_hw     *hw;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint64_t offloads;
@@ -2740,12 +2740,12 @@  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	/* First allocate the tx queue data structure */
-	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue) +
+	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ci_tx_queue) +
 					sizeof(struct ixgbe_advctx_info) * IXGBE_CTX_NUM,
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (txq == NULL)
 		return -ENOMEM;
-	txq->ctx_cache = RTE_PTR_ADD(txq, sizeof(struct ixgbe_tx_queue));
+	txq->ctx_cache = RTE_PTR_ADD(txq, sizeof(struct ci_tx_queue));
 
 	/*
 	 * Allocate TX ring hardware descriptors. A memzone large enough to
@@ -3312,7 +3312,7 @@  ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
 int
 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 {
-	struct ixgbe_tx_queue *txq = tx_queue;
+	struct ci_tx_queue *txq = tx_queue;
 	volatile uint32_t *status;
 	uint32_t desc;
 
@@ -3377,7 +3377,7 @@  ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+		struct ci_tx_queue *txq = dev->data->tx_queues[i];
 
 		if (txq != NULL) {
 			txq->ops->release_mbufs(txq);
@@ -5284,7 +5284,7 @@  void __rte_cold
 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 	uint64_t bus_addr;
 	uint32_t hlreg0;
 	uint32_t txctrl;
@@ -5402,7 +5402,7 @@  int __rte_cold
 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 	struct ixgbe_rx_queue *rxq;
 	uint32_t txdctl;
 	uint32_t dmatxctl;
@@ -5572,7 +5572,7 @@  int __rte_cold
 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
 	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 	uint32_t txdctl;
 	int poll_ms;
 
@@ -5611,7 +5611,7 @@  int __rte_cold
 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
 	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 	uint32_t txdctl;
 	uint32_t txtdh, txtdt;
 	int poll_ms;
@@ -5685,7 +5685,7 @@  void
 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_txq_info *qinfo)
 {
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 
 	txq = dev->data->tx_queues[queue_id];
 
@@ -5877,7 +5877,7 @@  void __rte_cold
 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 	uint64_t bus_addr;
 	uint32_t txctrl;
 	uint16_t i;
@@ -5918,7 +5918,7 @@  void __rte_cold
 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
+	struct ci_tx_queue *txq;
 	struct ixgbe_rx_queue *rxq;
 	uint32_t txdctl;
 	uint32_t rxdctl;
@@ -6127,7 +6127,7 @@  ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
 }
 
 int
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq)
+ixgbe_txq_vec_setup(struct ci_tx_queue *txq __rte_unused)
 {
 	return -1;
 }
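
One detail worth noting in the queue-setup hunk above: ixgbe keeps its
single-allocation layout, where the context cache is carved out of the same
zmalloc as the queue structure itself; only the type name in the size
computation changes. A minimal sketch of that co-allocation pattern, with an
illustrative ctx_entry_sketch type standing in for struct ixgbe_advctx_info:

#include <stdint.h>

#include <rte_common.h>
#include <rte_malloc.h>

/* illustrative stand-in for struct ixgbe_advctx_info */
struct ctx_entry_sketch { uint64_t flags; };

struct queue_sketch {
	struct ctx_entry_sketch *ctx_cache;
	/* ...remaining queue fields... */
};

static struct queue_sketch *
queue_alloc(unsigned int ctx_count, int socket_id)
{
	/* one zmalloc covers the struct plus a trailing ctx array */
	struct queue_sketch *q = rte_zmalloc_socket("ethdev TX queue",
			sizeof(*q) + sizeof(struct ctx_entry_sketch) * ctx_count,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (q == NULL)
		return NULL;
	/* the cache lives immediately after the struct, so a single
	 * rte_free(q) later releases both */
	q->ctx_cache = RTE_PTR_ADD(q, sizeof(*q));
	return q;
}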
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 847cacf7b5..4333e5bf2f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -180,56 +180,10 @@  struct ixgbe_advctx_info {
 	union ixgbe_tx_offload tx_offload_mask;
 };
 
-/**
- * Structure associated with each TX queue.
- */
-struct ixgbe_tx_queue {
-	/** TX ring virtual address. */
-	volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
-	rte_iova_t tx_ring_dma; /**< TX ring DMA address. */
-	union {
-		struct ci_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
-		struct ci_tx_entry_vec *sw_ring_v; /**< address of SW ring for vector PMD */
-	};
-	volatile uint8_t *qtx_tail; /**< Address of TDT register. */
-	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
-	uint16_t            tx_tail;       /**< current value of TDT reg. */
-	/**< Start freeing TX buffers if there are less free descriptors than
-	     this value. */
-	uint16_t            tx_free_thresh;
-	/** Number of TX descriptors to use before RS bit is set. */
-	uint16_t            tx_rs_thresh;
-	/** Number of TX descriptors used since RS bit was set. */
-	uint16_t            nb_tx_used;
-	/** Index to last TX descriptor to have been cleaned. */
-	uint16_t            last_desc_cleaned;
-	/** Total number of TX descriptors ready to be allocated. */
-	uint16_t            nb_tx_free;
-	uint16_t tx_next_dd; /**< next desc to scan for DD bit */
-	uint16_t tx_next_rs; /**< next desc to set RS bit */
-	uint16_t            queue_id;      /**< TX queue index. */
-	uint16_t            reg_idx;       /**< TX queue register index. */
-	uint16_t            port_id;       /**< Device port identifier. */
-	uint8_t             pthresh;       /**< Prefetch threshold register. */
-	uint8_t             hthresh;       /**< Host threshold register. */
-	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
-	uint32_t            ctx_curr;      /**< Hardware context states. */
-	/** Hardware context history. */
-	struct ixgbe_advctx_info *ctx_cache;
-	const struct ixgbe_txq_ops *ops;       /**< txq ops */
-	bool            tx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIB_SECURITY
-	uint8_t		    using_ipsec;
-	/**< indicates that IPsec TX feature is in use */
-#endif
-	const struct rte_memzone *mz;
-};
-
 struct ixgbe_txq_ops {
-	void (*release_mbufs)(struct ixgbe_tx_queue *txq);
-	void (*free_swring)(struct ixgbe_tx_queue *txq);
-	void (*reset)(struct ixgbe_tx_queue *txq);
+	void (*release_mbufs)(struct ci_tx_queue *txq);
+	void (*free_swring)(struct ci_tx_queue *txq);
+	void (*reset)(struct ci_tx_queue *txq);
 };
 
 /*
@@ -250,7 +204,7 @@  struct ixgbe_txq_ops {
  * the queue parameters. Used in tx_queue_setup by primary process and then
  * in dev_init by secondary process when attaching to an existing ethdev.
  */
-void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ci_tx_queue *txq);
 
 /**
  * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
@@ -287,7 +241,7 @@  void ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs);
 
 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
-int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+int ixgbe_txq_vec_setup(struct ci_tx_queue *txq);
 
 uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
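
With the queue structure itself now shared, the per-driver behaviour that
remains lives behind the ixgbe_txq_ops vtable above, reached through the ops
pointer merged into the common structure's ixgbe arm. A minimal standalone
sketch of the dispatch, with stub reset implementations standing in for the
real scalar and vector ones:

#include <stdio.h>

struct ci_tx_queue; /* forward declaration, as in the real header */

struct ixgbe_txq_ops {
	void (*release_mbufs)(struct ci_tx_queue *txq);
	void (*free_swring)(struct ci_tx_queue *txq);
	void (*reset)(struct ci_tx_queue *txq);
};

struct ci_tx_queue { /* trimmed to the one field this sketch exercises */
	const struct ixgbe_txq_ops *ops;
};

static void scalar_reset(struct ci_tx_queue *txq) { (void)txq; puts("scalar reset"); }
static void vector_reset(struct ci_tx_queue *txq) { (void)txq; puts("vector reset"); }

static const struct ixgbe_txq_ops def_txq_ops = { .reset = scalar_reset };
static const struct ixgbe_txq_ops vec_txq_ops = { .reset = vector_reset };

int main(void)
{
	struct ci_tx_queue txq = { .ops = &vec_txq_ops }; /* chosen once at setup */
	txq.ops->reset(&txq); /* call sites never branch on scalar vs vector */
	return 0;
}

This is why the scalar and vector files in this patch can each install their
own release_mbufs/free_swring/reset while every caller stays path-agnostic.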
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index cc51bf6eed..81fd8bb64d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -12,7 +12,7 @@ 
 #include "ixgbe_rxtx.h"
 
 static __rte_always_inline int
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ci_tx_queue *txq)
 {
 	struct ci_tx_entry_vec *txep;
 	uint32_t status;
@@ -32,7 +32,7 @@  ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 	 * first buffer to free from S/W ring is at index
 	 * tx_next_dd - (tx_rs_thresh-1)
 	 */
-	txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
+	txep = &txq->sw_ring_vec[txq->tx_next_dd - (n - 1)];
 	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
 	if (likely(m != NULL)) {
 		free[0] = m;
@@ -79,7 +79,7 @@  tx_backlog_entry(struct ci_tx_entry_vec *txep,
 }
 
 static inline void
-_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
 {
 	unsigned int i;
 	struct ci_tx_entry_vec *txe;
@@ -92,14 +92,14 @@  _ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
 	for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
 	     i != txq->tx_tail;
 	     i = (i + 1) % txq->nb_tx_desc) {
-		txe = &txq->sw_ring_v[i];
+		txe = &txq->sw_ring_vec[i];
 		rte_pktmbuf_free_seg(txe->mbuf);
 	}
 	txq->nb_tx_free = max_desc;
 
 	/* reset tx_entry */
 	for (i = 0; i < txq->nb_tx_desc; i++) {
-		txe = &txq->sw_ring_v[i];
+		txe = &txq->sw_ring_vec[i];
 		txe->mbuf = NULL;
 	}
 }
@@ -134,22 +134,22 @@  _ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
 }
 
 static inline void
-_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_tx_free_swring_vec(struct ci_tx_queue *txq)
 {
 	if (txq == NULL)
 		return;
 
 	if (txq->sw_ring != NULL) {
-		rte_free(txq->sw_ring_v - 1);
-		txq->sw_ring_v = NULL;
+		rte_free(txq->sw_ring_vec - 1);
+		txq->sw_ring_vec = NULL;
 	}
 }
 
 static inline void
-_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_reset_tx_queue_vec(struct ci_tx_queue *txq)
 {
 	static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
-	struct ci_tx_entry_vec *txe = txq->sw_ring_v;
+	struct ci_tx_entry_vec *txe = txq->sw_ring_vec;
 	uint16_t i;
 
 	/* Zero out HW ring memory */
@@ -199,14 +199,14 @@  ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
 }
 
 static inline int
-ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
+ixgbe_txq_vec_setup_default(struct ci_tx_queue *txq,
 			    const struct ixgbe_txq_ops *txq_ops)
 {
-	if (txq->sw_ring_v == NULL)
+	if (txq->sw_ring_vec == NULL)
 		return -1;
 
 	/* leave the first one for overflow */
-	txq->sw_ring_v = txq->sw_ring_v + 1;
+	txq->sw_ring_vec = txq->sw_ring_vec + 1;
 	txq->ops = txq_ops;
 
 	return 0;
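
The "+ 1" in ixgbe_txq_vec_setup_default and the matching "- 1" in
_ixgbe_tx_free_swring_vec above are two halves of one convention: the first
entry of the software ring allocation is reserved as slack, and the working
pointer starts one entry in, so ring logic can reference the entry just
before index zero without walking off the block. A minimal sketch of the
pattern, assuming for illustration a dedicated allocation sized one entry
larger, with plain calloc/free standing in for the driver's actual allocator:

#include <stdlib.h>

struct tx_entry_vec_sketch { void *mbuf; };

/* allocate one spare entry, then hand out a pointer advanced past it */
static struct tx_entry_vec_sketch *
swring_vec_alloc(unsigned int nb_desc)
{
	struct tx_entry_vec_sketch *ring =
		calloc(nb_desc + 1, sizeof(*ring));
	return ring == NULL ? NULL : ring + 1;
}

static void
swring_vec_free(struct tx_entry_vec_sketch *ring)
{
	if (ring != NULL)
		free(ring - 1); /* step back to the true allocation start */
}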
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 06be7ec82a..cb749a3760 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -571,7 +571,7 @@  uint16_t
 ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			   uint16_t nb_pkts)
 {
-	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+	struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 	volatile union ixgbe_adv_tx_desc *txdp;
 	struct ci_tx_entry_vec *txep;
 	uint16_t n, nb_commit, tx_id;
@@ -591,7 +591,7 @@  ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = txq->tx_tail;
 	txdp = &txq->ixgbe_tx_ring[tx_id];
-	txep = &txq->sw_ring_v[tx_id];
+	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
@@ -611,7 +611,7 @@  ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		/* avoid reach the end of ring */
 		txdp = &txq->ixgbe_tx_ring[tx_id];
-		txep = &txq->sw_ring_v[tx_id];
+		txep = &txq->sw_ring_vec[tx_id];
 	}
 
 	tx_backlog_entry(txep, tx_pkts, nb_commit);
@@ -634,7 +634,7 @@  ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static void __rte_cold
-ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
 {
 	_ixgbe_tx_queue_release_mbufs_vec(txq);
 }
@@ -646,13 +646,13 @@  ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
 }
 
 static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ci_tx_queue *txq)
 {
 	_ixgbe_tx_free_swring_vec(txq);
 }
 
 static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ci_tx_queue *txq)
 {
 	_ixgbe_reset_tx_queue_vec(txq);
 }
@@ -670,7 +670,7 @@  ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
 }
 
 int __rte_cold
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+ixgbe_txq_vec_setup(struct ci_tx_queue *txq)
 {
 	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
 }
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index a21a57bd55..e46550f76a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -693,7 +693,7 @@  uint16_t
 ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			   uint16_t nb_pkts)
 {
-	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+	struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 	volatile union ixgbe_adv_tx_desc *txdp;
 	struct ci_tx_entry_vec *txep;
 	uint16_t n, nb_commit, tx_id;
@@ -713,7 +713,7 @@  ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = txq->tx_tail;
 	txdp = &txq->ixgbe_tx_ring[tx_id];
-	txep = &txq->sw_ring_v[tx_id];
+	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
@@ -734,7 +734,7 @@  ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		/* avoid reach the end of ring */
 		txdp = &txq->ixgbe_tx_ring[tx_id];
-		txep = &txq->sw_ring_v[tx_id];
+		txep = &txq->sw_ring_vec[tx_id];
 	}
 
 	tx_backlog_entry(txep, tx_pkts, nb_commit);
@@ -757,7 +757,7 @@  ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static void __rte_cold
-ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
 {
 	_ixgbe_tx_queue_release_mbufs_vec(txq);
 }
@@ -769,13 +769,13 @@  ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
 }
 
 static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ci_tx_queue *txq)
 {
 	_ixgbe_tx_free_swring_vec(txq);
 }
 
 static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ci_tx_queue *txq)
 {
 	_ixgbe_reset_tx_queue_vec(txq);
 }
@@ -793,7 +793,7 @@  ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
 }
 
 int __rte_cold
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+ixgbe_txq_vec_setup(struct ci_tx_queue *txq)
 {
 	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
 }