[v6,17/19] common/idpf: refine API name for queue config module

Message ID 20230203094340.8103-18-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series: net/idpf: introduce idpf common module

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Xing, Beilei Feb. 3, 2023, 9:43 a.m. UTC
  From: Beilei Xing <beilei.xing@intel.com>

This patch refines the API names of the queue configuration functions in the idpf common module: each helper gains an idpf_qc_ (queue config) prefix, with the verb moved to the end of the name.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_rxtx.c        | 42 ++++++++--------
 drivers/common/idpf/idpf_common_rxtx.h        | 38 +++++++-------
 drivers/common/idpf/idpf_common_rxtx_avx512.c |  2 +-
 drivers/common/idpf/version.map               | 37 +++++++-------
 drivers/net/idpf/idpf_rxtx.c                  | 50 +++++++++----------
 5 files changed, 85 insertions(+), 84 deletions(-)
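
The rename is mechanical across all 18 symbols: verb-first names become idpf_qc_-prefixed names with the verb at the end (idpf_check_rx_thresh -> idpf_qc_rx_thresh_check, idpf_release_rxq_mbufs -> idpf_qc_rxq_mbufs_release, and so on). A minimal before/after sketch of a caller, using the signature declared in the header change below (the surrounding check is abridged):

	/* Before this patch: verb-first helper name. */
	if (idpf_check_rx_thresh(nb_desc, rx_free_thresh) != 0)
		return -EINVAL;

	/* After this patch: "idpf_qc_" (queue config) prefix, verb last. */
	if (idpf_qc_rx_thresh_check(nb_desc, rx_free_thresh) != 0)
		return -EINVAL;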
  

Patch

diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index bc95fef6bc..0b87aeea73 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -11,7 +11,7 @@  int idpf_timestamp_dynfield_offset = -1;
 uint64_t idpf_timestamp_dynflag;
 
 int
-idpf_check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh)
 {
 	/* The following constraints must be satisfied:
 	 * thresh < rxq->nb_rx_desc
@@ -26,8 +26,8 @@  idpf_check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
 }
 
 int
-idpf_check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
-		     uint16_t tx_free_thresh)
+idpf_qc_tx_thresh_check(uint16_t nb_desc, uint16_t tx_rs_thresh,
+			uint16_t tx_free_thresh)
 {
 	/* TX descriptors will have their RS bit set after tx_rs_thresh
 	 * descriptors have been used. The TX descriptor ring will be cleaned
@@ -74,7 +74,7 @@  idpf_check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
 }
 
 void
-idpf_release_rxq_mbufs(struct idpf_rx_queue *rxq)
+idpf_qc_rxq_mbufs_release(struct idpf_rx_queue *rxq)
 {
 	uint16_t i;
 
@@ -90,7 +90,7 @@  idpf_release_rxq_mbufs(struct idpf_rx_queue *rxq)
 }
 
 void
-idpf_release_txq_mbufs(struct idpf_tx_queue *txq)
+idpf_qc_txq_mbufs_release(struct idpf_tx_queue *txq)
 {
 	uint16_t nb_desc, i;
 
@@ -115,7 +115,7 @@  idpf_release_txq_mbufs(struct idpf_tx_queue *txq)
 }
 
 void
-idpf_reset_split_rx_descq(struct idpf_rx_queue *rxq)
+idpf_qc_split_rx_descq_reset(struct idpf_rx_queue *rxq)
 {
 	uint16_t len;
 	uint32_t i;
@@ -134,7 +134,7 @@  idpf_reset_split_rx_descq(struct idpf_rx_queue *rxq)
 }
 
 void
-idpf_reset_split_rx_bufq(struct idpf_rx_queue *rxq)
+idpf_qc_split_rx_bufq_reset(struct idpf_rx_queue *rxq)
 {
 	uint16_t len;
 	uint32_t i;
@@ -166,15 +166,15 @@  idpf_reset_split_rx_bufq(struct idpf_rx_queue *rxq)
 }
 
 void
-idpf_reset_split_rx_queue(struct idpf_rx_queue *rxq)
+idpf_qc_split_rx_queue_reset(struct idpf_rx_queue *rxq)
 {
-	idpf_reset_split_rx_descq(rxq);
-	idpf_reset_split_rx_bufq(rxq->bufq1);
-	idpf_reset_split_rx_bufq(rxq->bufq2);
+	idpf_qc_split_rx_descq_reset(rxq);
+	idpf_qc_split_rx_bufq_reset(rxq->bufq1);
+	idpf_qc_split_rx_bufq_reset(rxq->bufq2);
 }
 
 void
-idpf_reset_single_rx_queue(struct idpf_rx_queue *rxq)
+idpf_qc_single_rx_queue_reset(struct idpf_rx_queue *rxq)
 {
 	uint16_t len;
 	uint32_t i;
@@ -205,7 +205,7 @@  idpf_reset_single_rx_queue(struct idpf_rx_queue *rxq)
 }
 
 void
-idpf_reset_split_tx_descq(struct idpf_tx_queue *txq)
+idpf_qc_split_tx_descq_reset(struct idpf_tx_queue *txq)
 {
 	struct idpf_tx_entry *txe;
 	uint32_t i, size;
@@ -239,7 +239,7 @@  idpf_reset_split_tx_descq(struct idpf_tx_queue *txq)
 }
 
 void
-idpf_reset_split_tx_complq(struct idpf_tx_queue *cq)
+idpf_qc_split_tx_complq_reset(struct idpf_tx_queue *cq)
 {
 	uint32_t i, size;
 
@@ -257,7 +257,7 @@  idpf_reset_split_tx_complq(struct idpf_tx_queue *cq)
 }
 
 void
-idpf_reset_single_tx_queue(struct idpf_tx_queue *txq)
+idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
 {
 	struct idpf_tx_entry *txe;
 	uint32_t i, size;
@@ -294,7 +294,7 @@  idpf_reset_single_tx_queue(struct idpf_tx_queue *txq)
 }
 
 void
-idpf_rx_queue_release(void *rxq)
+idpf_qc_rx_queue_release(void *rxq)
 {
 	struct idpf_rx_queue *q = rxq;
 
@@ -324,7 +324,7 @@  idpf_rx_queue_release(void *rxq)
 }
 
 void
-idpf_tx_queue_release(void *txq)
+idpf_qc_tx_queue_release(void *txq)
 {
 	struct idpf_tx_queue *q = txq;
 
@@ -343,7 +343,7 @@  idpf_tx_queue_release(void *txq)
 }
 
 int
-idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)
+idpf_qc_ts_mbuf_register(struct idpf_rx_queue *rxq)
 {
 	int err;
 	if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {
@@ -360,7 +360,7 @@  idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)
 }
 
 int
-idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
+idpf_qc_single_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
 {
 	volatile struct virtchnl2_singleq_rx_buf_desc *rxd;
 	struct rte_mbuf *mbuf = NULL;
@@ -395,7 +395,7 @@  idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
 }
 
 int
-idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)
+idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
 {
 	volatile struct virtchnl2_splitq_rx_buf_desc *rxd;
 	struct rte_mbuf *mbuf = NULL;
@@ -1451,7 +1451,7 @@  idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)
 }
 
 int __rte_cold
-idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
+idpf_qc_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
 {
 	rxq->ops = &def_singleq_rx_ops_vec;
 	return idpf_singleq_rx_vec_setup_default(rxq);
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 6e3ee7de25..7966d15f51 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -215,38 +215,38 @@  extern int idpf_timestamp_dynfield_offset;
 extern uint64_t idpf_timestamp_dynflag;
 
 __rte_internal
-int idpf_check_rx_thresh(uint16_t nb_desc, uint16_t thresh);
+int idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh);
 __rte_internal
-int idpf_check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
-			 uint16_t tx_free_thresh);
+int idpf_qc_tx_thresh_check(uint16_t nb_desc, uint16_t tx_rs_thresh,
+			    uint16_t tx_free_thresh);
 __rte_internal
-void idpf_release_rxq_mbufs(struct idpf_rx_queue *rxq);
+void idpf_qc_rxq_mbufs_release(struct idpf_rx_queue *rxq);
 __rte_internal
-void idpf_release_txq_mbufs(struct idpf_tx_queue *txq);
+void idpf_qc_txq_mbufs_release(struct idpf_tx_queue *txq);
 __rte_internal
-void idpf_reset_split_rx_descq(struct idpf_rx_queue *rxq);
+void idpf_qc_split_rx_descq_reset(struct idpf_rx_queue *rxq);
 __rte_internal
-void idpf_reset_split_rx_bufq(struct idpf_rx_queue *rxq);
+void idpf_qc_split_rx_bufq_reset(struct idpf_rx_queue *rxq);
 __rte_internal
-void idpf_reset_split_rx_queue(struct idpf_rx_queue *rxq);
+void idpf_qc_split_rx_queue_reset(struct idpf_rx_queue *rxq);
 __rte_internal
-void idpf_reset_single_rx_queue(struct idpf_rx_queue *rxq);
+void idpf_qc_single_rx_queue_reset(struct idpf_rx_queue *rxq);
 __rte_internal
-void idpf_reset_split_tx_descq(struct idpf_tx_queue *txq);
+void idpf_qc_split_tx_descq_reset(struct idpf_tx_queue *txq);
 __rte_internal
-void idpf_reset_split_tx_complq(struct idpf_tx_queue *cq);
+void idpf_qc_split_tx_complq_reset(struct idpf_tx_queue *cq);
 __rte_internal
-void idpf_reset_single_tx_queue(struct idpf_tx_queue *txq);
+void idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq);
 __rte_internal
-void idpf_rx_queue_release(void *rxq);
+void idpf_qc_rx_queue_release(void *rxq);
 __rte_internal
-void idpf_tx_queue_release(void *txq);
+void idpf_qc_tx_queue_release(void *txq);
 __rte_internal
-int idpf_register_ts_mbuf(struct idpf_rx_queue *rxq);
+int idpf_qc_ts_mbuf_register(struct idpf_rx_queue *rxq);
 __rte_internal
-int idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq);
+int idpf_qc_single_rxq_mbufs_alloc(struct idpf_rx_queue *rxq);
 __rte_internal
-int idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq);
+int idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq);
 __rte_internal
 uint16_t idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			       uint16_t nb_pkts);
@@ -263,9 +263,9 @@  __rte_internal
 uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 __rte_internal
-int idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
+int idpf_qc_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
 __rte_internal
-int idpf_singleq_tx_vec_setup_avx512(struct idpf_tx_queue *txq);
+int idpf_qc_singleq_tx_vec_avx512_setup(struct idpf_tx_queue *txq);
 __rte_internal
 uint16_t idpf_singleq_recv_pkts_avx512(void *rx_queue,
 				       struct rte_mbuf **rx_pkts,
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index 6ae0e14d2f..d94e36b521 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -850,7 +850,7 @@  static const struct idpf_txq_ops avx512_singleq_tx_vec_ops = {
 };
 
 int __rte_cold
-idpf_singleq_tx_vec_setup_avx512(struct idpf_tx_queue *txq)
+idpf_qc_singleq_tx_vec_avx512_setup(struct idpf_tx_queue *txq)
 {
 	txq->ops = &avx512_singleq_tx_vec_ops;
 	return 0;
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index bd4dae503a..2ff152a353 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -4,6 +4,25 @@  INTERNAL {
 	idpf_adapter_deinit;
 	idpf_adapter_init;
 
+	idpf_qc_rx_thresh_check;
+	idpf_qc_rx_queue_release;
+	idpf_qc_rxq_mbufs_release;
+	idpf_qc_single_rx_queue_reset;
+	idpf_qc_single_rxq_mbufs_alloc;
+	idpf_qc_single_tx_queue_reset;
+	idpf_qc_singleq_rx_vec_setup;
+	idpf_qc_singleq_tx_vec_avx512_setup;
+	idpf_qc_split_rx_bufq_reset;
+	idpf_qc_split_rx_descq_reset;
+	idpf_qc_split_rx_queue_reset;
+	idpf_qc_split_rxq_mbufs_alloc;
+	idpf_qc_split_tx_complq_reset;
+	idpf_qc_split_tx_descq_reset;
+	idpf_qc_ts_mbuf_register;
+	idpf_qc_tx_queue_release;
+	idpf_qc_tx_thresh_check;
+	idpf_qc_txq_mbufs_release;
+
 	idpf_vport_deinit;
 	idpf_vport_info_init;
 	idpf_vport_init;
@@ -11,32 +30,14 @@  INTERNAL {
 	idpf_vport_irq_unmap_config;
 	idpf_vport_rss_config;
 
-	idpf_alloc_single_rxq_mbufs;
-	idpf_alloc_split_rxq_mbufs;
-	idpf_check_rx_thresh;
-	idpf_check_tx_thresh;
 	idpf_execute_vc_cmd;
 	idpf_prep_pkts;
-	idpf_register_ts_mbuf;
-	idpf_release_rxq_mbufs;
-	idpf_release_txq_mbufs;
-	idpf_reset_single_rx_queue;
-	idpf_reset_single_tx_queue;
-	idpf_reset_split_rx_bufq;
-	idpf_reset_split_rx_descq;
-	idpf_reset_split_rx_queue;
-	idpf_reset_split_tx_complq;
-	idpf_reset_split_tx_descq;
-	idpf_rx_queue_release;
 	idpf_singleq_recv_pkts;
 	idpf_singleq_recv_pkts_avx512;
-	idpf_singleq_rx_vec_setup;
-	idpf_singleq_tx_vec_setup_avx512;
 	idpf_singleq_xmit_pkts;
 	idpf_singleq_xmit_pkts_avx512;
 	idpf_splitq_recv_pkts;
 	idpf_splitq_xmit_pkts;
-	idpf_tx_queue_release;
 	idpf_vc_alloc_vectors;
 	idpf_vc_check_api_version;
 	idpf_vc_config_irq_map_unmap;
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index c0c622d64b..ec75d6f69e 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -51,11 +51,11 @@  idpf_tx_offload_convert(uint64_t offload)
 }
 
 static const struct idpf_rxq_ops def_rxq_ops = {
-	.release_mbufs = idpf_release_rxq_mbufs,
+	.release_mbufs = idpf_qc_rxq_mbufs_release,
 };
 
 static const struct idpf_txq_ops def_txq_ops = {
-	.release_mbufs = idpf_release_txq_mbufs,
+	.release_mbufs = idpf_qc_txq_mbufs_release,
 };
 
 static const struct rte_memzone *
@@ -183,7 +183,7 @@  idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
 		goto err_sw_ring_alloc;
 	}
 
-	idpf_reset_split_rx_bufq(bufq);
+	idpf_qc_split_rx_bufq_reset(bufq);
 	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
 			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
 	bufq->ops = &def_rxq_ops;
@@ -242,12 +242,12 @@  idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
 		IDPF_DEFAULT_RX_FREE_THRESH :
 		rx_conf->rx_free_thresh;
-	if (idpf_check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+	if (idpf_qc_rx_thresh_check(nb_desc, rx_free_thresh) != 0)
 		return -EINVAL;
 
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx] != NULL) {
-		idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
 		dev->data->rx_queues[queue_idx] = NULL;
 	}
 
@@ -300,12 +300,12 @@  idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			goto err_sw_ring_alloc;
 		}
 
-		idpf_reset_single_rx_queue(rxq);
+		idpf_qc_single_rx_queue_reset(rxq);
 		rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
 				queue_idx * vport->chunks_info.rx_qtail_spacing);
 		rxq->ops = &def_rxq_ops;
 	} else {
-		idpf_reset_split_rx_descq(rxq);
+		idpf_qc_split_rx_descq_reset(rxq);
 
 		/* Setup Rx buffer queues */
 		ret = idpf_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,
@@ -379,7 +379,7 @@  idpf_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
 	cq->tx_ring_phys_addr = mz->iova;
 	cq->compl_ring = mz->addr;
 	cq->mz = mz;
-	idpf_reset_split_tx_complq(cq);
+	idpf_qc_split_tx_complq_reset(cq);
 
 	txq->complq = cq;
 
@@ -413,12 +413,12 @@  idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		tx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh > 0) ?
 		tx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);
-	if (idpf_check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
+	if (idpf_qc_tx_thresh_check(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
 	/* Free memory if needed. */
 	if (dev->data->tx_queues[queue_idx] != NULL) {
-		idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
 		dev->data->tx_queues[queue_idx] = NULL;
 	}
 
@@ -470,10 +470,10 @@  idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	if (!is_splitq) {
 		txq->tx_ring = mz->addr;
-		idpf_reset_single_tx_queue(txq);
+		idpf_qc_single_tx_queue_reset(txq);
 	} else {
 		txq->desc_ring = mz->addr;
-		idpf_reset_split_tx_descq(txq);
+		idpf_qc_split_tx_descq_reset(txq);
 
 		/* Setup tx completion queue if split model */
 		ret = idpf_tx_complq_setup(dev, txq, queue_idx,
@@ -516,7 +516,7 @@  idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		return -EINVAL;
 	}
 
-	err = idpf_register_ts_mbuf(rxq);
+	err = idpf_qc_ts_mbuf_register(rxq);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "fail to residter timestamp mbuf %u",
 					rx_queue_id);
@@ -525,7 +525,7 @@  idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	if (rxq->bufq1 == NULL) {
 		/* Single queue */
-		err = idpf_alloc_single_rxq_mbufs(rxq);
+		err = idpf_qc_single_rxq_mbufs_alloc(rxq);
 		if (err != 0) {
 			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
 			return err;
@@ -537,12 +537,12 @@  idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 	} else {
 		/* Split queue */
-		err = idpf_alloc_split_rxq_mbufs(rxq->bufq1);
+		err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
 		if (err != 0) {
 			PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
 			return err;
 		}
-		err = idpf_alloc_split_rxq_mbufs(rxq->bufq2);
+		err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
 		if (err != 0) {
 			PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
 			return err;
@@ -664,11 +664,11 @@  idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq = dev->data->rx_queues[rx_queue_id];
 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
 		rxq->ops->release_mbufs(rxq);
-		idpf_reset_single_rx_queue(rxq);
+		idpf_qc_single_rx_queue_reset(rxq);
 	} else {
 		rxq->bufq1->ops->release_mbufs(rxq->bufq1);
 		rxq->bufq2->ops->release_mbufs(rxq->bufq2);
-		idpf_reset_split_rx_queue(rxq);
+		idpf_qc_split_rx_queue_reset(rxq);
 	}
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -695,10 +695,10 @@  idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	txq = dev->data->tx_queues[tx_queue_id];
 	txq->ops->release_mbufs(txq);
 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
-		idpf_reset_single_tx_queue(txq);
+		idpf_qc_single_tx_queue_reset(txq);
 	} else {
-		idpf_reset_split_tx_descq(txq);
-		idpf_reset_split_tx_complq(txq->complq);
+		idpf_qc_split_tx_descq_reset(txq);
+		idpf_qc_split_tx_complq_reset(txq->complq);
 	}
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -708,13 +708,13 @@  idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 void
 idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	idpf_rx_queue_release(dev->data->rx_queues[qid]);
+	idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 void
 idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	idpf_tx_queue_release(dev->data->tx_queues[qid]);
+	idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 void
@@ -776,7 +776,7 @@  idpf_set_rx_function(struct rte_eth_dev *dev)
 		if (vport->rx_vec_allowed) {
 			for (i = 0; i < dev->data->nb_tx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
-				(void)idpf_singleq_rx_vec_setup(rxq);
+				(void)idpf_qc_singleq_rx_vec_setup(rxq);
 			}
 #ifdef CC_AVX512_SUPPORT
 			if (vport->rx_use_avx512) {
@@ -835,7 +835,7 @@  idpf_set_tx_function(struct rte_eth_dev *dev)
 					txq = dev->data->tx_queues[i];
 					if (txq == NULL)
 						continue;
-					idpf_singleq_tx_vec_setup_avx512(txq);
+					idpf_qc_singleq_tx_vec_avx512_setup(txq);
 				}
 				dev->tx_pkt_burst = idpf_singleq_xmit_pkts_avx512;
 				dev->tx_pkt_prepare = idpf_prep_pkts;
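
For context on how the renamed helpers fit together, below is a hypothetical, abridged Rx queue setup path. The idpf_qc_* signatures are the ones declared by this patch in idpf_common_rxtx.h; example_rx_setup(), its parameter list, and the control flow are a simplified sketch of the idpf_rx_queue_setup() logic shown above, not a drop-in replacement, and it assumes a DPDK driver build tree for the internal headers.

#include <errno.h>
#include <stdbool.h>
#include <rte_ethdev.h>
#include "idpf_common_rxtx.h" /* internal common/idpf header; assumes a DPDK build tree */

/* Abridged Rx queue setup: validate the free threshold, release any
 * previously configured queue at this index, then reset descriptors
 * according to the queue model (buffer-queue setup for the split model
 * is elided). */
static int
example_rx_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
		 uint16_t queue_idx, uint16_t nb_desc,
		 uint16_t rx_free_thresh, bool is_splitq)
{
	if (idpf_qc_rx_thresh_check(nb_desc, rx_free_thresh) != 0)
		return -EINVAL;

	/* Free memory if a queue already exists at this index. */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	if (!is_splitq)
		idpf_qc_single_rx_queue_reset(rxq);
	else
		idpf_qc_split_rx_descq_reset(rxq);

	return 0;
}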