[01/11] net/hns3: support different numbered Rx and Tx queues

Message ID 20200109031559.63194-2-huwei013@chinasoftinc.com
State Accepted, archived
Delegated to: Ferruh Yigit
Series: misc updates and fixes for hns3 PMD driver

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/iol-nxp-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/checkpatch warning coding style issues

Commit Message

Wei Hu (Xavier) Jan. 9, 2020, 3:15 a.m. UTC
From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

Hardware does not support individually enabling, disabling or resetting
the Tx or Rx queue in the hns3 network engine; the driver must enable,
disable or reset Tx and Rx queues at the same time.

Currently, the hns3 PMD driver does not support the scenarios below:
1) When calling the following function, the input parameters nb_rx_q
   and nb_tx_q are not equal:
     rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q,
                           uint16_t nb_tx_q,
                           const struct rte_eth_conf *dev_conf);
2) When calling the following functions to set up queues, the number
   of Rx queues set up cumulatively is not equal to the number of Tx
   queues set up:
     rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, ...);
     rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, ...);
However, these are common usage scenarios in some applications, such
as l3fwd, ip_reassembly and OVS-DPDK.

This patch adds support for these usages by setting up fake Tx or Rx
queues to even out the numbers of Tx and Rx queues. These fake queues
are invisible to upper applications and cannot be used by them.
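
For illustration, the configuration below is rejected with -EINVAL
before this patch and works afterwards (a minimal sketch with assumed
values: port 0, 1024 descriptors, an already created mempool mb_pool;
error handling trimmed):

    struct rte_eth_conf conf = {0};
    uint16_t port_id = 0, q;
    int ret;

    /* 4 Rx queues but only 2 Tx queues on the same port. */
    ret = rte_eth_dev_configure(port_id, 4, 2, &conf);

    for (q = 0; q < 4; q++)
        ret = rte_eth_rx_queue_setup(port_id, q, 1024,
                                     rte_eth_dev_socket_id(port_id),
                                     NULL, mb_pool);
    for (q = 0; q < 2; q++)
        ret = rte_eth_tx_queue_setup(port_id, q, 1024,
                                     rte_eth_dev_socket_id(port_id),
                                     NULL);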

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_dcb.c       |  88 ++--
 drivers/net/hns3/hns3_dcb.h       |   4 +-
 drivers/net/hns3/hns3_ethdev.c    |  56 +--
 drivers/net/hns3/hns3_ethdev.h    |  16 +-
 drivers/net/hns3/hns3_ethdev_vf.c |  68 +--
 drivers/net/hns3/hns3_flow.c      |   9 +-
 drivers/net/hns3/hns3_rxtx.c      | 675 +++++++++++++++++++++++++-----
 drivers/net/hns3/hns3_rxtx.h      |  11 +
 8 files changed, 734 insertions(+), 193 deletions(-)

Patch

diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 19235dfb9..369a40e6a 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -578,17 +578,33 @@  hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
 }
 
 void
-hns3_tc_queue_mapping_cfg(struct hns3_hw *hw)
+hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
+{
+	uint16_t rx_qnum_per_tc;
+
+	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
+	rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
+	if (hw->alloc_rss_size != rx_qnum_per_tc) {
+		hns3_info(hw, "rss size changes from %u to %u",
+			  hw->alloc_rss_size, rx_qnum_per_tc);
+		hw->alloc_rss_size = rx_qnum_per_tc;
+	}
+	hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
+}
+
+void
+hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue)
 {
 	struct hns3_tc_queue_info *tc_queue;
 	uint8_t i;
 
+	hw->tx_qnum_per_tc = nb_queue / hw->num_tc;
 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
 		tc_queue = &hw->tc_queue[i];
 		if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
 			tc_queue->enable = true;
-			tc_queue->tqp_offset = i * hw->alloc_rss_size;
-			tc_queue->tqp_count = hw->alloc_rss_size;
+			tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
+			tc_queue->tqp_count = hw->tx_qnum_per_tc;
 			tc_queue->tc = i;
 		} else {
 			/* Set to default queue if TC is disable */
@@ -598,30 +614,22 @@  hns3_tc_queue_mapping_cfg(struct hns3_hw *hw)
 			tc_queue->tc = 0;
 		}
 	}
+	hw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc;
 }
 
 static void
-hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t queue_num)
+hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
+				 uint16_t nb_tx_q)
 {
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_pf *pf = &hns->pf;
-	uint16_t tqpnum_per_tc;
-	uint16_t alloc_tqps;
-
-	alloc_tqps = RTE_MIN(hw->tqps_num, queue_num);
-	hw->num_tc = RTE_MIN(alloc_tqps, hw->dcb_info.num_tc);
-	tqpnum_per_tc = RTE_MIN(hw->rss_size_max, alloc_tqps / hw->num_tc);
 
-	if (hw->alloc_rss_size != tqpnum_per_tc) {
-		PMD_INIT_LOG(INFO, "rss size changes from %d to %d",
-			     hw->alloc_rss_size, tqpnum_per_tc);
-		hw->alloc_rss_size = tqpnum_per_tc;
-	}
-	hw->alloc_tqps = hw->num_tc * hw->alloc_rss_size;
+	hw->num_tc = hw->dcb_info.num_tc;
+	hns3_set_rss_size(hw, nb_rx_q);
+	hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
 
-	hns3_tc_queue_mapping_cfg(hw);
-
-	memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
+	if (!hns->is_vf)
+		memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
 }
 
 int
@@ -1309,20 +1317,35 @@  hns3_dcb_info_cfg(struct hns3_adapter *hns)
 	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
 		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
 
-	hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues);
+	hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
+					 hw->data->nb_tx_queues);
 }
 
-static void
+static int
 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
 {
 	struct hns3_pf *pf = &hns->pf;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t nb_rx_q = hw->data->nb_rx_queues;
+	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	uint8_t bit_map = 0;
 	uint8_t i;
 
 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
 	    hw->dcb_info.num_pg != 1)
-		return;
+		return -EINVAL;
+
+	if (nb_rx_q < num_tc) {
+		hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+			 nb_rx_q, num_tc);
+		return -EINVAL;
+	}
+
+	if (nb_tx_q < num_tc) {
+		hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+			 nb_tx_q, num_tc);
+		return -EINVAL;
+	}
 
 	/* Currently not support uncontinuous tc */
 	hw->dcb_info.num_tc = num_tc;
@@ -1333,10 +1356,10 @@  hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
 		bit_map = 1;
 		hw->dcb_info.num_tc = 1;
 	}
-
 	hw->hw_tc_map = bit_map;
-
 	hns3_dcb_info_cfg(hns);
+
+	return 0;
 }
 
 static int
@@ -1422,10 +1445,15 @@  hns3_dcb_configure(struct hns3_adapter *hns)
 
 	hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
 	if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
-		hns3_dcb_info_update(hns, num_tc);
+		ret = hns3_dcb_info_update(hns, num_tc);
+		if (ret) {
+			hns3_err(hw, "dcb info update failed: %d", ret);
+			return ret;
+		}
+
 		ret = hns3_dcb_hw_configure(hns);
 		if (ret) {
-			hns3_err(hw, "dcb sw configure fails: %d", ret);
+			hns3_err(hw, "dcb sw configure failed: %d", ret);
 			return ret;
 		}
 	}
@@ -1479,7 +1507,8 @@  hns3_dcb_init(struct hns3_hw *hw)
 			hns3_err(hw, "dcb info init failed: %d", ret);
 			return ret;
 		}
-		hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num);
+		hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num,
+						 hw->tqps_num);
 	}
 
 	/*
@@ -1502,10 +1531,11 @@  static int
 hns3_update_queue_map_configure(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	uint16_t queue_num = hw->data->nb_rx_queues;
+	uint16_t nb_rx_q = hw->data->nb_rx_queues;
+	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	int ret;
 
-	hns3_dcb_update_tc_queue_mapping(hw, queue_num);
+	hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
 	ret = hns3_q_to_qs_map(hw);
 	if (ret) {
 		hns3_err(hw, "failed to map nq to qs! ret = %d", ret);
diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h
index 9ec4e704b..9c2c5f21c 100644
--- a/drivers/net/hns3/hns3_dcb.h
+++ b/drivers/net/hns3/hns3_dcb.h
@@ -159,7 +159,9 @@  hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
 int
 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf);
 
-void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw);
+void hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q);
+
+void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue);
 
 int hns3_dcb_cfg_update(struct hns3_adapter *hns);
 
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 49aef7dbc..800fa47cc 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2061,10 +2061,11 @@  hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
 static int
 hns3_dev_configure(struct rte_eth_dev *dev)
 {
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 	struct rte_eth_rss_conf rss_conf;
@@ -2072,23 +2073,28 @@  hns3_dev_configure(struct rte_eth_dev *dev)
 	int ret;
 
 	/*
-	 * Hardware does not support where the number of rx and tx queues is
-	 * not equal in hip08.
+	 * Hardware does not support individually enabling, disabling or
+	 * resetting the Tx or Rx queue in the hns3 network engine. The driver
+	 * must enable, disable or reset Tx and Rx queues at the same time.
+	 * When the number of Tx queues requested by the upper application is
+	 * not equal to the number of Rx queues, the driver sets up fake Tx or
+	 * Rx queues to even out the numbers of Tx/Rx queues. Otherwise, the
+	 * network engine cannot work as usual. These fake queues are invisible
+	 * to, and cannot be used by, the upper application.
 	 */
-	if (nb_rx_q != nb_tx_q) {
-		hns3_err(hw,
-			 "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
-			 "Hardware does not support this configuration!",
-			 nb_rx_q, nb_tx_q);
-		return -EINVAL;
+	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+	if (ret) {
+		hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
+		return ret;
 	}
 
+	hw->adapter_state = HNS3_NIC_CONFIGURING;
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto cfg_err;
 	}
 
-	hw->adapter_state = HNS3_NIC_CONFIGURING;
 	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
 		ret = hns3_check_dcb_cfg(dev);
 		if (ret)
@@ -2134,7 +2140,9 @@  hns3_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 
 cfg_err:
+	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
+
 	return ret;
 }
 
@@ -4084,7 +4092,7 @@  hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	/* check and configure queue intr-vector mapping */
 	if (rte_intr_cap_multiple(intr_handle) ||
 	    !RTE_ETH_DEV_SRIOV(dev).active) {
-		intr_vector = dev->data->nb_rx_queues;
+		intr_vector = hw->used_rx_queues;
 		/* creates event fd for each intr vector when MSIX is used */
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -EINVAL;
@@ -4092,10 +4100,10 @@  hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
 		intr_handle->intr_vec =
 			rte_zmalloc("intr_vec",
-				    dev->data->nb_rx_queues * sizeof(int), 0);
+				    hw->used_rx_queues * sizeof(int), 0);
 		if (intr_handle->intr_vec == NULL) {
 			hns3_err(hw, "Failed to allocate %d rx_queues"
-				     " intr_vec", dev->data->nb_rx_queues);
+				     " intr_vec", hw->used_rx_queues);
 			ret = -ENOMEM;
 			goto alloc_intr_vec_error;
 		}
@@ -4106,7 +4114,7 @@  hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
 			if (ret)
 				goto bind_vector_error;
@@ -4190,6 +4198,8 @@  hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
 	uint8_t base = 0;
 	uint8_t vec = 0;
 	uint16_t q_id;
@@ -4203,7 +4213,7 @@  hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
@@ -4446,15 +4456,13 @@  hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 	for (i = 0; i < dcb_info->nb_tcs; i++)
 		dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
 
-	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
-		dcb_info->tc_queue.tc_rxq[0][i].base =
-					hw->tc_queue[i].tqp_offset;
+	for (i = 0; i < hw->num_tc; i++) {
+		dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
 		dcb_info->tc_queue.tc_txq[0][i].base =
-					hw->tc_queue[i].tqp_offset;
-		dcb_info->tc_queue.tc_rxq[0][i].nb_queue =
-					hw->tc_queue[i].tqp_count;
+						hw->tc_queue[i].tqp_offset;
+		dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
 		dcb_info->tc_queue.tc_txq[0][i].nb_queue =
-					hw->tc_queue[i].tqp_count;
+						hw->tc_queue[i].tqp_count;
 	}
 	rte_spinlock_unlock(&hw->lock);
 
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 7422706a8..2aa4c3cd7 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -153,6 +153,12 @@  struct hns3_mac {
 	uint32_t link_speed;      /* ETH_SPEED_NUM_ */
 };
 
+struct hns3_fake_queue_data {
+	void **rx_queues; /* Array of pointers to fake RX queues. */
+	void **tx_queues; /* Array of pointers to fake TX queues. */
+	uint16_t nb_fake_rx_queues; /* Number of fake RX queues. */
+	uint16_t nb_fake_tx_queues; /* Number of fake TX queues. */
+};
 
 /* Primary process maintains driver state in main thread.
  *
@@ -365,8 +371,14 @@  struct hns3_hw {
 	struct hns3_dcb_info dcb_info;
 	enum hns3_fc_status current_fc_status; /* current flow control status */
 	struct hns3_tc_queue_info tc_queue[HNS3_MAX_TC_NUM];
-	uint16_t alloc_tqps;
-	uint16_t alloc_rss_size;    /* Queue number per TC */
+	uint16_t used_rx_queues;
+	uint16_t used_tx_queues;
+
+	/* Max of the numbers of Rx and Tx queues requested by the user */
+	uint16_t cfg_max_queues;
+	struct hns3_fake_queue_data fkq_data;     /* fake queue data */
+	uint16_t alloc_rss_size;    /* RX queue number per TC */
+	uint16_t tx_qnum_per_tc;    /* TX queue number per TC */
 
 	uint32_t flag;
 	/*
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 10969011b..71e358e81 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -428,24 +428,28 @@  hns3vf_dev_configure(struct rte_eth_dev *dev)
 	int ret;
 
 	/*
-	 * Hardware does not support where the number of rx and tx queues is
-	 * not equal in hip08.
+	 * Hardware does not support individually enabling, disabling or
+	 * resetting the Tx or Rx queue in the hns3 network engine. The driver
+	 * must enable, disable or reset Tx and Rx queues at the same time.
+	 * When the number of Tx queues requested by the upper application is
+	 * not equal to the number of Rx queues, the driver sets up fake Tx or
+	 * Rx queues to even out the numbers of Tx/Rx queues. Otherwise, the
+	 * network engine cannot work as usual. These fake queues are invisible
+	 * to, and cannot be used by, the upper application.
 	 */
-	if (nb_rx_q != nb_tx_q) {
-		hns3_err(hw,
-			 "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
-			 "Hardware does not support this configuration!",
-			 nb_rx_q, nb_tx_q);
-		return -EINVAL;
+	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+	if (ret) {
+		hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
+		return ret;
 	}
 
+	hw->adapter_state = HNS3_NIC_CONFIGURING;
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
 		hns3_err(hw, "setting link speed/duplex not supported");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto cfg_err;
 	}
 
-	hw->adapter_state = HNS3_NIC_CONFIGURING;
-
 	/* When RSS is not configured, redirect the packet queue 0 */
 	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
 		rss_conf = conf->rx_adv_conf.rss_conf;
@@ -484,7 +488,9 @@  hns3vf_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 
 cfg_err:
+	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
+
 	return ret;
 }
 
@@ -799,12 +805,12 @@  hns3vf_get_configuration(struct hns3_hw *hw)
 	return hns3vf_get_tc_info(hw);
 }
 
-static void
+static int
 hns3vf_set_tc_info(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
-	uint16_t new_tqps;
+	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	uint8_t i;
 
 	hw->num_tc = 0;
@@ -812,11 +818,22 @@  hns3vf_set_tc_info(struct hns3_adapter *hns)
 		if (hw->hw_tc_map & BIT(i))
 			hw->num_tc++;
 
-	new_tqps = RTE_MIN(hw->tqps_num, nb_rx_q);
-	hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, new_tqps / hw->num_tc);
-	hw->alloc_tqps = hw->alloc_rss_size * hw->num_tc;
+	if (nb_rx_q < hw->num_tc) {
+		hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+			 nb_rx_q, hw->num_tc);
+		return -EINVAL;
+	}
+
+	if (nb_tx_q < hw->num_tc) {
+		hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+			 nb_tx_q, hw->num_tc);
+		return -EINVAL;
+	}
 
-	hns3_tc_queue_mapping_cfg(hw);
+	hns3_set_rss_size(hw, nb_rx_q);
+	hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
+
+	return 0;
 }
 
 static void
@@ -1256,6 +1273,7 @@  hns3vf_do_stop(struct hns3_adapter *hns)
 static void
 hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint8_t base = 0;
@@ -1271,7 +1289,7 @@  hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
 							   q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
@@ -1381,7 +1399,9 @@  hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	hns3vf_set_tc_info(hns);
+	ret = hns3vf_set_tc_info(hns);
+	if (ret)
+		return ret;
 
 	ret = hns3_start_queues(hns, reset_queue);
 	if (ret) {
@@ -1412,8 +1432,8 @@  hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 
 	/* check and configure queue intr-vector mapping */
 	if (rte_intr_cap_multiple(intr_handle) ||
-		!RTE_ETH_DEV_SRIOV(dev).active) {
-		intr_vector = dev->data->nb_rx_queues;
+	    !RTE_ETH_DEV_SRIOV(dev).active) {
+		intr_vector = hw->used_rx_queues;
 		/* It creates event fd for each intr vector when MSIX is used */
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -EINVAL;
@@ -1421,10 +1441,10 @@  hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
 		intr_handle->intr_vec =
 			rte_zmalloc("intr_vec",
-				    dev->data->nb_rx_queues * sizeof(int), 0);
+				    hw->used_rx_queues * sizeof(int), 0);
 		if (intr_handle->intr_vec == NULL) {
 			hns3_err(hw, "Failed to allocate %d rx_queues"
-				     " intr_vec", dev->data->nb_rx_queues);
+				     " intr_vec", hw->used_rx_queues);
 			ret = -ENOMEM;
 			goto vf_alloc_intr_vec_error;
 		}
@@ -1435,7 +1455,7 @@  hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
 			ret = hns3vf_bind_ring_with_vector(dev, vec, true,
 							   q_id);
 			if (ret)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index bcd121f48..aa614175d 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -224,14 +224,19 @@  hns3_handle_action_queue(struct rte_eth_dev *dev,
 			 struct rte_flow_error *error)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
 	const struct rte_flow_action_queue *queue;
+	struct hns3_hw *hw = &hns->hw;
 
 	queue = (const struct rte_flow_action_queue *)action->conf;
-	if (queue->index >= hw->data->nb_rx_queues)
+	if (queue->index >= hw->used_rx_queues) {
+		hns3_err(hw, "queue ID(%d) is greater than number of "
+			  "available queue (%d) in driver.",
+			  queue->index, hw->used_rx_queues);
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
 					  "Invalid queue ID in PF");
+	}
+
 	rule->queue_id = queue->index;
 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
 	return 0;
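
For context, the queue index validated above is supplied by the
application through the generic flow API; a minimal sketch of such a
rule (pattern reduced to any-Ethernet, error handling trimmed; port_id
is assumed):

	struct rte_flow_attr attr = { .ingress = 1 };
	/* Target Rx queue: must be < hw->used_rx_queues, which may be
	 * smaller than nb_rx_queues once the TC mapping is applied. */
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &flow_err);
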
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 003a5bde4..3d13ed526 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -37,6 +37,7 @@  hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
 {
 	uint16_t i;
 
+	/* Note: Fake rx queue will not enter here */
 	if (rxq->sw_ring) {
 		for (i = 0; i < rxq->nb_rx_desc; i++) {
 			if (rxq->sw_ring[i].mbuf) {
@@ -52,6 +53,7 @@  hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
 {
 	uint16_t i;
 
+	/* Note: Fake tx queue will not enter here */
 	if (txq->sw_ring) {
 		for (i = 0; i < txq->nb_tx_desc; i++) {
 			if (txq->sw_ring[i].mbuf) {
@@ -120,22 +122,115 @@  hns3_dev_tx_queue_release(void *queue)
 	rte_spinlock_unlock(&hns->hw.lock);
 }
 
-void
-hns3_free_all_queues(struct rte_eth_dev *dev)
+static void
+hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
+{
+	struct hns3_rx_queue *rxq = queue;
+	struct hns3_adapter *hns;
+	struct hns3_hw *hw;
+	uint16_t idx;
+
+	if (rxq == NULL)
+		return;
+
+	hns = rxq->hns;
+	hw = &hns->hw;
+	idx = rxq->queue_id;
+	if (hw->fkq_data.rx_queues[idx]) {
+		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+		hw->fkq_data.rx_queues[idx] = NULL;
+	}
+
+	/* free fake rx queue arrays */
+	if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
+		hw->fkq_data.nb_fake_rx_queues = 0;
+		rte_free(hw->fkq_data.rx_queues);
+		hw->fkq_data.rx_queues = NULL;
+	}
+}
+
+static void
+hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
 {
+	struct hns3_tx_queue *txq = queue;
+	struct hns3_adapter *hns;
+	struct hns3_hw *hw;
+	uint16_t idx;
+
+	if (txq == NULL)
+		return;
+
+	hns = txq->hns;
+	hw = &hns->hw;
+	idx = txq->queue_id;
+	if (hw->fkq_data.tx_queues[idx]) {
+		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
+		hw->fkq_data.tx_queues[idx] = NULL;
+	}
+
+	/* free fake tx queue arrays */
+	if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
+		hw->fkq_data.nb_fake_tx_queues = 0;
+		rte_free(hw->fkq_data.tx_queues);
+		hw->fkq_data.tx_queues = NULL;
+	}
+}
+
+static void
+hns3_free_rx_queues(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_fake_queue_data *fkq_data;
+	struct hns3_hw *hw = &hns->hw;
+	uint16_t nb_rx_q;
 	uint16_t i;
 
-	if (dev->data->rx_queues)
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+	nb_rx_q = hw->data->nb_rx_queues;
+	for (i = 0; i < nb_rx_q; i++) {
+		if (dev->data->rx_queues[i]) {
 			hns3_rx_queue_release(dev->data->rx_queues[i]);
 			dev->data->rx_queues[i] = NULL;
 		}
+	}
+
+	/* Free fake Rx queues */
+	fkq_data = &hw->fkq_data;
+	for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
+		if (fkq_data->rx_queues[i])
+			hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
+	}
+}
 
-	if (dev->data->tx_queues)
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+static void
+hns3_free_tx_queues(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_fake_queue_data *fkq_data;
+	struct hns3_hw *hw = &hns->hw;
+	uint16_t nb_tx_q;
+	uint16_t i;
+
+	nb_tx_q = hw->data->nb_tx_queues;
+	for (i = 0; i < nb_tx_q; i++) {
+		if (dev->data->tx_queues[i]) {
 			hns3_tx_queue_release(dev->data->tx_queues[i]);
 			dev->data->tx_queues[i] = NULL;
 		}
+	}
+
+	/* Free fake Tx queues */
+	fkq_data = &hw->fkq_data;
+	for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
+		if (fkq_data->tx_queues[i])
+			hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
+	}
+}
+
+void
+hns3_free_all_queues(struct rte_eth_dev *dev)
+{
+	hns3_free_rx_queues(dev);
+	hns3_free_tx_queues(dev);
 }
 
 static int
@@ -223,17 +318,26 @@  hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
 static void
 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 {
+	uint16_t nb_rx_q = hw->data->nb_rx_queues;
+	uint16_t nb_tx_q = hw->data->nb_tx_queues;
 	struct hns3_rx_queue *rxq;
 	struct hns3_tx_queue *txq;
 	uint32_t rcb_reg;
 	int i;
 
-	for (i = 0; i < hw->data->nb_rx_queues; i++) {
-		rxq = hw->data->rx_queues[i];
-		txq = hw->data->tx_queues[i];
+	for (i = 0; i < hw->cfg_max_queues; i++) {
+		if (i < nb_rx_q)
+			rxq = hw->data->rx_queues[i];
+		else
+			rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
+		if (i < nb_tx_q)
+			txq = hw->data->tx_queues[i];
+		else
+			txq = hw->fkq_data.tx_queues[i - nb_tx_q];
 		if (rxq == NULL || txq == NULL ||
 		    (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
 			continue;
+
 		rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
 		if (en)
 			rcb_reg |= BIT(HNS3_RING_EN_B);
@@ -382,10 +486,9 @@  int
 hns3_reset_all_queues(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	int ret;
-	uint16_t i;
+	int ret, i;
 
-	for (i = 0; i < hw->data->nb_rx_queues; i++) {
+	for (i = 0; i < hw->cfg_max_queues; i++) {
 		ret = hns3_reset_queue(hns, i);
 		if (ret) {
 			hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
@@ -445,12 +548,11 @@  hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 
 	PMD_INIT_FUNC_TRACE();
 
-	rxq = hw->data->rx_queues[idx];
-
+	rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
 	ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
 	if (ret) {
 		hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
-			    idx, ret);
+			 idx, ret);
 		return ret;
 	}
 
@@ -462,15 +564,24 @@  hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 }
 
 static void
-hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 {
 	struct hns3_hw *hw = &hns->hw;
-	struct hns3_tx_queue *txq;
+	struct hns3_rx_queue *rxq;
+
+	rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
+	rxq->next_to_use = 0;
+	rxq->next_to_clean = 0;
+	hns3_init_rx_queue_hw(rxq);
+}
+
+static void
+hns3_init_tx_queue(struct hns3_tx_queue *queue)
+{
+	struct hns3_tx_queue *txq = queue;
 	struct hns3_desc *desc;
 	int i;
 
-	txq = hw->data->tx_queues[idx];
-
 	/* Clear tx bd */
 	desc = txq->tx_ring;
 	for (i = 0; i < txq->nb_tx_desc; i++) {
@@ -480,10 +591,30 @@  hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 
 	txq->next_to_use = 0;
 	txq->next_to_clean = 0;
-	txq->tx_bd_ready   = txq->nb_tx_desc;
+	txq->tx_bd_ready = txq->nb_tx_desc;
 	hns3_init_tx_queue_hw(txq);
 }
 
+static void
+hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_tx_queue *txq;
+
+	txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
+	hns3_init_tx_queue(txq);
+}
+
+static void
+hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_tx_queue *txq;
+
+	txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
+	hns3_init_tx_queue(txq);
+}
+
 static void
 hns3_init_tx_ring_tc(struct hns3_adapter *hns)
 {
@@ -500,7 +631,7 @@  hns3_init_tx_ring_tc(struct hns3_adapter *hns)
 
 		for (j = 0; j < tc_queue->tqp_count; j++) {
 			num = tc_queue->tqp_offset + j;
-			txq = hw->data->tx_queues[num];
+			txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
 			if (txq == NULL)
 				continue;
 
@@ -509,16 +640,13 @@  hns3_init_tx_ring_tc(struct hns3_adapter *hns)
 	}
 }
 
-int
-hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+static int
+hns3_start_rx_queues(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	struct rte_eth_dev_data *dev_data = hw->data;
 	struct hns3_rx_queue *rxq;
-	struct hns3_tx_queue *txq;
+	int i, j;
 	int ret;
-	int i;
-	int j;
 
 	/* Initialize RSS for queues */
 	ret = hns3_config_rss(hns);
@@ -527,49 +655,85 @@  hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
 		return ret;
 	}
 
-	if (reset_queue) {
-		ret = hns3_reset_all_queues(hns);
-		if (ret) {
-			hns3_err(hw, "Failed to reset all queues %d", ret);
-			return ret;
-		}
-	}
-
-	/*
-	 * Hardware does not support where the number of rx and tx queues is
-	 * not equal in hip08. In .dev_configure callback function we will
-	 * check the two values, here we think that the number of rx and tx
-	 * queues is equal.
-	 */
 	for (i = 0; i < hw->data->nb_rx_queues; i++) {
-		rxq = dev_data->rx_queues[i];
-		txq = dev_data->tx_queues[i];
-		if (rxq == NULL || txq == NULL || rxq->rx_deferred_start ||
-		    txq->tx_deferred_start)
+		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
+		if (rxq == NULL || rxq->rx_deferred_start)
 			continue;
-
 		ret = hns3_dev_rx_queue_start(hns, i);
 		if (ret) {
 			hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
 				 ret);
 			goto out;
 		}
-		hns3_dev_tx_queue_start(hns, i);
 	}
-	hns3_init_tx_ring_tc(hns);
 
-	hns3_enable_all_queues(hw, true);
+	for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
+		rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
+		if (rxq == NULL || rxq->rx_deferred_start)
+			continue;
+		hns3_fake_rx_queue_start(hns, i);
+	}
 	return 0;
 
 out:
 	for (j = 0; j < i; j++) {
-		rxq = dev_data->rx_queues[j];
+		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
 		hns3_rx_queue_release_mbufs(rxq);
 	}
 
 	return ret;
 }
 
+static void
+hns3_start_tx_queues(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < hw->data->nb_tx_queues; i++) {
+		txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
+		if (txq == NULL || txq->tx_deferred_start)
+			continue;
+		hns3_dev_tx_queue_start(hns, i);
+	}
+
+	for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
+		txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
+		if (txq == NULL || txq->tx_deferred_start)
+			continue;
+		hns3_fake_tx_queue_start(hns, i);
+	}
+
+	hns3_init_tx_ring_tc(hns);
+}
+
+int
+hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	if (reset_queue) {
+		ret = hns3_reset_all_queues(hns);
+		if (ret) {
+			hns3_err(hw, "Failed to reset all queues %d", ret);
+			return ret;
+		}
+	}
+
+	ret = hns3_start_rx_queues(hns);
+	if (ret) {
+		hns3_err(hw, "Failed to start rx queues: %d", ret);
+		return ret;
+	}
+
+	hns3_start_tx_queues(hns);
+	hns3_enable_all_queues(hw, true);
+
+	return 0;
+}
+
 int
 hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
 {
@@ -587,6 +751,337 @@  hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
 	return 0;
 }
 
+static void*
+hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
+			    struct hns3_queue_info *q_info)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	const struct rte_memzone *rx_mz;
+	struct hns3_rx_queue *rxq;
+	unsigned int rx_desc;
+
+	rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
+				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
+	if (rxq == NULL) {
+		hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+			 q_info->idx);
+		return NULL;
+	}
+
+	/* Allocate rx ring hardware descriptors. */
+	rxq->queue_id = q_info->idx;
+	rxq->nb_rx_desc = q_info->nb_desc;
+	rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
+	rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+					 rx_desc, HNS3_RING_BASE_ALIGN,
+					 q_info->socket_id);
+	if (rx_mz == NULL) {
+		hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+			 q_info->idx);
+		hns3_rx_queue_release(rxq);
+		return NULL;
+	}
+	rxq->mz = rx_mz;
+	rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
+	rxq->rx_ring_phys_addr = rx_mz->iova;
+
+	hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+		 rxq->rx_ring_phys_addr);
+
+	return rxq;
+}
+
+static int
+hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+			 uint16_t nb_desc, unsigned int socket_id)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_queue_info q_info;
+	struct hns3_rx_queue *rxq;
+	uint16_t nb_rx_q;
+
+	if (hw->fkq_data.rx_queues[idx]) {
+		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+		hw->fkq_data.rx_queues[idx] = NULL;
+	}
+
+	q_info.idx = idx;
+	q_info.socket_id = socket_id;
+	q_info.nb_desc = nb_desc;
+	q_info.type = "hns3 fake RX queue";
+	q_info.ring_name = "rx_fake_ring";
+	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
+	if (rxq == NULL) {
+		hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+		return -ENOMEM;
+	}
+
+	/* No need to allocate sw_ring, because upper applications do not use it */
+	rxq->sw_ring = NULL;
+
+	rxq->hns = hns;
+	rxq->rx_deferred_start = false;
+	rxq->port_id = dev->data->port_id;
+	rxq->configured = true;
+	nb_rx_q = dev->data->nb_rx_queues;
+	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+				(nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
+	rxq->rx_buf_len = hw->rx_buf_len;
+
+	rte_spinlock_lock(&hw->lock);
+	hw->fkq_data.rx_queues[idx] = rxq;
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+static void*
+hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
+			    struct hns3_queue_info *q_info)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	const struct rte_memzone *tx_mz;
+	struct hns3_tx_queue *txq;
+	struct hns3_desc *desc;
+	unsigned int tx_desc;
+	int i;
+
+	txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
+				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
+	if (txq == NULL) {
+		hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+			 q_info->idx);
+		return NULL;
+	}
+
+	/* Allocate tx ring hardware descriptors. */
+	txq->queue_id = q_info->idx;
+	txq->nb_tx_desc = q_info->nb_desc;
+	tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
+	tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+					 tx_desc, HNS3_RING_BASE_ALIGN,
+					 q_info->socket_id);
+	if (tx_mz == NULL) {
+		hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+			 q_info->idx);
+		hns3_tx_queue_release(txq);
+		return NULL;
+	}
+	txq->mz = tx_mz;
+	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
+	txq->tx_ring_phys_addr = tx_mz->iova;
+
+	hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+		 txq->tx_ring_phys_addr);
+
+	/* Clear tx bd */
+	desc = txq->tx_ring;
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		desc->tx.tp_fe_sc_vld_ra_ri = 0;
+		desc++;
+	}
+
+	return txq;
+}
+
+static int
+hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+			 uint16_t nb_desc, unsigned int socket_id)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_queue_info q_info;
+	struct hns3_tx_queue *txq;
+	uint16_t nb_tx_q;
+
+	if (hw->fkq_data.tx_queues[idx] != NULL) {
+		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
+		hw->fkq_data.tx_queues[idx] = NULL;
+	}
+
+	q_info.idx = idx;
+	q_info.socket_id = socket_id;
+	q_info.nb_desc = nb_desc;
+	q_info.type = "hns3 fake TX queue";
+	q_info.ring_name = "tx_fake_ring";
+	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
+	if (txq == NULL) {
+		hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+		return -ENOMEM;
+	}
+
+	/* No need to allocate sw_ring, because upper applications do not use it */
+	txq->sw_ring = NULL;
+
+	txq->hns = hns;
+	txq->tx_deferred_start = false;
+	txq->port_id = dev->data->port_id;
+	txq->configured = true;
+	nb_tx_q = dev->data->nb_tx_queues;
+	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+				(nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
+
+	rte_spinlock_lock(&hw->lock);
+	hw->fkq_data.tx_queues[idx] = txq;
+	rte_spinlock_unlock(&hw->lock);
+
+	return 0;
+}
+
+static int
+hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+	uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
+	void **rxq;
+	uint8_t i;
+
+	if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
+		/* first time configuration */
+
+		uint32_t size;
+		size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
+		hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
+						     RTE_CACHE_LINE_SIZE);
+		if (hw->fkq_data.rx_queues == NULL) {
+			hw->fkq_data.nb_fake_rx_queues = 0;
+			return -ENOMEM;
+		}
+	} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
+		/* re-configure */
+
+		rxq = hw->fkq_data.rx_queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			hns3_dev_rx_queue_release(rxq[i]);
+
+		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+				  RTE_CACHE_LINE_SIZE);
+		if (rxq == NULL)
+			return -ENOMEM;
+		if (nb_queues > old_nb_queues) {
+			uint16_t new_qs = nb_queues - old_nb_queues;
+			memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
+		}
+
+		hw->fkq_data.rx_queues = rxq;
+	} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
+		rxq = hw->fkq_data.rx_queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			hns3_dev_rx_queue_release(rxq[i]);
+
+		rte_free(hw->fkq_data.rx_queues);
+		hw->fkq_data.rx_queues = NULL;
+	}
+
+	hw->fkq_data.nb_fake_rx_queues = nb_queues;
+
+	return 0;
+}
+
+static int
+hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+	uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
+	void **txq;
+	uint8_t i;
+
+	if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
+		/* first time configuration */
+
+		uint32_t size;
+		size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
+		hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
+						     RTE_CACHE_LINE_SIZE);
+		if (hw->fkq_data.tx_queues == NULL) {
+			hw->fkq_data.nb_fake_tx_queues = 0;
+			return -ENOMEM;
+		}
+	} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
+		/* re-configure */
+
+		txq = hw->fkq_data.tx_queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			hns3_dev_tx_queue_release(txq[i]);
+		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+				  RTE_CACHE_LINE_SIZE);
+		if (txq == NULL)
+			return -ENOMEM;
+		if (nb_queues > old_nb_queues) {
+			uint16_t new_qs = nb_queues - old_nb_queues;
+			memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
+		}
+
+		hw->fkq_data.tx_queues = txq;
+	} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
+		txq = hw->fkq_data.tx_queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			hns3_dev_tx_queue_release(txq[i]);
+
+		rte_free(hw->fkq_data.tx_queues);
+		hw->fkq_data.tx_queues = NULL;
+	}
+	hw->fkq_data.nb_fake_tx_queues = nb_queues;
+
+	return 0;
+}
+
+int
+hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+			      uint16_t nb_tx_q)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint16_t rx_need_add_nb_q;
+	uint16_t tx_need_add_nb_q;
+	uint16_t port_id;
+	uint16_t q;
+	int ret;
+
+	/* Setup new number of fake RX/TX queues and reconfigure device. */
+	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
+	rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
+	tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
+	ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
+	if (ret) {
+		hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+		goto cfg_fake_rx_q_fail;
+	}
+
+	ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
+	if (ret) {
+		hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+		goto cfg_fake_tx_q_fail;
+	}
+
+	/* Allocate and set up fake RX queue per Ethernet port. */
+	port_id = hw->data->port_id;
+	for (q = 0; q < rx_need_add_nb_q; q++) {
+		ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
+					       rte_eth_dev_socket_id(port_id));
+		if (ret)
+			goto setup_fake_rx_q_fail;
+	}
+
+	/* Allocate and set up fake TX queue per Ethernet port. */
+	for (q = 0; q < tx_need_add_nb_q; q++) {
+		ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
+					       rte_eth_dev_socket_id(port_id));
+		if (ret)
+			goto setup_fake_tx_q_fail;
+	}
+
+	return 0;
+
+setup_fake_tx_q_fail:
+setup_fake_rx_q_fail:
+	(void)hns3_fake_tx_queue_config(hw, 0);
+cfg_fake_tx_q_fail:
+	(void)hns3_fake_rx_queue_config(hw, 0);
+cfg_fake_rx_q_fail:
+	hw->cfg_max_queues = 0;
+
+	return ret;
+}
+
 void
 hns3_dev_release_mbufs(struct hns3_adapter *hns)
 {
@@ -618,11 +1113,9 @@  hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		    struct rte_mempool *mp)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	const struct rte_memzone *rx_mz;
 	struct hns3_hw *hw = &hns->hw;
+	struct hns3_queue_info q_info;
 	struct hns3_rx_queue *rxq;
-	unsigned int desc_size = sizeof(struct hns3_desc);
-	unsigned int rx_desc;
 	int rx_entry_len;
 
 	if (dev->data->dev_started) {
@@ -642,17 +1135,20 @@  hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		dev->data->rx_queues[idx] = NULL;
 	}
 
-	rxq = rte_zmalloc_socket("hns3 RX queue", sizeof(struct hns3_rx_queue),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+	q_info.idx = idx;
+	q_info.socket_id = socket_id;
+	q_info.nb_desc = nb_desc;
+	q_info.type = "hns3 RX queue";
+	q_info.ring_name = "rx_ring";
+	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
 	if (rxq == NULL) {
-		hns3_err(hw, "Failed to allocate memory for rx queue!");
+		hns3_err(hw,
+			 "Failed to alloc mem and reserve DMA mem for rx ring!");
 		return -ENOMEM;
 	}
 
 	rxq->hns = hns;
 	rxq->mb_pool = mp;
-	rxq->nb_rx_desc = nb_desc;
-	rxq->queue_id = idx;
 	if (conf->rx_free_thresh <= 0)
 		rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
 	else
@@ -668,23 +1164,6 @@  hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		return -ENOMEM;
 	}
 
-	/* Allocate rx ring hardware descriptors. */
-	rx_desc = rxq->nb_rx_desc * desc_size;
-	rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", idx, rx_desc,
-					 HNS3_RING_BASE_ALIGN, socket_id);
-	if (rx_mz == NULL) {
-		hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
-			 idx);
-		hns3_rx_queue_release(rxq);
-		return -ENOMEM;
-	}
-	rxq->mz = rx_mz;
-	rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
-	rxq->rx_ring_phys_addr = rx_mz->iova;
-
-	hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, idx,
-		 rxq->rx_ring_phys_addr);
-
 	rxq->next_to_use = 0;
 	rxq->next_to_clean = 0;
 	rxq->nb_rx_hold = 0;
@@ -1062,14 +1541,10 @@  hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		    unsigned int socket_id, const struct rte_eth_txconf *conf)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	const struct rte_memzone *tx_mz;
 	struct hns3_hw *hw = &hns->hw;
+	struct hns3_queue_info q_info;
 	struct hns3_tx_queue *txq;
-	struct hns3_desc *desc;
-	unsigned int desc_size = sizeof(struct hns3_desc);
-	unsigned int tx_desc;
 	int tx_entry_len;
-	int i;
 
 	if (dev->data->dev_started) {
 		hns3_err(hw, "tx_queue_setup after dev_start no supported");
@@ -1088,17 +1563,19 @@  hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		dev->data->tx_queues[idx] = NULL;
 	}
 
-	txq = rte_zmalloc_socket("hns3 TX queue", sizeof(struct hns3_tx_queue),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+	q_info.idx = idx;
+	q_info.socket_id = socket_id;
+	q_info.nb_desc = nb_desc;
+	q_info.type = "hns3 TX queue";
+	q_info.ring_name = "tx_ring";
+	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
 	if (txq == NULL) {
-		hns3_err(hw, "Failed to allocate memory for tx queue!");
+		hns3_err(hw,
+			 "Failed to alloc mem and reserve DMA mem for tx ring!");
 		return -ENOMEM;
 	}
 
-	txq->nb_tx_desc = nb_desc;
-	txq->queue_id = idx;
 	txq->tx_deferred_start = conf->tx_deferred_start;
-
 	tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
 	txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
 					  RTE_CACHE_LINE_SIZE, socket_id);
@@ -1108,34 +1585,10 @@  hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		return -ENOMEM;
 	}
 
-	/* Allocate tx ring hardware descriptors. */
-	tx_desc = txq->nb_tx_desc * desc_size;
-	tx_mz = rte_eth_dma_zone_reserve(dev, "tx_ring", idx, tx_desc,
-					 HNS3_RING_BASE_ALIGN, socket_id);
-	if (tx_mz == NULL) {
-		hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
-			 idx);
-		hns3_tx_queue_release(txq);
-		return -ENOMEM;
-	}
-	txq->mz = tx_mz;
-	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
-	txq->tx_ring_phys_addr = tx_mz->iova;
-
-	hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, idx,
-		 txq->tx_ring_phys_addr);
-
-	/* Clear tx bd */
-	desc = txq->tx_ring;
-	for (i = 0; i < txq->nb_tx_desc; i++) {
-		desc->tx.tp_fe_sc_vld_ra_ri = 0;
-		desc++;
-	}
-
 	txq->hns = hns;
 	txq->next_to_use = 0;
 	txq->next_to_clean = 0;
-	txq->tx_bd_ready   = txq->nb_tx_desc;
+	txq->tx_bd_ready = txq->nb_tx_desc;
 	txq->port_id = dev->data->port_id;
 	txq->configured = true;
 	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
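
To make the fake-queue sizing concrete, a standalone sketch of the
arithmetic done by hns3_set_fake_rx_or_tx_queues() (the example values
are assumptions):

	/* E.g. nb_rx_q = 8, nb_tx_q = 12:
	 * cfg_max_queues   = RTE_MAX(8, 12) = 12;
	 * rx_need_add_nb_q = 12 - 8  = 4 -> four fake Rx queues are created;
	 * tx_need_add_nb_q = 12 - 12 = 0 -> no fake Tx queue is needed.
	 * Fake Rx queue k is then bound to the TQP register block at index
	 * nb_rx_q + k, i.e. right after the real Rx queues.
	 */
	static void
	fake_queue_counts(uint16_t nb_rx_q, uint16_t nb_tx_q,
			  uint16_t *nb_fake_rx, uint16_t *nb_fake_tx)
	{
		uint16_t cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

		*nb_fake_rx = cfg_max_queues - nb_rx_q;
		*nb_fake_tx = cfg_max_queues - nb_tx_q;
	}
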
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index cc210268a..a042c9902 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -273,6 +273,14 @@  struct hns3_tx_queue {
 	bool configured;        /* indicate if tx queue has been configured */
 };
 
+struct hns3_queue_info {
+	const char *type;   /* point to queue memory name */
+	const char *ring_name;  /* point to hardware ring name */
+	uint16_t idx;
+	uint16_t nb_desc;
+	unsigned int socket_id;
+};
+
 #define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
 	PKT_TX_OUTER_IPV6 | \
 	PKT_TX_OUTER_IPV4 | \
@@ -314,4 +322,7 @@  uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
 void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);
+int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+				  uint16_t nb_tx_q);
+
 #endif /* _HNS3_RXTX_H_ */