[v6,18/19] net/ngbe: add Rx queue start and stop

Message ID: 20210617110005.4132926-19-jiawenwu@trustnetic.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Andrew Rybchenko
Series: net: ngbe PMD

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Jiawen Wu June 17, 2021, 11 a.m. UTC
Initialize the receive unit, and add support for starting and stopping
the receive unit for specified queues.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ngbe/base/ngbe_dummy.h |  15 ++
 drivers/net/ngbe/base/ngbe_hw.c    | 105 ++++++++++++++
 drivers/net/ngbe/base/ngbe_hw.h    |   4 +
 drivers/net/ngbe/base/ngbe_type.h  |   4 +
 drivers/net/ngbe/ngbe_ethdev.c     |   5 +
 drivers/net/ngbe/ngbe_ethdev.h     |   6 +
 drivers/net/ngbe/ngbe_rxtx.c       | 215 ++++++++++++++++++++++++++++-
 7 files changed, 351 insertions(+), 3 deletions(-)
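
For context, these PMD callbacks back the generic ethdev Rx queue start/stop
API. Below is a minimal application-side sketch; the port/queue IDs,
descriptor count, and mempool are illustrative, and the port is assumed to
have been configured with rte_eth_dev_configure() already:

#include <rte_ethdev.h>

static int
rx_queue_restart_sketch(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Start from the driver defaults and mark the queue deferred,
	 * so rte_eth_dev_start() leaves it stopped. */
	rxconf = dev_info.default_rxconf;
	rxconf.rx_deferred_start = 1;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     &rxconf, mb_pool);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* Explicitly start the deferred queue, then stop it again;
	 * these map to ngbe_dev_rx_queue_start()/_stop() below. */
	ret = rte_eth_dev_rx_queue_start(port_id, 0);
	if (ret != 0)
		return ret;

	return rte_eth_dev_rx_queue_stop(port_id, 0);
}

Note that stopping a queue releases its mbufs and resets the ring (see
ngbe_dev_rx_queue_stop() in the patch), so a stopped queue must be started
again before it can receive traffic.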

Patch

diff --git a/drivers/net/ngbe/base/ngbe_dummy.h b/drivers/net/ngbe/base/ngbe_dummy.h
index dfc7b13192..384631b4f1 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -59,6 +59,18 @@  static inline s32 ngbe_mac_get_mac_addr_dummy(struct ngbe_hw *TUP0, u8 *TUP1)
 {
 	return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_enable_rx_dma_dummy(struct ngbe_hw *TUP0, u32 TUP1)
+{
+	return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mac_disable_sec_rx_path_dummy(struct ngbe_hw *TUP0)
+{
+	return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mac_enable_sec_rx_path_dummy(struct ngbe_hw *TUP0)
+{
+	return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_acquire_swfw_sync_dummy(struct ngbe_hw *TUP0,
 					u32 TUP1)
 {
@@ -167,6 +179,9 @@  static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
 	hw->mac.start_hw = ngbe_mac_start_hw_dummy;
 	hw->mac.stop_hw = ngbe_mac_stop_hw_dummy;
 	hw->mac.get_mac_addr = ngbe_mac_get_mac_addr_dummy;
+	hw->mac.enable_rx_dma = ngbe_mac_enable_rx_dma_dummy;
+	hw->mac.disable_sec_rx_path = ngbe_mac_disable_sec_rx_path_dummy;
+	hw->mac.enable_sec_rx_path = ngbe_mac_enable_sec_rx_path_dummy;
 	hw->mac.acquire_swfw_sync = ngbe_mac_acquire_swfw_sync_dummy;
 	hw->mac.release_swfw_sync = ngbe_mac_release_swfw_sync_dummy;
 	hw->mac.setup_link = ngbe_mac_setup_link_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 22ce4e52b9..45cbfaa76c 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -536,6 +536,63 @@  void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask)
 	ngbe_release_eeprom_semaphore(hw);
 }
 
+/**
+ *  ngbe_disable_sec_rx_path - Stops the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the receive data path and waits for the HW to internally empty
+ *  the Rx security block
+ **/
+s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
+{
+#define NGBE_MAX_SECRX_POLL 4000
+
+	int i;
+	u32 secrxreg;
+
+	DEBUGFUNC("ngbe_disable_sec_rx_path");
+
+	secrxreg = rd32(hw, NGBE_SECRXCTL);
+	secrxreg |= NGBE_SECRXCTL_XDSA;
+	wr32(hw, NGBE_SECRXCTL, secrxreg);
+	for (i = 0; i < NGBE_MAX_SECRX_POLL; i++) {
+		secrxreg = rd32(hw, NGBE_SECRXSTAT);
+		if (!(secrxreg & NGBE_SECRXSTAT_RDY))
+			/* Use interrupt-safe sleep just in case */
+			usec_delay(10);
+		else
+			break;
+	}
+
+	/* For informational purposes only */
+	if (i >= NGBE_MAX_SECRX_POLL)
+		DEBUGOUT("Rx unit being enabled before security path fully disabled. Continuing with init.\n");
+
+	return 0;
+}
+
+/**
+ *  ngbe_enable_sec_rx_path - Enables the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive data path.
+ **/
+s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
+{
+	u32 secrxreg;
+
+	DEBUGFUNC("ngbe_enable_sec_rx_path");
+
+	secrxreg = rd32(hw, NGBE_SECRXCTL);
+	secrxreg &= ~NGBE_SECRXCTL_XDSA;
+	wr32(hw, NGBE_SECRXCTL, secrxreg);
+	ngbe_flush(hw);
+
+	return 0;
+}
+
 /**
  *  ngbe_clear_vmdq - Disassociate a VMDq pool index from a rx address
  *  @hw: pointer to hardware struct
@@ -756,6 +813,21 @@  void ngbe_disable_rx(struct ngbe_hw *hw)
 	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
 }
 
+void ngbe_enable_rx(struct ngbe_hw *hw)
+{
+	u32 pfdtxgswc;
+
+	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, NGBE_MACRXCFG_ENA);
+	wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
+
+	if (hw->mac.set_lben) {
+		pfdtxgswc = rd32(hw, NGBE_PSRCTL);
+		pfdtxgswc |= NGBE_PSRCTL_LBENA;
+		wr32(hw, NGBE_PSRCTL, pfdtxgswc);
+		hw->mac.set_lben = false;
+	}
+}
+
 /**
  *  ngbe_set_mac_type - Sets MAC type
  *  @hw: pointer to the HW structure
@@ -802,6 +874,36 @@  s32 ngbe_set_mac_type(struct ngbe_hw *hw)
 	return err;
 }
 
+/**
+ *  ngbe_enable_rx_dma - Enable the Rx DMA unit
+ *  @hw: pointer to hardware structure
+ *  @regval: register value to write to RXCTRL
+ *
+ *  Enables the Rx DMA unit
+ **/
+s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval)
+{
+	DEBUGFUNC("ngbe_enable_rx_dma");
+
+	/*
+	 * Workaround silicon errata when enabling the Rx datapath.
+	 * If traffic is incoming before we enable the Rx unit, it could hang
+	 * the Rx DMA unit.  Therefore, make sure the security engine is
+	 * completely disabled prior to enabling the Rx unit.
+	 */
+
+	hw->mac.disable_sec_rx_path(hw);
+
+	if (regval & NGBE_PBRXCTL_ENA)
+		ngbe_enable_rx(hw);
+	else
+		ngbe_disable_rx(hw);
+
+	hw->mac.enable_sec_rx_path(hw);
+
+	return 0;
+}
+
 void ngbe_map_device_id(struct ngbe_hw *hw)
 {
 	u16 oem = hw->sub_system_id & NGBE_OEM_MASK;
@@ -886,11 +988,14 @@  s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
 	mac->init_hw = ngbe_init_hw;
 	mac->reset_hw = ngbe_reset_hw_em;
 	mac->start_hw = ngbe_start_hw;
+	mac->enable_rx_dma = ngbe_enable_rx_dma;
 	mac->get_mac_addr = ngbe_get_mac_addr;
 	mac->stop_hw = ngbe_stop_hw;
 	mac->acquire_swfw_sync = ngbe_acquire_swfw_sync;
 	mac->release_swfw_sync = ngbe_release_swfw_sync;
 
+	mac->disable_sec_rx_path = ngbe_disable_sec_rx_path;
+	mac->enable_sec_rx_path = ngbe_enable_sec_rx_path;
 	/* RAR */
 	mac->set_rar = ngbe_set_rar;
 	mac->clear_rar = ngbe_clear_rar;
diff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h
index 4fee5735ac..01f41fe9b3 100644
--- a/drivers/net/ngbe/base/ngbe_hw.h
+++ b/drivers/net/ngbe/base/ngbe_hw.h
@@ -34,6 +34,8 @@  s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 			  u32 enable_addr);
 s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index);
 s32 ngbe_init_rx_addrs(struct ngbe_hw *hw);
+s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw);
+s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw);
 
 s32 ngbe_validate_mac_addr(u8 *mac_addr);
 s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask);
@@ -46,10 +48,12 @@  s32 ngbe_init_uta_tables(struct ngbe_hw *hw);
 s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw);
 s32 ngbe_mac_check_overtemp(struct ngbe_hw *hw);
 void ngbe_disable_rx(struct ngbe_hw *hw);
+void ngbe_enable_rx(struct ngbe_hw *hw);
 s32 ngbe_init_shared_code(struct ngbe_hw *hw);
 s32 ngbe_set_mac_type(struct ngbe_hw *hw);
 s32 ngbe_init_ops_pf(struct ngbe_hw *hw);
 s32 ngbe_init_phy(struct ngbe_hw *hw);
+s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval);
 void ngbe_map_device_id(struct ngbe_hw *hw);
 
 #endif /* _NGBE_HW_H_ */
diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
index 9312a1ee44..a06e6d240e 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -97,6 +97,9 @@  struct ngbe_mac_info {
 	s32 (*start_hw)(struct ngbe_hw *hw);
 	s32 (*stop_hw)(struct ngbe_hw *hw);
 	s32 (*get_mac_addr)(struct ngbe_hw *hw, u8 *mac_addr);
+	s32 (*enable_rx_dma)(struct ngbe_hw *hw, u32 regval);
+	s32 (*disable_sec_rx_path)(struct ngbe_hw *hw);
+	s32 (*enable_sec_rx_path)(struct ngbe_hw *hw);
 	s32 (*acquire_swfw_sync)(struct ngbe_hw *hw, u32 mask);
 	void (*release_swfw_sync)(struct ngbe_hw *hw, u32 mask);
 
@@ -190,6 +193,7 @@  struct ngbe_hw {
 	u16 nb_rx_queues;
 	u16 nb_tx_queues;
 
+	u32 q_rx_regs[8 * 4];
 	u32 q_tx_regs[8 * 4];
 	bool is_pf;
 };
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index bd32f20c38..ef8ea5ecff 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -572,6 +572,8 @@  ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+	dev_info->min_rx_bufsize = 1024;
+	dev_info->max_rx_pktlen = 15872;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -601,6 +603,7 @@  ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 				ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
+	dev_info->default_rxportconf.burst_size = 32;
 	dev_info->default_txportconf.burst_size = 32;
 	dev_info->default_rxportconf.nb_queues = 1;
 	dev_info->default_txportconf.nb_queues = 1;
@@ -1099,6 +1102,8 @@  static const struct eth_dev_ops ngbe_eth_dev_ops = {
 	.dev_start                  = ngbe_dev_start,
 	.dev_stop                   = ngbe_dev_stop,
 	.link_update                = ngbe_dev_link_update,
+	.rx_queue_start	            = ngbe_dev_rx_queue_start,
+	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 40a28711f1..d576eef5d6 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -86,9 +86,15 @@  void ngbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int ngbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+void ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id);
+void ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id);
 void ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
 void ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
 
+int ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
 int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index b05cb0ec34..0801424245 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -935,15 +935,111 @@  ngbe_dev_clear_queues(struct rte_eth_dev *dev)
 	}
 }
 
+static int __rte_cold
+ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
+{
+	struct ngbe_rx_entry *rxe = rxq->sw_ring;
+	uint64_t dma_addr;
+	unsigned int i;
+
+	/* Initialize software ring entries */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		/* the ring can also be modified by hardware */
+		volatile struct ngbe_rx_desc *rxd;
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+		if (mbuf == NULL) {
+			PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
+				     (unsigned int)rxq->queue_id,
+				     (unsigned int)rxq->port_id);
+			return -ENOMEM;
+		}
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		rxd = &rxq->rx_ring[i];
+		NGBE_RXD_HDRADDR(rxd, 0);
+		NGBE_RXD_PKTADDR(rxd, dma_addr);
+		rxe[i].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
 /*
  * Initializes Receive Unit.
  */
 int __rte_cold
 ngbe_dev_rx_init(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct ngbe_hw *hw;
+	struct ngbe_rx_queue *rxq;
+	uint64_t bus_addr;
+	uint32_t fctrl;
+	uint32_t hlreg0;
+	uint32_t srrctl;
+	uint16_t buf_size;
+	uint16_t i;
 
-	return -EINVAL;
+	PMD_INIT_FUNC_TRACE();
+	hw = ngbe_dev_hw(dev);
+
+	/*
+	 * Make sure receives are disabled while setting
+	 * up the Rx context (registers, descriptor rings, etc.).
+	 */
+	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
+	wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
+
+	/* Enable receipt of broadcasted frames */
+	fctrl = rd32(hw, NGBE_PSRCTL);
+	fctrl |= NGBE_PSRCTL_BCA;
+	wr32(hw, NGBE_PSRCTL, fctrl);
+
+	hlreg0 = rd32(hw, NGBE_SECRXCTL);
+	hlreg0 &= ~NGBE_SECRXCTL_XDSA;
+	wr32(hw, NGBE_SECRXCTL, hlreg0);
+
+	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
+			NGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));
+
+	/* Setup Rx queues */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		/* Setup the Base and Length of the Rx Descriptor Rings */
+		bus_addr = rxq->rx_ring_phys_addr;
+		wr32(hw, NGBE_RXBAL(rxq->reg_idx),
+				(uint32_t)(bus_addr & BIT_MASK32));
+		wr32(hw, NGBE_RXBAH(rxq->reg_idx),
+				(uint32_t)(bus_addr >> 32));
+		wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
+		wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);
+
+		srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
+
+		/* Set if packets are dropped when no descriptors available */
+		if (rxq->drop_en)
+			srrctl |= NGBE_RXCFG_DROP;
+
+		/*
+		 * Configure the Rx buffer size in the PKTLEN field of
+		 * the RXCFG register of the queue.
+		 * The value is in 1 KB resolution. Valid values can be from
+		 * 1 KB to 16 KB.
+		 */
+		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM);
+		buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
+		srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
+
+		wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
+	}
+
+	return 0;
 }
 
 /*
@@ -988,7 +1084,9 @@  ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
 	struct ngbe_hw     *hw;
 	struct ngbe_tx_queue *txq;
+	struct ngbe_rx_queue *rxq;
 	uint32_t dmatxctl;
+	uint32_t rxctrl;
 	uint16_t i;
 	int ret = 0;
 
@@ -1018,7 +1116,39 @@  ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
 		}
 	}
 
-	return -EINVAL;
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (rxq->rx_deferred_start == 0) {
+			ret = ngbe_dev_rx_queue_start(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	/* Enable Receive engine */
+	rxctrl = rd32(hw, NGBE_PBRXCTL);
+	rxctrl |= NGBE_PBRXCTL_ENA;
+	hw->mac.enable_rx_dma(hw, rxctrl);
+
+	return 0;
+}
+
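+/*
+ * Save the Rx queue registers (base address low/high and config) so the
+ * queue context can be restored across a stop/start cycle.
+ */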
+void
+ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+	*(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
+	*(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
+	*(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
+}
+
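+/*
+ * Restore the saved Rx queue registers; RXCFG is written with the enable
+ * bit cleared so that the caller controls when the queue restarts.
+ */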
+void
+ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+	wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
+	wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
+	wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
 }
 
 void
@@ -1039,6 +1169,85 @@  ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
 	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
 }
 
+/*
+ * Start Receive Units for specified queue.
+ */
+int __rte_cold
+ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	/* Allocate buffers for descriptor rings */
+	if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
+		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+			     rx_queue_id);
+		return -1;
+	}
+	rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+	rxdctl |= NGBE_RXCFG_ENA;
+	wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+	/* Wait until Rx Enable ready */
+	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
+	if (poll_ms == 0)
+		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
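+	/* Ensure descriptor writes are visible to HW before updating the tail */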
+	rte_wmb();
+	wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
+	wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __rte_cold
+ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+	struct ngbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
+	wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);
+
+	/* Wait until Rx Enable bit clear */
+	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
+	if (poll_ms == 0)
+		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+	rte_delay_us(RTE_NGBE_WAIT_100_US);
+	ngbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+	ngbe_rx_queue_release_mbufs(rxq);
+	ngbe_reset_rx_queue(adapter, rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
 /*
  * Start Transmit Units for specified queue.
  */