[v8,15/19] net/ngbe: add Tx queue start and stop

Message ID 20210708093239.13896-16-jiawenwu@trustnetic.com (mailing list archive)
State Accepted, archived
Delegated to: Andrew Rybchenko
Series: net: ngbe PMD

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Jiawen Wu July 8, 2021, 9:32 a.m. UTC
  Initialize the transmit unit, and add support for starting and stopping
the transmit unit on specified queues.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/ngbe.ini |   1 +
 drivers/net/ngbe/base/ngbe_type.h |   1 +
 drivers/net/ngbe/ngbe_ethdev.c    |   3 +
 drivers/net/ngbe/ngbe_ethdev.h    |   7 ++
 drivers/net/ngbe/ngbe_rxtx.c      | 162 +++++++++++++++++++++++++++++-
 drivers/net/ngbe/ngbe_rxtx.h      |   3 +
 6 files changed, 175 insertions(+), 2 deletions(-)
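
Usage note (not part of the patch): the new .tx_queue_start/.tx_queue_stop ops
are reached through the generic ethdev API, typically together with
tx_deferred_start. Below is a minimal application-side sketch, assuming the
port has already been configured with rte_eth_dev_configure() and its Rx
queues set up; the port ID, queue ID and ring size are arbitrary illustration
values.

#include <rte_ethdev.h>

static int
example_tx_queue_control(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	const uint16_t queue_id = 0;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Start from the driver's default Tx configuration and mark the
	 * queue as deferred, so rte_eth_dev_start() leaves it stopped.
	 */
	txconf = dev_info.default_txconf;
	txconf.tx_deferred_start = 1;

	ret = rte_eth_tx_queue_setup(port_id, queue_id, 512,
				     rte_eth_dev_socket_id(port_id), &txconf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* These calls land in the driver's .tx_queue_start/.tx_queue_stop. */
	ret = rte_eth_tx_queue_start(port_id, queue_id);
	if (ret != 0)
		return ret;

	/* ... transmit traffic ... */

	return rte_eth_tx_queue_stop(port_id, queue_id);
}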
  

Patch

diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
index 291a542a42..08d5f1b0dc 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -7,6 +7,7 @@ 
 Speed capabilities   = Y
 Link status          = Y
 Link status event    = Y
+Queue start/stop     = Y
 Multiprocess aware   = Y
 Linux                = Y
 ARMv8                = Y
diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
index 3f6698be15..2846a6a2b6 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -190,6 +190,7 @@  struct ngbe_hw {
 	u16 nb_rx_queues;
 	u16 nb_tx_queues;
 
+	u32 q_tx_regs[8 * 4];
 	bool is_pf;
 };
 
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index f88e71b855..f1911bdcbc 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -598,6 +598,7 @@  ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 				ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
+	dev_info->default_txportconf.burst_size = 32;
 	dev_info->default_rxportconf.nb_queues = 1;
 	dev_info->default_txportconf.nb_queues = 1;
 	dev_info->default_rxportconf.ring_size = 256;
@@ -1089,6 +1090,8 @@  static const struct eth_dev_ops ngbe_eth_dev_ops = {
 	.dev_start                  = ngbe_dev_start,
 	.dev_stop                   = ngbe_dev_stop,
 	.link_update                = ngbe_dev_link_update,
+	.tx_queue_start	            = ngbe_dev_tx_queue_start,
+	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
 	.rx_queue_release           = ngbe_dev_rx_queue_release,
 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 9e086eea58..5c2aea8d50 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -86,6 +86,13 @@  void ngbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int ngbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+void ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
+void ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
+
+int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 6cb0465ac4..63f0647413 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -528,7 +528,32 @@  ngbe_dev_rx_init(struct rte_eth_dev *dev)
 void
 ngbe_dev_tx_init(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct ngbe_hw     *hw;
+	struct ngbe_tx_queue *txq;
+	uint64_t bus_addr;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = ngbe_dev_hw(dev);
+
+	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
+	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);
+
+	/* Setup the Base and Length of the Tx Descriptor Rings */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		bus_addr = txq->tx_ring_phys_addr;
+		wr32(hw, NGBE_TXBAL(txq->reg_idx),
+				(uint32_t)(bus_addr & BIT_MASK32));
+		wr32(hw, NGBE_TXBAH(txq->reg_idx),
+				(uint32_t)(bus_addr >> 32));
+		wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
+			NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
+		/* Setup the HW Tx Head and TX Tail descriptor pointers */
+		wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
+		wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
+	}
 }
 
 /*
@@ -537,7 +562,140 @@  ngbe_dev_tx_init(struct rte_eth_dev *dev)
 int
 ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct ngbe_hw     *hw;
+	struct ngbe_tx_queue *txq;
+	uint32_t dmatxctl;
+	uint16_t i;
+	int ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = ngbe_dev_hw(dev);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		/* Setup Transmit Threshold Registers */
+		wr32m(hw, NGBE_TXCFG(txq->reg_idx),
+		      NGBE_TXCFG_HTHRESH_MASK |
+		      NGBE_TXCFG_WTHRESH_MASK,
+		      NGBE_TXCFG_HTHRESH(txq->hthresh) |
+		      NGBE_TXCFG_WTHRESH(txq->wthresh));
+	}
+
+	dmatxctl = rd32(hw, NGBE_DMATXCTRL);
+	dmatxctl |= NGBE_DMATXCTRL_ENA;
+	wr32(hw, NGBE_DMATXCTRL, dmatxctl);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq->tx_deferred_start == 0) {
+			ret = ngbe_dev_tx_queue_start(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+	}
 
 	return -EINVAL;
 }
+
+void
+ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+	*(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
+	*(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
+	*(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
+}
+
+void
+ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+	wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
+	wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
+	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int
+ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_tx_queue *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
+
+	/* Wait until Tx Enable ready */
+	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
+	if (poll_ms == 0)
+		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+			     tx_queue_id);
+
+	rte_wmb();
+	wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int
+ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_tx_queue *txq;
+	uint32_t txdctl;
+	uint32_t txtdh, txtdt;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Wait until Tx queue is empty */
+	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_us(RTE_NGBE_WAIT_100_US);
+		txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
+		txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
+	} while (--poll_ms && (txtdh != txtdt));
+	if (poll_ms == 0)
+		PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
+			     tx_queue_id);
+
+	ngbe_dev_save_tx_queue(hw, txq->reg_idx);
+	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);
+
+	/* Wait until Tx Enable bit clear */
+	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
+	if (poll_ms == 0)
+		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+			     tx_queue_id);
+
+	rte_delay_us(RTE_NGBE_WAIT_100_US);
+	ngbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+	if (txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->reset(txq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index 4b824d4ef8..03e98284a8 100644
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -73,6 +73,9 @@  struct ngbe_tx_desc {
 #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
 		    sizeof(struct ngbe_rx_desc))
 
+#define RTE_NGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_NGBE_WAIT_100_US               100
+
 #define NGBE_TX_MAX_SEG                    40
 
 #ifndef DEFAULT_TX_FREE_THRESH