@@ -56,5 +56,6 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw);
s32 txgbe_reset_hw(struct txgbe_hw *hw);
s32 txgbe_start_hw_raptor(struct txgbe_hw *hw);
s32 txgbe_init_phy_raptor(struct txgbe_hw *hw);
+s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval);
bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw);
#endif /* _TXGBE_HW_H_ */
@@ -1319,6 +1319,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.link_update = txgbe_dev_link_update,
.stats_get = txgbe_dev_stats_get,
.stats_reset = txgbe_dev_stats_reset,
+ .rx_queue_start = txgbe_dev_rx_queue_start,
+ .tx_queue_start = txgbe_dev_tx_queue_start,
.dev_led_on = txgbe_dev_led_on,
.dev_led_off = txgbe_dev_led_off,
};
@@ -90,6 +90,10 @@ void txgbe_dev_tx_init(struct rte_eth_dev *dev);
int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);
+int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -14,6 +14,7 @@
#include <inttypes.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "txgbe_logs.h"
#include "base/txgbe.h"
@@ -134,6 +135,38 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
RTE_SET_USED(dev);
}
+static int __rte_cold
+txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
+{
+ struct txgbe_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned int i;
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile struct txgbe_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+ (unsigned) rxq->queue_id);
+ return -ENOMEM;
+ }
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->port = rxq->port_id;
+
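+ /* Hand the buffer to the descriptor; header split is not used,
+ * so the header address is cleared.
+ */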
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxd = &rxq->rx_ring[i];
+ TXGBE_RXD_HDRADDR(rxd, 0);
+ TXGBE_RXD_PKTADDR(rxd, dma_addr);
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
void __rte_cold
txgbe_set_rx_function(struct rte_eth_dev *dev)
{
@@ -382,13 +415,153 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev)
}
}
+/*
+ * Set up link loopback mode Tx->Rx.
+ */
+static inline void __rte_cold
+txgbe_setup_loopback_link_raptor(struct txgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
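+ /* Loop transmitted frames back to the receiver at the MAC */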
+ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_LB, TXGBE_MACRXCFG_LB);
+
+ msec_delay(50);
+}
+
/*
* Start Transmit and Receive Units.
*/
int __rte_cold
txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
- RTE_SET_USED(dev);
+ struct txgbe_hw *hw;
+ struct txgbe_tx_queue *txq;
+ struct txgbe_rx_queue *rxq;
+ uint32_t dmatxctl;
+ uint32_t rxctrl;
+ uint16_t i;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Setup Transmit Threshold Registers */
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
+ TXGBE_TXCFG_HTHRESH_MASK |
+ TXGBE_TXCFG_WTHRESH_MASK,
+ TXGBE_TXCFG_HTHRESH(txq->hthresh) |
+ TXGBE_TXCFG_WTHRESH(txq->wthresh));
+ }
+
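+ /* Enable the DMA Tx engine; queues are brought up individually below */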
+ dmatxctl = rd32(hw, TXGBE_DMATXCTRL);
+ dmatxctl |= TXGBE_DMATXCTRL_ENA;
+ wr32(hw, TXGBE_DMATXCTRL, dmatxctl);
+
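+ /* Start Tx queues that are not flagged for deferred start */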
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq->tx_deferred_start) {
+ ret = txgbe_dev_tx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
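+ /* Start Rx queues that are not flagged for deferred start */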
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq->rx_deferred_start) {
+ ret = txgbe_dev_rx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Enable Receive engine */
+ rxctrl = rd32(hw, TXGBE_PBRXCTL);
+ rxctrl |= TXGBE_PBRXCTL_ENA;
+ hw->mac.enable_rx_dma(hw, rxctrl);
+
+ /* If loopback mode is enabled, set up the link accordingly */
+ if (hw->mac.type == txgbe_mac_raptor &&
+ dev->data->dev_conf.lpbk_mode)
+ txgbe_setup_loopback_link_raptor(hw);
+
+ return 0;
+}
+
+/*
+ * Start Receive Units for the specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /* Allocate buffers for descriptor rings */
+ if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
+ return -1;
+ }
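+ /* Set the Enable bit in the Rx queue configuration register */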
+ rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ rxdctl |= TXGBE_RXCFG_ENA;
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
+ wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+ wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/*
+ * Start Transmit Units for the specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_tx_queue *txq;
+ uint32_t txdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[tx_queue_id];
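+ /* Set the Enable bit in the Tx queue configuration register */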
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+ /* Wait until TX Enable ready */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable "
+ "Tx Queue %d", tx_queue_id);
+
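+ /* Ensure prior queue writes complete before restoring the tail pointer */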
+ rte_wmb();
+ wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
return 0;
}
@@ -5,20 +5,78 @@
#ifndef _TXGBE_RXTX_H_
#define _TXGBE_RXTX_H_
+/*****************************************************************************
+ * Receive Descriptor
+ *****************************************************************************/
+struct txgbe_rx_desc {
+ struct {
+ union {
+ __le32 dw0;
+ struct {
+ __le16 pkt;
+ __le16 hdr;
+ } lo;
+ };
+ union {
+ __le32 dw1;
+ struct {
+ __le16 ipid;
+ __le16 csum;
+ } hi;
+ };
+ } qw0; /* also as r.pkt_addr */
+ struct {
+ union {
+ __le32 dw2;
+ struct {
+ __le32 status;
+ } lo;
+ };
+ union {
+ __le32 dw3;
+ struct {
+ __le16 len;
+ __le16 tag;
+ } hi;
+ };
+ } qw1; /* also as r.hdr_addr */
+};
+
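+/* On the refill (read) path the descriptor is written by software as two
+ * little-endian 64-bit buffer addresses, via the macros below.
+ */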
+/* @txgbe_rx_desc.qw0 */
+#define TXGBE_RXD_PKTADDR(rxd, v) \
+ (((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
+
+/* @txgbe_rx_desc.qw1 */
+#define TXGBE_RXD_HDRADDR(rxd, v) \
+ (((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
#define RTE_PMD_TXGBE_TX_MAX_BURST 32
#define RTE_PMD_TXGBE_RX_MAX_BURST 32
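+
+/* Time budget, in milliseconds, for polling a queue enable bit to latch */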
+#define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS 10
+
+/**
+ * Structure associated with each descriptor of the RX ring of an RX queue.
+ */
+struct txgbe_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
/**
* Structure associated with each RX queue.
*/
struct txgbe_rx_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */
uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ struct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint8_t rx_deferred_start; /**< queue is not started in dev_start(). */
uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
};
@@ -28,11 +86,15 @@ struct txgbe_rx_queue {
struct txgbe_tx_queue {
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< current value of TXWP (tail) register. */
/** Start freeing TX buffers if there are fewer free descriptors than
this value. */
uint16_t tx_free_thresh;
uint16_t reg_idx; /**< TX queue register index. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold reg. */
uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+ uint8_t tx_deferred_start; /**< queue is not started in dev_start(). */
};
/* Takes an ethdev and a queue and sets up the tx function to be used based on