From patchwork Tue Sep 1 11:50:50 2020
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Date: Tue, 1 Sep 2020 19:50:50 +0800
Message-Id: <20200901115113.1529675-19-jiawenwu@trustnetic.com>
In-Reply-To: <20200901115113.1529675-1-jiawenwu@trustnetic.com>
References: <20200901115113.1529675-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v1 19/42] net/txgbe: add RX and TX start

Add receive and transmit unit start for the specified queue.
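The Rx path below allocates and posts one mbuf per descriptor, enables the
ring, then polls TXGBE_RXCFG until the enable bit reads back; the Tx path
enables the ring and bumps the write pointer. Queues flagged with
rx_deferred_start/tx_deferred_start are skipped by txgbe_dev_rxtx_start()
and must be started explicitly. As a minimal, illustrative sketch (not part
of this patch), an application would reach the new rx_queue_start op through
the generic ethdev API roughly as follows; the queue id passed in and the
512-entry ring are arbitrary example values:

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    static int
    start_deferred_rx_queue(uint16_t port_id, uint16_t queue_id,
                            struct rte_mempool *mb_pool)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_rxconf rxconf;
            int ret;

            ret = rte_eth_dev_info_get(port_id, &dev_info);
            if (ret != 0)
                    return ret;

            /* Mark the queue deferred so dev_start leaves it down. */
            rxconf = dev_info.default_rxconf;
            rxconf.rx_deferred_start = 1;

            ret = rte_eth_rx_queue_setup(port_id, queue_id, 512,
                            rte_eth_dev_socket_id(port_id),
                            &rxconf, mb_pool);
            if (ret != 0)
                    return ret;

            ret = rte_eth_dev_start(port_id);
            if (ret != 0)
                    return ret;

            /* Dispatches to the PMD's rx_queue_start op, i.e.
             * txgbe_dev_rx_queue_start() added by this patch. */
            return rte_eth_dev_rx_queue_start(port_id, queue_id);
    }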
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/base/txgbe_hw.h |   1 +
 drivers/net/txgbe/txgbe_ethdev.c  |   2 +
 drivers/net/txgbe/txgbe_ethdev.h  |   4 +
 drivers/net/txgbe/txgbe_rxtx.c    | 175 +++++++++++++++++++++++++++++-
 drivers/net/txgbe/txgbe_rxtx.h    |  62 +++++++++++
 5 files changed, 243 insertions(+), 1 deletion(-)

diff --git a/drivers/net/txgbe/base/txgbe_hw.h b/drivers/net/txgbe/base/txgbe_hw.h
index f57c26bee..a597383b8 100644
--- a/drivers/net/txgbe/base/txgbe_hw.h
+++ b/drivers/net/txgbe/base/txgbe_hw.h
@@ -56,5 +56,6 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw);
 s32 txgbe_reset_hw(struct txgbe_hw *hw);
 s32 txgbe_start_hw_raptor(struct txgbe_hw *hw);
 s32 txgbe_init_phy_raptor(struct txgbe_hw *hw);
+s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval);
 bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw);
 #endif /* _TXGBE_HW_H_ */

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index abc457109..4fab88c5c 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1319,6 +1319,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.link_update                = txgbe_dev_link_update,
 	.stats_get                  = txgbe_dev_stats_get,
 	.stats_reset                = txgbe_dev_stats_reset,
+	.rx_queue_start             = txgbe_dev_rx_queue_start,
+	.tx_queue_start             = txgbe_dev_tx_queue_start,
 	.dev_led_on                 = txgbe_dev_led_on,
 	.dev_led_off                = txgbe_dev_led_off,
 };

diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 6739b580c..2dc0327cb 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -90,6 +90,10 @@ void txgbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);

diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index d3782f44d..ad5d1d22f 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -14,6 +14,7 @@
 #include 
 #include 
+#include 
 
 #include "txgbe_logs.h"
 #include "base/txgbe.h"
@@ -134,6 +135,38 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
 	RTE_SET_USED(dev);
 }
 
+static int __rte_cold
+txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
+{
+	struct txgbe_rx_entry *rxe = rxq->sw_ring;
+	uint64_t dma_addr;
+	unsigned int i;
+
+	/* Initialize software ring entries */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		volatile struct txgbe_rx_desc *rxd;
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+		if (mbuf == NULL) {
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+				     (unsigned int)rxq->queue_id);
+			return -ENOMEM;
+		}
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		rxd = &rxq->rx_ring[i];
+		TXGBE_RXD_HDRADDR(rxd, 0);
+		TXGBE_RXD_PKTADDR(rxd, dma_addr);
+		rxe[i].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
 void __rte_cold
 txgbe_set_rx_function(struct rte_eth_dev *dev)
 {
@@ -382,13 +415,153 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev)
 	}
 }
 
+/*
+ * Set up link loopback mode Tx->Rx.
+ */
+static inline void __rte_cold
+txgbe_setup_loopback_link_raptor(struct txgbe_hw *hw)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_LB, TXGBE_MACRXCFG_LB);
+
+	msec_delay(50);
+}
+
 /*
  * Start Transmit and Receive Units.
 */
 int __rte_cold
 txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct txgbe_hw *hw;
+	struct txgbe_tx_queue *txq;
+	struct txgbe_rx_queue *rxq;
+	uint32_t dmatxctl;
+	uint32_t rxctrl;
+	uint16_t i;
+	int ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		/* Setup Transmit Threshold Registers */
+		wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
+		      TXGBE_TXCFG_HTHRESH_MASK |
+		      TXGBE_TXCFG_WTHRESH_MASK,
+		      TXGBE_TXCFG_HTHRESH(txq->hthresh) |
+		      TXGBE_TXCFG_WTHRESH(txq->wthresh));
+	}
+
+	dmatxctl = rd32(hw, TXGBE_DMATXCTRL);
+	dmatxctl |= TXGBE_DMATXCTRL_ENA;
+	wr32(hw, TXGBE_DMATXCTRL, dmatxctl);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq->tx_deferred_start) {
+			ret = txgbe_dev_tx_queue_start(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq->rx_deferred_start) {
+			ret = txgbe_dev_rx_queue_start(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	/* Enable Receive engine */
+	rxctrl = rd32(hw, TXGBE_PBRXCTL);
+	rxctrl |= TXGBE_PBRXCTL_ENA;
+	hw->mac.enable_rx_dma(hw, rxctrl);
+
+	/* If loopback mode is enabled, set up the link accordingly */
+	if (hw->mac.type == txgbe_mac_raptor &&
+	    dev->data->dev_conf.lpbk_mode)
+		txgbe_setup_loopback_link_raptor(hw);
+
+	return 0;
+}
+
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	/* Allocate buffers for descriptor rings */
+	if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+			     rx_queue_id);
+		return -1;
+	}
+	rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	rxdctl |= TXGBE_RXCFG_ENA;
+	wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+	/* Wait until RX Enable ready */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+	rte_wmb();
+	wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+	wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_tx_queue *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+	/* Wait until TX Enable ready */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", tx_queue_id);
+
+	rte_wmb();
+	wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
 	return 0;
 }

diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 2d337c46a..b8ca83672 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -5,20 +5,78 @@
 #ifndef _TXGBE_RXTX_H_
 #define _TXGBE_RXTX_H_
 
+/*****************************************************************************
+ * Receive Descriptor
+ *****************************************************************************/
+struct txgbe_rx_desc {
+	struct {
+		union {
+			__le32 dw0;
+			struct {
+				__le16 pkt;
+				__le16 hdr;
+			} lo;
+		};
+		union {
+			__le32 dw1;
+			struct {
+				__le16 ipid;
+				__le16 csum;
+			} hi;
+		};
+	} qw0; /* also as r.pkt_addr */
+	struct {
+		union {
+			__le32 dw2;
+			struct {
+				__le32 status;
+			} lo;
+		};
+		union {
+			__le32 dw3;
+			struct {
+				__le16 len;
+				__le16 tag;
+			} hi;
+		};
+	} qw1; /* also as r.hdr_addr */
+};
+
+/* @txgbe_rx_desc.qw0 */
+#define TXGBE_RXD_PKTADDR(rxd, v) \
+	(((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
+
+/* @txgbe_rx_desc.qw1 */
+#define TXGBE_RXD_HDRADDR(rxd, v) \
+	(((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
+
 #define RTE_PMD_TXGBE_TX_MAX_BURST 32
 #define RTE_PMD_TXGBE_RX_MAX_BURST 32
 
+#define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS 10
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct txgbe_rx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
 /**
  * Structure associated with each RX queue.
  */
 struct txgbe_rx_queue {
 	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+	volatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */
 	uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+	struct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */
 	uint16_t nb_rx_desc; /**< number of RX descriptors. */
+	uint16_t queue_id; /**< RX queue index. */
 	uint16_t reg_idx;  /**< RX queue register index. */
+	uint16_t port_id;  /**< Device port identifier. */
 	uint8_t crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
+	uint8_t rx_deferred_start; /**< not in global dev start. */
 	uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
 };
 
@@ -28,11 +86,15 @@ struct txgbe_rx_queue {
 struct txgbe_tx_queue {
 	uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
 	uint16_t nb_tx_desc; /**< number of TX descriptors. */
+	uint16_t tx_tail;    /**< current value of TDT reg. */
 	/**< Start freeing TX buffers if there are less free descriptors than
 	     this value. */
 	uint16_t tx_free_thresh;
 	uint16_t reg_idx;    /**< TX queue register index. */
+	uint8_t hthresh;     /**< Host threshold register. */
+	uint8_t wthresh;     /**< Write-back threshold reg. */
 	uint64_t offloads;   /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint8_t tx_deferred_start; /**< not in global dev start. */
 };
 
 /* Takes an ethdev and a queue and sets up the tx function to be used based on