From patchwork Tue Sep 1 11:51:05 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Jiawen Wu <jiawenwu@trustnetic.com>
X-Patchwork-Id: 76237
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Date: Tue, 1 Sep 2020 19:51:05 +0800
Message-Id: <20200901115113.1529675-34-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20200901115113.1529675-1-jiawenwu@trustnetic.com>
References: <20200901115113.1529675-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v1 34/42] net/txgbe: add remaining RX and TX queue operations
List-Id: DPDK patches and discussions <dev.dpdk.org>

Add the remaining receive and transmit queue operations: device
configure, Rx queue interrupt enable/disable, Rx queue count, Rx/Tx
descriptor done/status, queue info get, Tx done cleanup and Tx queue
rate limiting.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/txgbe_ethdev.c | 123 +++++++++++++++
 drivers/net/txgbe/txgbe_ethdev.h |  16 ++
 drivers/net/txgbe/txgbe_rxtx.c   | 259 +++++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h   |   1 +
 4 files changed, 399 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index ba2849a82..54c97f81c 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -622,6 +622,46 @@ static struct rte_pci_driver rte_txgbe_pmd = {
+static int
+txgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+static int
+txgbe_dev_configure(struct rte_eth_dev *dev)
+{
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+	/* multiple queue mode checking */
+	ret = txgbe_check_mq_mode(dev);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
+			    ret);
+		return ret;
+	}
+
+	/* set flag to update link status after init */
+	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+
+	/*
+	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
+	 * allocation preconditions, it will be reset.
+	 */
+	adapter->rx_bulk_alloc_allowed = true;
+
+	return 0;
+}
+
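For context: .dev_configure is reached through the generic
rte_eth_dev_configure() API. Below is a minimal application-side sketch
of the path exercised above; the port id, queue counts and descriptor
counts are illustrative and not part of this patch.

    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mempool.h>

    /* Sketch: configure one Rx/Tx queue pair with RSS hashing enabled.
     * Port id, ring sizes and queue counts are illustrative. */
    static int
    configure_port(uint16_t port_id, struct rte_mempool *mb_pool)
    {
        struct rte_eth_conf conf = {
            .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
        };
        int ret;

        /* Ends up in the PMD's .dev_configure, txgbe_dev_configure() */
        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
            return ret;

        ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                NULL, mb_pool);
        if (ret != 0)
            return ret;

        ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
        if (ret != 0)
            return ret;

        return rte_eth_dev_start(port_id);
    }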
 static void
 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
 {
@@ -2062,6 +2102,47 @@ txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
 	return 0;
 }
 
+static int
+txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint32_t mask;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	if (queue_id < 32) {
+		mask = rd32(hw, TXGBE_IMS(0));
+		mask &= (1 << queue_id);
+		wr32(hw, TXGBE_IMS(0), mask);
+	} else if (queue_id < 64) {
+		mask = rd32(hw, TXGBE_IMS(1));
+		mask &= (1 << (queue_id - 32));
+		wr32(hw, TXGBE_IMS(1), mask);
+	}
+	rte_intr_enable(intr_handle);
+
+	return 0;
+}
+
+static int
+txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	uint32_t mask;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	if (queue_id < 32) {
+		mask = rd32(hw, TXGBE_IMS(0));
+		mask &= ~(1 << queue_id);
+		wr32(hw, TXGBE_IMS(0), mask);
+	} else if (queue_id < 64) {
+		mask = rd32(hw, TXGBE_IMS(1));
+		mask &= ~(1 << (queue_id - 32));
+		wr32(hw, TXGBE_IMS(1), mask);
+	}
+
+	return 0;
+}
+
 /**
  * set the IVAR registers, mapping interrupt causes to vectors
  * @param hw
@@ -2151,6 +2232,37 @@ txgbe_configure_msix(struct rte_eth_dev *dev)
 			| TXGBE_ITR_WRDSA);
 }
 
+int
+txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+			   uint16_t queue_idx, uint16_t tx_rate)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t bcnrc_val;
+
+	if (queue_idx >= hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	if (tx_rate != 0) {
+		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
+		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
+	} else {
+		bcnrc_val = 0;
+	}
+
+	/*
+	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
+	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+	 */
+	wr32(hw, TXGBE_ARBTXMMW, 0x14);
+
+	/* Set ARBTXRATE of queue X */
+	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
+	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
+	txgbe_flush(hw);
+
+	return 0;
+}
+
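The two interrupt ops above are what rte_eth_dev_rx_intr_enable() and
rte_eth_dev_rx_intr_disable() dispatch to, and the rate-limit op backs
rte_eth_set_queue_rate_limit(). A minimal sketch of the latter follows;
the port id, queue id and rate are illustrative, and the rate unit is
Mbps.

    #include <rte_ethdev.h>

    /* Sketch: cap Tx queue 0 of the given port at 1000 Mbps.
     * Port id, queue id and rate are illustrative values. */
    static int
    limit_tx_queue(uint16_t port_id)
    {
        /* Dispatches to .set_queue_rate_limit,
         * here txgbe_set_queue_rate_limit() */
        return rte_eth_set_queue_rate_limit(port_id, 0, 1000);
    }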
 static u8 *
 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
 			u8 **mc_addr_ptr, u32 *vmdq)
@@ -2299,6 +2411,7 @@ txgbe_get_module_eeprom(struct rte_eth_dev *dev,
 }
 
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
+	.dev_configure = txgbe_dev_configure,
 	.dev_start = txgbe_dev_start,
 	.dev_stop = txgbe_dev_stop,
 	.dev_set_link_up = txgbe_dev_set_link_up,
@@ -2322,7 +2435,13 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.tx_queue_start = txgbe_dev_tx_queue_start,
 	.tx_queue_stop = txgbe_dev_tx_queue_stop,
 	.rx_queue_setup = txgbe_dev_rx_queue_setup,
+	.rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
 	.rx_queue_release = txgbe_dev_rx_queue_release,
+	.rx_queue_count = txgbe_dev_rx_queue_count,
+	.rx_descriptor_done = txgbe_dev_rx_descriptor_done,
+	.rx_descriptor_status = txgbe_dev_rx_descriptor_status,
+	.tx_descriptor_status = txgbe_dev_tx_descriptor_status,
 	.tx_queue_setup = txgbe_dev_tx_queue_setup,
 	.tx_queue_release = txgbe_dev_tx_queue_release,
 	.dev_led_on = txgbe_dev_led_on,
@@ -2330,12 +2449,16 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.mac_addr_add = txgbe_add_rar,
 	.mac_addr_remove = txgbe_remove_rar,
 	.mac_addr_set = txgbe_set_default_mac_addr,
+	.set_queue_rate_limit = txgbe_set_queue_rate_limit,
 	.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
+	.rxq_info_get = txgbe_rxq_info_get,
+	.txq_info_get = txgbe_txq_info_get,
 	.get_eeprom_length = txgbe_get_eeprom_length,
 	.get_eeprom = txgbe_get_eeprom,
 	.set_eeprom = txgbe_set_eeprom,
 	.get_module_info = txgbe_get_module_info,
 	.get_module_eeprom = txgbe_get_module_eeprom,
+	.tx_done_cleanup = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index b25846721..017d708ae 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -125,6 +125,14 @@ int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
 
+uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+		uint16_t rx_queue_id);
+
+int txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
 int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
@@ -144,6 +152,12 @@ int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
@@ -182,6 +196,8 @@ void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);
 
 void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
+int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+	uint16_t tx_rate);
 #define TXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define TXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define TXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
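With these ops wired into txgbe_eth_dev_ops, the generic ethdev queue
queries start working for this PMD. A minimal sketch of an application
polling the Rx ring fill level; the port and queue ids are illustrative.

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: report how many received descriptors are pending on Rx
     * queue 0 and whether the descriptor at offset 0 is ready. */
    static void
    poll_rx_ring(uint16_t port_id)
    {
        /* Calls .rx_queue_count, here txgbe_dev_rx_queue_count() */
        int used = rte_eth_rx_queue_count(port_id, 0);

        /* Calls .rx_descriptor_status,
         * here txgbe_dev_rx_descriptor_status() */
        int status = rte_eth_rx_descriptor_status(port_id, 0, 0);

        printf("rxq0: %d descriptors used, head is %s\n", used,
               status == RTE_ETH_RX_DESC_DONE ? "done" : "not done");
    }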
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index f50bc82ce..df094408f 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1900,6 +1900,97 @@ txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
 	}
 }
 
+static int
+txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
+{
+	struct txgbe_tx_entry *swr_ring = txq->sw_ring;
+	uint16_t i, tx_last, tx_id;
+	uint16_t nb_tx_free_last;
+	uint16_t nb_tx_to_clean;
+	uint32_t pkt_cnt;
+
+	/* Start freeing mbufs from the entry after tx_tail */
+	tx_last = txq->tx_tail;
+	tx_id = swr_ring[tx_last].next_id;
+
+	if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
+		return 0;
+
+	nb_tx_to_clean = txq->nb_tx_free;
+	nb_tx_free_last = txq->nb_tx_free;
+	if (!free_cnt)
+		free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the number of
+	 * freeable mbufs and packets.
+	 */
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean &&
+			pkt_cnt < free_cnt &&
+			tx_id != tx_last; i++) {
+			if (swr_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+				swr_ring[tx_id].mbuf = NULL;
+
+				/*
+				 * last segment in the packet,
+				 * increment packet count
+				 */
+				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = swr_ring[tx_id].next_id;
+		}
+
+		if (pkt_cnt < free_cnt) {
+			if (txgbe_xmit_cleanup(txq))
+				break;
+
+			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+			nb_tx_free_last = txq->nb_tx_free;
+		}
+	}
+
+	return (int)pkt_cnt;
+}
+
+static int
+txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	int i, n, cnt;
+
+	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+		free_cnt = txq->nb_tx_desc;
+
+	cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+	for (i = 0; i < cnt; i += n) {
+		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+			break;
+
+		n = txgbe_tx_free_bufs(txq);
+
+		if (n == 0)
+			break;
+	}
+
+	return i;
+}
+
+int
+txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+	struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+
+	if (txq->offloads == 0 &&
+	    txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
+		return txgbe_tx_done_cleanup_simple(txq, free_cnt);
+
+	return txgbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
 static void __rte_cold
 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
 {
@@ -1924,9 +2015,49 @@ txgbe_dev_tx_queue_release(void *txq)
 	txgbe_tx_queue_release(txq);
 }
 
+/* (Re)set dynamic txgbe_tx_queue fields to defaults */
+static void __rte_cold
+txgbe_reset_tx_queue(struct txgbe_tx_queue *txq)
+{
+	static const struct txgbe_tx_desc zeroed_desc = {0};
+	struct txgbe_tx_entry *txe = txq->sw_ring;
+	uint16_t prev, i;
+
+	/* Zero out HW ring memory */
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->tx_ring[i] = zeroed_desc;
+	}
+
+	/* Initialize SW ring entries */
+	prev = (uint16_t)(txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];
+
+		txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);
+		txe[i].mbuf = NULL;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+	txq->tx_tail = 0;
+
+	/*
+	 * Always allow 1 descriptor to be un-allocated to avoid
+	 * a H/W race condition
+	 */
+	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->ctx_curr = 0;
+	memset((void *)&txq->ctx_cache, 0,
+	       TXGBE_CTX_NUM * sizeof(struct txgbe_ctx_info));
+}
+
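The cleanup entry point added in this hunk is reached through the
generic rte_eth_tx_done_cleanup() API; note the simple path is only
taken when no Tx offloads are enabled and tx_free_thresh is at least
RTE_PMD_TXGBE_TX_MAX_BURST, mirroring the split used for the fast
transmit routine. A minimal sketch of application usage; ids and the
budget are illustrative.

    #include <rte_ethdev.h>

    /* Sketch: reclaim up to 64 transmitted packets' mbufs from Tx
     * queue 0, e.g. when the application's mempool is running low. */
    static int
    reclaim_tx_mbufs(uint16_t port_id)
    {
        /* Dispatches to .tx_done_cleanup, here
         * txgbe_dev_tx_done_cleanup(); returns the number of packets
         * freed, or a negative errno on failure. */
        return rte_eth_tx_done_cleanup(port_id, 0, 64);
    }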
 static const struct txgbe_txq_ops def_txq_ops = {
 	.release_mbufs = txgbe_tx_queue_release_mbufs,
 	.free_swring = txgbe_tx_free_swring,
+	.reset = txgbe_reset_tx_queue,
 };
 
 /* Takes an ethdev and a queue and sets up the tx function to be used based on
@@ -2491,6 +2622,97 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
+uint32_t
+txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define TXGBE_RXQ_SCAN_INTERVAL 4
+	volatile struct txgbe_rx_desc *rxdp;
+	struct txgbe_rx_queue *rxq;
+	uint32_t desc = 0;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+	while ((desc < rxq->nb_rx_desc) &&
+		(rxdp->qw1.lo.status &
+			rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
+		desc += TXGBE_RXQ_SCAN_INTERVAL;
+		rxdp += TXGBE_RXQ_SCAN_INTERVAL;
+		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+			rxdp = &(rxq->rx_ring[rxq->rx_tail +
+				desc - rxq->nb_rx_desc]);
+	}
+
+	return desc;
+}
+
+int
+txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+	volatile struct txgbe_rx_desc *rxdp;
+	struct txgbe_rx_queue *rxq = rx_queue;
+	uint32_t desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return 0;
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	rxdp = &rxq->rx_ring[desc];
+	return !!(rxdp->qw1.lo.status &
+		rte_cpu_to_le_32(TXGBE_RXD_STAT_DD));
+}
+
+int
+txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct txgbe_rx_queue *rxq = rx_queue;
+	volatile uint32_t *status;
+	uint32_t nb_hold, desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return -EINVAL;
+
+	nb_hold = rxq->nb_rx_hold;
+	if (offset >= rxq->nb_rx_desc - nb_hold)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	status = &rxq->rx_ring[desc].qw1.lo.status;
+	if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
+		return RTE_ETH_RX_DESC_DONE;
+
+	return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	struct txgbe_tx_queue *txq = tx_queue;
+	volatile uint32_t *status;
+	uint32_t desc;
+
+	if (unlikely(offset >= txq->nb_tx_desc))
+		return -EINVAL;
+
+	desc = txq->tx_tail + offset;
+	if (desc >= txq->nb_tx_desc) {
+		desc -= txq->nb_tx_desc;
+		if (desc >= txq->nb_tx_desc)
+			desc -= txq->nb_tx_desc;
+	}
+
+	status = &txq->tx_ring[desc].dw3;
+	if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
+
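Note that txgbe_dev_rx_queue_count() scans DD bits in steps of four
descriptors, so the returned count is rounded to the scan interval;
this trades exactness for fewer reads of the descriptor ring. These
status queries pair naturally with the interrupt ops added earlier in
an interrupt-driven receive loop. A minimal sketch following the
pattern of examples/l3fwd-power; ids are illustrative and error
handling is elided.

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>
    #include <rte_mbuf.h>

    /* Sketch: block until Rx queue 0 raises an interrupt, then drain it. */
    static void
    rx_intr_loop(uint16_t port_id)
    {
        struct rte_epoll_event ev;
        struct rte_mbuf *pkts[32];
        uint16_t i, nb;

        /* Register the queue interrupt with this thread's epoll fd */
        rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
                RTE_INTR_EVENT_ADD, NULL);

        for (;;) {
            /* Sets the IMS bit via .rx_queue_intr_enable */
            rte_eth_dev_rx_intr_enable(port_id, 0);
            rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
            /* Masks the queue again via .rx_queue_intr_disable */
            rte_eth_dev_rx_intr_disable(port_id, 0);

            do {
                nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
                for (i = 0; i < nb; i++)
                    rte_pktmbuf_free(pkts[i]); /* real work here */
            } while (nb > 0);
        }
    }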
 void __rte_cold
 txgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
@@ -3094,3 +3316,40 @@ txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	return 0;
 }
+
+void
+txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct txgbe_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct txgbe_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.offloads = txq->offloads;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 958ca2e97..f773357a3 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -402,6 +402,7 @@ struct txgbe_txq_ops {
 void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);
 
 void txgbe_set_rx_function(struct rte_eth_dev *dev);
+int txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
 
 uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 
 uint64_t txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
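For completeness, the rxq/txq info ops surface through the generic
rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get() APIs. A
minimal sketch of dumping the configured thresholds; port and queue
ids are illustrative.

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: print descriptor counts and free thresholds for
     * Rx/Tx queue 0 of the given port. */
    static void
    dump_queue_info(uint16_t port_id)
    {
        struct rte_eth_rxq_info rx_info;
        struct rte_eth_txq_info tx_info;

        /* Calls .rxq_info_get, here txgbe_rxq_info_get() */
        if (rte_eth_rx_queue_info_get(port_id, 0, &rx_info) == 0)
            printf("rxq0: %u descriptors, free thresh %u\n",
                   rx_info.nb_desc, rx_info.conf.rx_free_thresh);

        /* Calls .txq_info_get, here txgbe_txq_info_get() */
        if (rte_eth_tx_queue_info_get(port_id, 0, &tx_info) == 0)
            printf("txq0: %u descriptors, free thresh %u\n",
                   tx_info.nb_desc, tx_info.conf.tx_free_thresh);
    }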