From patchwork Thu Aug 31 09:56:35 2023
X-Patchwork-Submitter: Renyong Wan <wanry@3snic.com>
X-Patchwork-Id: 130950
X-Patchwork-Delegate: thomas@monjalon.net
From: Renyong Wan <wanry@3snic.com>
To: dev@dpdk.org
Cc: Renyong Wan, Steven Song
Subject: [PATCH v2 17/32] net/sssnic: support Tx queue start and stop
Date: Thu, 31 Aug 2023 17:56:35 +0800
Message-ID: <20230831095650.219964-18-wanry@3snic.com>
In-Reply-To: <20230831095650.219964-1-wanry@3snic.com>
References: <20230831095650.219964-1-wanry@3snic.com>

From: Renyong Wan <wanry@3snic.com>

Signed-off-by: Steven Song
Signed-off-by: Renyong Wan <wanry@3snic.com>
---
 doc/guides/nics/features/sssnic.ini   |   1 +
 drivers/net/sssnic/sssnic_ethdev.c    |   2 +
 drivers/net/sssnic/sssnic_ethdev_tx.c | 155 ++++++++++++++++++++++++++
 drivers/net/sssnic/sssnic_ethdev_tx.h |   4 +
 4 files changed, 162 insertions(+)

diff --git a/doc/guides/nics/features/sssnic.ini b/doc/guides/nics/features/sssnic.ini
index 82b527ba26..b75c68cb33 100644
--- a/doc/guides/nics/features/sssnic.ini
+++ b/doc/guides/nics/features/sssnic.ini
@@ -6,6 +6,7 @@
 [Features]
 Link status = Y
 Link status event = Y
+Queue start/stop = Y
 Unicast MAC filter = Y
 Multicast MAC filter = Y
 Linux = Y
diff --git a/drivers/net/sssnic/sssnic_ethdev.c b/drivers/net/sssnic/sssnic_ethdev.c
index 208f0db402..8a18f25889 100644
--- a/drivers/net/sssnic/sssnic_ethdev.c
+++ b/drivers/net/sssnic/sssnic_ethdev.c
@@ -360,6 +360,8 @@ static const struct eth_dev_ops sssnic_ethdev_ops = {
 	.tx_queue_release = sssnic_ethdev_tx_queue_release,
 	.rx_queue_start = sssnic_ethdev_rx_queue_start,
 	.rx_queue_stop = sssnic_ethdev_rx_queue_stop,
+	.tx_queue_start = sssnic_ethdev_tx_queue_start,
+	.tx_queue_stop = sssnic_ethdev_tx_queue_stop,
 };
 
 static int
diff --git a/drivers/net/sssnic/sssnic_ethdev_tx.c b/drivers/net/sssnic/sssnic_ethdev_tx.c
index d77cbc8647..392b877974 100644
--- a/drivers/net/sssnic/sssnic_ethdev_tx.c
+++ b/drivers/net/sssnic/sssnic_ethdev_tx.c
@@ -191,6 +191,18 @@ sssnic_ethdev_txq_ci_get(struct sssnic_ethdev_txq *txq)
 	return sssnic_workq_ci_get(txq->workq);
 }
 
+static inline int
+sssnic_ethdev_txq_pi_get(struct sssnic_ethdev_txq *txq)
+{
+	return sssnic_workq_pi_get(txq->workq);
+}
+
+static inline uint16_t
+sssnic_ethdev_txq_hw_ci_get(struct sssnic_ethdev_txq *txq)
+{
+	return *txq->hw_ci_addr & txq->idx_mask;
+}
+
 static inline void
 sssnic_ethdev_txq_consume(struct sssnic_ethdev_txq *txq, uint16_t num_entries)
 {
@@ -352,3 +364,146 @@ sssnic_ethdev_tx_queue_all_release(struct rte_eth_dev *ethdev)
 	for (qid = 0; qid < ethdev->data->nb_tx_queues; qid++)
 		sssnic_ethdev_tx_queue_release(ethdev, qid);
 }
+
+#define SSSNIC_ETHDEV_TX_FREE_BULK 64
+static inline int
+sssnic_ethdev_txq_pktmbufs_cleanup(struct sssnic_ethdev_txq *txq)
+{
+	struct sssnic_ethdev_tx_entry *txe;
+	struct rte_mbuf *free_pkts[SSSNIC_ETHDEV_TX_FREE_BULK];
+	uint16_t num_free_pkts = 0;
+	uint16_t hw_ci, ci, id_mask;
+	uint16_t count = 0;
+	int num_entries;
+
+	ci = sssnic_ethdev_txq_ci_get(txq);
+	hw_ci = sssnic_ethdev_txq_hw_ci_get(txq);
+	id_mask = txq->idx_mask;
+	num_entries = sssnic_ethdev_txq_num_used_entries(txq);
+
+	while (num_entries > 0) {
+		txe = &txq->txe[ci];
+
+		/* HW has not consumed enough entries of current packet */
+		if (((hw_ci - ci) & id_mask) < txe->num_workq_entries)
+			break;
+
+		num_entries -= txe->num_workq_entries;
+		count += txe->num_workq_entries;
+		ci = (ci + txe->num_workq_entries) & id_mask;
+
+		if (likely(txe->pktmbuf->nb_segs == 1)) {
+			struct rte_mbuf *pkt =
+				rte_pktmbuf_prefree_seg(txe->pktmbuf);
+			txe->pktmbuf = NULL;
+
+			if (unlikely(pkt == NULL))
+				continue;
+
+			free_pkts[num_free_pkts++] = pkt;
+			if (unlikely(pkt->pool != free_pkts[0]->pool ||
+				    num_free_pkts >=
+					    SSSNIC_ETHDEV_TX_FREE_BULK)) {
+				rte_mempool_put_bulk(free_pkts[0]->pool,
+					(void **)free_pkts, num_free_pkts - 1);
+				num_free_pkts = 0;
+				free_pkts[num_free_pkts++] = pkt;
+			}
+		} else {
+			rte_pktmbuf_free(txe->pktmbuf);
+			txe->pktmbuf = NULL;
+		}
+	}
+
+	if (num_free_pkts > 0)
+		rte_mempool_put_bulk(free_pkts[0]->pool, (void **)free_pkts,
+			num_free_pkts);
+
+	sssnic_ethdev_txq_consume(txq, count);
+
+	return count;
+}
+
+#define SSSNIC_ETHDEV_TXQ_FUSH_TIMEOUT 3000 /* 3 seconds */
+static int
+sssnic_ethdev_txq_flush(struct sssnic_ethdev_txq *txq)
+{
+	uint64_t timeout;
+	uint16_t used_entries;
+
+	timeout = rte_get_timer_cycles() +
+		  rte_get_timer_hz() * SSSNIC_ETHDEV_TXQ_FUSH_TIMEOUT / 1000;
+
+	do {
+		sssnic_ethdev_txq_pktmbufs_cleanup(txq);
+		used_entries = sssnic_ethdev_txq_num_used_entries(txq);
+		if (used_entries == 0)
+			return 0;
+
+		rte_delay_us_sleep(1000);
+	} while (((long)(rte_get_timer_cycles() - timeout)) < 0);
+
+	PMD_DRV_LOG(ERR, "Flush port:%u txq:%u timeout, used_txq_entries:%u",
+		txq->port, txq->qid, sssnic_ethdev_txq_num_used_entries(txq));
+
+	return -ETIMEDOUT;
+}
+
+int
+sssnic_ethdev_tx_queue_start(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+	struct sssnic_netdev *netdev = SSSNIC_ETHDEV_PRIVATE(ethdev);
+
+	ethdev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	netdev->num_started_txqs++;
+
+	PMD_DRV_LOG(DEBUG, "port %u txq %u started", ethdev->data->port_id,
+		queue_id);
+
+	return 0;
+}
+
+int
+sssnic_ethdev_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+	int ret;
+	struct sssnic_netdev *netdev = SSSNIC_ETHDEV_PRIVATE(ethdev);
+	struct sssnic_ethdev_txq *txq = ethdev->data->tx_queues[queue_id];
+
+	ret = sssnic_ethdev_txq_flush(txq);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to flush port %u txq %u",
+			ethdev->data->port_id, queue_id);
+		return ret;
+	}
+
+	ethdev->data->tx_queue_state[queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+	netdev->num_started_txqs--;
+
+	PMD_DRV_LOG(DEBUG, "port %u txq %u stopped", ethdev->data->port_id,
+		queue_id);
+
+	return 0;
+}
+
+int
+sssnic_ethdev_tx_queue_all_start(struct rte_eth_dev *ethdev)
+{
+	uint16_t qid;
+	uint16_t numq = ethdev->data->nb_tx_queues;
+
+	for (qid = 0; qid < numq; qid++)
+		sssnic_ethdev_tx_queue_start(ethdev, qid);
+
+	return 0;
+}
+
+void
+sssnic_ethdev_tx_queue_all_stop(struct rte_eth_dev *ethdev)
+{
+	uint16_t qid;
+	uint16_t numq = ethdev->data->nb_tx_queues;
+
+	for (qid = 0; qid < numq; qid++)
+		sssnic_ethdev_tx_queue_stop(ethdev, qid);
+}
diff --git a/drivers/net/sssnic/sssnic_ethdev_tx.h b/drivers/net/sssnic/sssnic_ethdev_tx.h
index bd1d721e37..3de9e899a0 100644
--- a/drivers/net/sssnic/sssnic_ethdev_tx.h
+++ b/drivers/net/sssnic/sssnic_ethdev_tx.h
@@ -23,5 +23,9 @@ int sssnic_ethdev_tx_queue_setup(struct rte_eth_dev *ethdev,
 void sssnic_ethdev_tx_queue_release(struct rte_eth_dev *ethdev,
 	uint16_t queue_id);
 void sssnic_ethdev_tx_queue_all_release(struct rte_eth_dev *ethdev);
+int sssnic_ethdev_tx_queue_start(struct rte_eth_dev *ethdev, uint16_t queue_id);
+int sssnic_ethdev_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
+int sssnic_ethdev_tx_queue_all_start(struct rte_eth_dev *ethdev);
+void sssnic_ethdev_tx_queue_all_stop(struct rte_eth_dev *ethdev);
 
 #endif /* _SSSNIC_ETHDEV_TX_H_ */
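
Usage sketch (not part of the patch): the new callbacks are reached through
the generic ethdev API, rte_eth_dev_tx_queue_stop() and
rte_eth_dev_tx_queue_start(). A minimal example follows, assuming a port
bound to this PMD that is already configured and started; the helper name
restart_tx_queue and the port/queue ids are illustrative only.

#include <rte_ethdev.h>

/*
 * Stop and restart one Tx queue. rte_eth_dev_tx_queue_stop() invokes the
 * PMD's tx_queue_stop callback (sssnic_ethdev_tx_queue_stop here), which
 * flushes in-flight mbufs before marking the queue stopped;
 * rte_eth_dev_tx_queue_start() invokes the tx_queue_start callback.
 */
static int
restart_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret; /* e.g. the driver's -ETIMEDOUT if the flush timed out */

	return rte_eth_dev_tx_queue_start(port_id, queue_id);
}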