From patchwork Tue Oct 27 06:23:15 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Jiawen Wu <jiawenwu@trustnetic.com>
X-Patchwork-Id: 82269
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Date: Tue, 27 Oct 2020 14:23:15 +0800
Message-Id: <20201027062315.242576-2-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20201027062315.242576-1-jiawenwu@trustnetic.com>
References: <20201027062315.242576-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH 2/2] net/txgbe: add Tx done cleanup

Add support for API rte_eth_tx_done_cleanup().
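The new dev op backs the generic rte_eth_tx_done_cleanup() API, which lets
an application ask the driver to free the mbufs of packets the hardware has
already transmitted on a given Tx queue. A rough usage sketch from the
application side (illustrative only, not part of this patch; the port ID,
queue ID and free count below are arbitrary):

	#include <rte_ethdev.h>

	/* Ask the PMD to release mbufs of up to 32 already-transmitted
	 * packets on Tx queue 0 of port 0. Returns the number of packets
	 * whose mbufs were freed, or a negative errno on failure.
	 */
	static int
	reclaim_tx_mbufs(void)
	{
		return rte_eth_tx_done_cleanup(0, 0, 32);
	}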
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/txgbe_ethdev.c |  1 +
 drivers/net/txgbe/txgbe_rxtx.c   | 89 ++++++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h   |  1 +
 3 files changed, 91 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index d4014d5c4..87824b140 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4253,6 +4253,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.timesync_adjust_time       = txgbe_timesync_adjust_time,
 	.timesync_read_time         = txgbe_timesync_read_time,
 	.timesync_write_time        = txgbe_timesync_write_time,
+	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index e57c87759..07bfaf989 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1951,6 +1951,95 @@ txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
 	}
 }
 
+static int
+txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
+{
+	struct txgbe_tx_entry *swr_ring = txq->sw_ring;
+	uint16_t i, tx_last, tx_id;
+	uint16_t nb_tx_free_last;
+	uint16_t nb_tx_to_clean;
+	uint32_t pkt_cnt;
+
+	/* Start free mbuf from the next of tx_tail */
+	tx_last = txq->tx_tail;
+	tx_id = swr_ring[tx_last].next_id;
+
+	if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
+		return 0;
+
+	nb_tx_to_clean = txq->nb_tx_free;
+	nb_tx_free_last = txq->nb_tx_free;
+	if (!free_cnt)
+		free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the amount of
+	 * freeable mbufs and packets.
+	 */
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean &&
+			pkt_cnt < free_cnt &&
+			tx_id != tx_last; i++) {
+			if (swr_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+				swr_ring[tx_id].mbuf = NULL;
+
+				/*
+				 * last segment in the packet,
+				 * increment packet count
+				 */
+				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = swr_ring[tx_id].next_id;
+		}
+
+		if (pkt_cnt < free_cnt) {
+			if (txgbe_xmit_cleanup(txq))
+				break;
+
+			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+			nb_tx_free_last = txq->nb_tx_free;
+		}
+	}
+
+	return (int)pkt_cnt;
+}
+
+static int
+txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	int i, n, cnt;
+
+	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+		free_cnt = txq->nb_tx_desc;
+
+	cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+	for (i = 0; i < cnt; i += n) {
+		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+			break;
+
+		n = txgbe_tx_free_bufs(txq);
+
+		if (n == 0)
+			break;
+	}
+
+	return i;
+}
+
+int
+txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+	struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+	if (txq->offloads == 0 &&
+	    txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
+		return txgbe_tx_done_cleanup_simple(txq, free_cnt);
+
+	return txgbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
 static void __rte_cold
 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
 {
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 8c36698c3..120c2547f 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -407,6 +407,7 @@ struct txgbe_txq_ops {
 void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);
 void txgbe_set_rx_function(struct rte_eth_dev *dev);
+int txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
 
 uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);