From patchwork Thu Jan 9 10:38:19 2020
X-Patchwork-Submitter: Chenxu Di
X-Patchwork-Id: 64327
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Chenxu Di <chenxux.di@intel.com>
To: dev@dpdk.org
Cc: Yang Qiming, Chenxu Di
Date: Thu, 9 Jan 2020 10:38:19 +0000
Message-Id: <20200109103822.89011-2-chenxux.di@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200109103822.89011-1-chenxux.di@intel.com>
References: <20191203055134.72874-1-chenxux.di@intel.com>
	<20200109103822.89011-1-chenxux.di@intel.com>
Subject: [dpdk-dev] [PATCH v7 1/4] net/i40e: cleanup Tx buffers

Add support to the i40e driver for the rte_eth_tx_done_cleanup API,
which lets an application force the driver to free mbufs already
consumed by the hardware on a Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
(A short usage sketch of the new API follows the diff.)

 drivers/net/i40e/i40e_ethdev.c    |   3 +
 drivers/net/i40e/i40e_ethdev_vf.c |   3 +
 drivers/net/i40e/i40e_rxtx.c      | 151 ++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_rxtx.h      |   8 ++
 4 files changed, 165 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5999c964b..e0b071891 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -522,6 +522,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.mac_addr_set = i40e_set_default_mac_addr,
 	.mtu_set = i40e_dev_mtu_set,
 	.tm_ops_get = i40e_tm_ops_get,
+	.tx_done_cleanup = i40e_tx_done_cleanup,
 };

 /* store statistics names and its offset in stats structure */
@@ -1358,6 +1359,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	dev->tx_pkt_burst = i40e_xmit_pkts;
 	dev->tx_pkt_prepare = i40e_prep_pkts;

+	i40e_set_tx_done_cleanup_func(i40e_tx_done_cleanup_scalar);
+
 	/* for secondary processes, we don't initialise any further as primary
 	 * has already done this work. Only check we don't need a different
 	 * RX function */

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 5dba0928b..3dcc9434c 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -215,6 +215,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
 	.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
 	.mtu_set = i40evf_dev_mtu_set,
 	.mac_addr_set = i40evf_set_default_mac_addr,
+	.tx_done_cleanup = i40e_tx_done_cleanup,
 };

 /*
@@ -1473,6 +1474,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
 	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

+	i40e_set_tx_done_cleanup_func(i40e_tx_done_cleanup_scalar);
+
 	/*
 	 * For secondary processes, we don't initialise any further as primary
 	 * has already done this work.

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 17dc8c78f..dfbca06b6 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2455,6 +2455,154 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 	}
 }

+static i40e_tx_done_cleanup_t i40e_tx_done_cleanup_op;
+
+int
+i40e_tx_done_cleanup_scalar(struct i40e_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	uint32_t pkt_cnt;
+	uint16_t i;
+	uint16_t tx_last;
+	uint16_t tx_id;
+	uint16_t nb_tx_to_clean;
+	uint16_t nb_tx_free_last;
+	struct i40e_tx_entry *swr_ring = txq->sw_ring;
+
+	/* Start freeing mbufs from the entry after tx_tail */
+	tx_last = txq->tx_tail;
+	tx_id = swr_ring[tx_last].next_id;
+
+	if (txq->nb_tx_free == 0)
+		if (i40e_xmit_cleanup(txq))
+			return 0;
+
+	nb_tx_to_clean = txq->nb_tx_free;
+	nb_tx_free_last = txq->nb_tx_free;
+	if (!free_cnt)
+		free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the number of
+	 * freeable mbufs and packets.
+	 */
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean &&
+			pkt_cnt < free_cnt &&
+			tx_id != tx_last; i++) {
+			if (swr_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+				swr_ring[tx_id].mbuf = NULL;
+
+				/*
+				 * last segment in the packet,
+				 * increment packet count
+				 */
+				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = swr_ring[tx_id].next_id;
+		}
+
+		if (tx_id == tx_last || txq->tx_rs_thresh
+			> txq->nb_tx_desc - txq->nb_tx_free)
+			break;
+
+		if (pkt_cnt < free_cnt) {
+			if (i40e_xmit_cleanup(txq))
+				break;
+
+			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+			nb_tx_free_last = txq->nb_tx_free;
+		}
+	}
+
+	PMD_TX_FREE_LOG(DEBUG,
+		"Free %u packets successfully "
+		"(port=%d queue=%d)",
+		pkt_cnt, txq->port_id, txq->queue_id);
+
+	return (int)pkt_cnt;
+}
+
+int
+i40e_tx_done_cleanup_simple(struct i40e_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	uint16_t i;
+	uint16_t tx_first;
+	uint16_t tx_id;
+	uint32_t pkt_cnt;
+	struct i40e_tx_entry *swr_ring = txq->sw_ring;
+
+	/* Start freeing mbufs from tx_first */
+	tx_first = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+	tx_id = tx_first;
+
+	/* When free_cnt is 0, assume one mbuf per packet and
+	 * try to free as many packets as possible
+	 */
+	if (free_cnt == 0)
+		free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count freeable packets */
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
+			break;
+
+		if (!i40e_tx_free_bufs(txq))
+			break;
+
+		for (i = 0; i != txq->tx_rs_thresh &&
+			tx_id != tx_first; i++) {
+			/* last segment in the packet,
+			 * increment packet count
+			 */
+			pkt_cnt += (tx_id == swr_ring[tx_id].last_id);
+			tx_id = swr_ring[tx_id].next_id;
+		}
+
+		if (tx_id == tx_first)
+			break;
+	}
+
+	PMD_TX_FREE_LOG(DEBUG,
+		"Free %u packets successfully "
+		"(port=%d queue=%d)",
+		pkt_cnt, txq->port_id, txq->queue_id);
+
+	return (int)pkt_cnt;
+}
+
+int
+i40e_tx_done_cleanup_vec(struct i40e_tx_queue *txq __rte_unused,
+			uint32_t free_cnt __rte_unused)
+{
+	return -ENOTSUP;
+}
+int
+i40e_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+	i40e_tx_done_cleanup_t func = i40e_get_tx_done_cleanup_func();
+
+	if (!func)
+		return -ENOTSUP;
+
+	return func(txq, free_cnt);
+}
+
+void
+i40e_set_tx_done_cleanup_func(i40e_tx_done_cleanup_t fn)
+{
+	i40e_tx_done_cleanup_op = fn;
+}
+
+i40e_tx_done_cleanup_t
+i40e_get_tx_done_cleanup_func(void)
+{
+	return i40e_tx_done_cleanup_op;
+}
+
 void
 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 {
@@ -3139,15 +3287,18 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 			else
 				dev->tx_pkt_burst = i40e_get_recommend_tx_vec();
+			i40e_set_tx_done_cleanup_func(i40e_tx_done_cleanup_vec);
 		} else {
 			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
 			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+			i40e_set_tx_done_cleanup_func(i40e_tx_done_cleanup_simple);
 		}
 		dev->tx_pkt_prepare = NULL;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
 		dev->tx_pkt_burst = i40e_xmit_pkts;
 		dev->tx_pkt_prepare = i40e_prep_pkts;
+		i40e_set_tx_done_cleanup_func(i40e_tx_done_cleanup_scalar);
 	}
 }

diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2106bb355..ab2c0ffd0 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -173,6 +173,8 @@ union i40e_tx_offload {
 		uint64_t outer_l3_len:16; /**< outer L3 Header Length */
 	};
 };
+typedef int (*i40e_tx_done_cleanup_t)(struct i40e_tx_queue *txq,
+				uint32_t free_cnt);

 int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
@@ -212,6 +214,12 @@ void i40e_dev_free_queues(struct rte_eth_dev *dev);
 void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
 void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
 void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+void i40e_set_tx_done_cleanup_func(i40e_tx_done_cleanup_t fn);
+i40e_tx_done_cleanup_t i40e_get_tx_done_cleanup_func(void);
+int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
+int i40e_tx_done_cleanup_scalar(struct i40e_tx_queue *txq, uint32_t free_cnt);
+int i40e_tx_done_cleanup_vec(struct i40e_tx_queue *txq, uint32_t free_cnt);
+int i40e_tx_done_cleanup_simple(struct i40e_tx_queue *txq, uint32_t free_cnt);
 int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
 void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
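
Usage sketch (not part of the patch): a minimal example of how an
application could exercise this driver callback through the generic
rte_eth_tx_done_cleanup() ethdev API. The helper name reclaim_tx_mbufs
and the port/queue arguments are illustrative assumptions; only the
ethdev API call itself is real.

#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: reclaim completed Tx mbufs on one queue.
 * free_cnt == 0 asks the driver to free as many consumed buffers
 * as possible, which is what this patch implements for i40e.
 */
static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	int nb = rte_eth_tx_done_cleanup(port_id, queue_id, free_cnt);

	if (nb == -ENOTSUP) {
		/* e.g. the i40e vector Tx path, whose handler
		 * (i40e_tx_done_cleanup_vec) returns -ENOTSUP
		 */
		printf("tx_done_cleanup not supported on port %u\n",
		       (unsigned int)port_id);
		return;
	}
	if (nb > 0)
		printf("freed %d packets on port %u queue %u\n", nb,
		       (unsigned int)port_id, (unsigned int)queue_id);
}

Note on the design: the patch binds the cleanup variant at Tx-setup time
via i40e_set_tx_done_cleanup_func(), so the handler always matches the
active Tx burst path (scalar, simple, or vector).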