[v2,26/26] net/ngbe: add Tx done cleanup
Commit Message
Add support for API rte_eth_tx_done_cleanup().
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/ngbe/ngbe_ethdev.c | 1 +
drivers/net/ngbe/ngbe_rxtx.c | 89 ++++++++++++++++++++++++++++++++++
drivers/net/ngbe/ngbe_rxtx.h | 1 +
3 files changed, 91 insertions(+)
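For reference, a minimal application-side sketch of how the new callback is
reached through the public API (not part of the patch; port_id, queue_id and
budget are illustrative placeholders):

#include <stdio.h>
#include <rte_ethdev.h>

/* Reclaim up to 'budget' already-transmitted mbufs on one Tx queue. */
static void
app_reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id, uint32_t budget)
{
	/* Returns the number of packets freed, or a negative errno
	 * (e.g. -ENOTSUP when the PMD has no .tx_done_cleanup callback).
	 * A budget of 0 requests freeing as many packets as possible.
	 */
	int nb = rte_eth_tx_done_cleanup(port_id, queue_id, budget);

	if (nb < 0)
		printf("tx_done_cleanup(port %u, queue %u) failed: %d\n",
		       port_id, queue_id, nb);
}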
Comments
On 10/21/2021 10:50 AM, Jiawen Wu wrote:
> Add support for API rte_eth_tx_done_cleanup().
>
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
> ---
> drivers/net/ngbe/ngbe_ethdev.c | 1 +
> drivers/net/ngbe/ngbe_rxtx.c | 89 ++++++++++++++++++++++++++++++++++
> drivers/net/ngbe/ngbe_rxtx.h | 1 +
> 3 files changed, 91 insertions(+)
Need to update ngbe.ini for the 'Free Tx mbuf on demand' feature,
adding the following in next-net:
diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
index b38b4b46d775..2701c5f05145 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -7,6 +7,7 @@
Speed capabilities = Y
Link status = Y
Link status event = Y
+Free Tx mbuf on demand = Y
Queue start/stop = Y
Burst mode info = Y
MTU update = Y
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -3103,6 +3103,7 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
.timesync_adjust_time = ngbe_timesync_adjust_time,
.timesync_read_time = ngbe_timesync_read_time,
.timesync_write_time = ngbe_timesync_write_time,
+ .tx_done_cleanup = ngbe_dev_tx_done_cleanup,
};
RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -1713,6 +1713,95 @@ ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
}
}
+static int
+ngbe_tx_done_cleanup_full(struct ngbe_tx_queue *txq, uint32_t free_cnt)
+{
+ struct ngbe_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start freeing mbufs from the entry after tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
+ if (txq->nb_tx_free == 0 && ngbe_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+ /* Loop through swr_ring to count the number of
+ * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
+ if (pkt_cnt < free_cnt) {
+ if (ngbe_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
+static int
+ngbe_tx_done_cleanup_simple(struct ngbe_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+ break;
+
+ n = ngbe_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
+int
+ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+ struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
+ if (txq->offloads == 0 &&
+ txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST)
+ return ngbe_tx_done_cleanup_simple(txq, free_cnt);
+
+ return ngbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
static void
ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
{
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -368,6 +368,7 @@ struct ngbe_txq_ops {
void ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq);
void ngbe_set_rx_function(struct rte_eth_dev *dev);
+int ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
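For context, the ethdev layer routes the public API to the callback registered
in ngbe_eth_dev_ops roughly as follows (simplified excerpt of lib/ethdev
behaviour, shown only to illustrate the dispatch; not part of this patch):

/* rte_eth_tx_done_cleanup() validates port_id and queue_id and then invokes
 * the PMD hook added above, passing the driver's raw Tx queue pointer, so
 * the call lands in ngbe_dev_tx_done_cleanup().
 */
ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], free_cnt);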