[v9,4/8] net/af_xdp: use generic SW stats

Message ID 20240521201801.126886-5-stephen@networkplumber.org (mailing list archive)
State New
Delegated to: Thomas Monjalon
Series Common statistics for SW PMD's

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Stephen Hemminger May 21, 2024, 8:16 p.m. UTC
  Replace the driver-private rx_stats/tx_stats counters with the common
  per-queue SW statistics helpers.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 85 ++++++++---------------------
 1 file changed, 22 insertions(+), 63 deletions(-)
  
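For context, this series replaces the driver-private rx_stats/tx_stats
counters with the generic per-queue counters provided by ethdev_swstats.h
earlier in the series. The fragment below is only a minimal, self-contained
sketch of that call pattern; the names sw_counters, sw_count_packets and
demo_rxq are stand-ins invented for illustration, not the real
rte_eth_counters API, whose layout and signatures are defined by the series
itself.

/*
 * Minimal sketch of the per-queue SW counter pattern this patch adopts.
 * All names here (sw_counters, sw_count_packets, demo_rxq) are stand-ins;
 * the real helpers come from ethdev_swstats.h, introduced earlier in
 * this series.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sw_counters {		/* stand-in for struct rte_eth_counters */
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

struct demo_rxq {		/* stand-in for struct pkt_rx_queue */
	struct sw_counters stats;
};

/* stand-in for rte_eth_count_packets(): one update per burst replaces the
 * old per-driver rx_pkts/rx_bytes (or tx_pkts/tx_bytes) bookkeeping */
static inline void
sw_count_packets(struct sw_counters *c, uint16_t nb_pkts, uint32_t nb_bytes)
{
	c->packets += nb_pkts;
	c->bytes += nb_bytes;
}

int
main(void)
{
	struct demo_rxq rxq = { .stats = { 0 } };

	/* what af_xdp_rx_zc()/af_xdp_rx_cp() now do at the end of a burst */
	sw_count_packets(&rxq.stats, 32, 32 * 64);

	printf("stats at offset %zu, %" PRIu64 " packets counted\n",
	       offsetof(struct demo_rxq, stats), rxq.stats.packets);
	return 0;
}

eth_stats_get() and eth_stats_reset() then locate each queue's counters by
structure offset, which is why the patch below passes
offsetof(struct pkt_tx_queue, stats) and offsetof(struct pkt_rx_queue, stats)
to rte_eth_counters_stats_get() and rte_eth_counters_reset().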

Patch

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 6ba455bb9b..c563621798 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -20,6 +20,7 @@ 
 #include <rte_ethdev.h>
 #include <ethdev_driver.h>
 #include <ethdev_vdev.h>
+#include <ethdev_swstats.h>
 #include <rte_kvargs.h>
 #include <bus_vdev_driver.h>
 #include <rte_string_fns.h>
@@ -120,19 +121,13 @@  struct xsk_umem_info {
 	uint32_t max_xsks;
 };
 
-struct rx_stats {
-	uint64_t rx_pkts;
-	uint64_t rx_bytes;
-	uint64_t rx_dropped;
-};
-
 struct pkt_rx_queue {
 	struct xsk_ring_cons rx;
 	struct xsk_umem_info *umem;
 	struct xsk_socket *xsk;
 	struct rte_mempool *mb_pool;
 
-	struct rx_stats stats;
+	struct rte_eth_counters stats;
 
 	struct xsk_ring_prod fq;
 	struct xsk_ring_cons cq;
@@ -143,17 +138,11 @@  struct pkt_rx_queue {
 	int busy_budget;
 };
 
-struct tx_stats {
-	uint64_t tx_pkts;
-	uint64_t tx_bytes;
-	uint64_t tx_dropped;
-};
-
 struct pkt_tx_queue {
 	struct xsk_ring_prod tx;
 	struct xsk_umem_info *umem;
 
-	struct tx_stats stats;
+	struct rte_eth_counters stats;
 
 	struct pkt_rx_queue *pair;
 	int xsk_queue_idx;
@@ -369,9 +358,7 @@  af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	xsk_ring_cons__release(rx, nb_pkts);
 	(void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
 
-	/* statistics */
-	rxq->stats.rx_pkts += nb_pkts;
-	rxq->stats.rx_bytes += rx_bytes;
+	rte_eth_count_packets(&rxq->stats, nb_pkts, rx_bytes);
 
 	return nb_pkts;
 }
@@ -429,10 +416,7 @@  af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 
 	xsk_ring_cons__release(rx, nb_pkts);
-
-	/* statistics */
-	rxq->stats.rx_pkts += nb_pkts;
-	rxq->stats.rx_bytes += rx_bytes;
+	rte_eth_count_packets(&rxq->stats, nb_pkts, rx_bytes);
 
 	return nb_pkts;
 }
@@ -558,6 +542,7 @@  af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					umem->mb_pool->header_size;
 			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 			desc->addr = addr | offset;
+			tx_bytes += mbuf->pkt_len;
 			count++;
 		} else {
 			struct rte_mbuf *local_mbuf =
@@ -585,20 +570,17 @@  af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			desc->addr = addr | offset;
 			rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
 					desc->len);
+			tx_bytes += mbuf->pkt_len;
 			rte_pktmbuf_free(mbuf);
 			count++;
 		}
-
-		tx_bytes += mbuf->pkt_len;
 	}
 
 out:
 	xsk_ring_prod__submit(&txq->tx, count);
 	kick_tx(txq, cq);
 
-	txq->stats.tx_pkts += count;
-	txq->stats.tx_bytes += tx_bytes;
-	txq->stats.tx_dropped += nb_pkts - count;
+	rte_eth_count_packets(&txq->stats, count, tx_bytes);
 
 	return count;
 }
@@ -648,8 +630,7 @@  af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 	kick_tx(txq, cq);
 
-	txq->stats.tx_pkts += nb_pkts;
-	txq->stats.tx_bytes += tx_bytes;
+	rte_eth_count_packets(&txq->stats, nb_pkts, tx_bytes);
 
 	return nb_pkts;
 }
@@ -847,39 +828,26 @@  eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 static int
 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	struct pmd_internals *internals = dev->data->dev_private;
 	struct pmd_process_private *process_private = dev->process_private;
-	struct xdp_statistics xdp_stats;
-	struct pkt_rx_queue *rxq;
-	struct pkt_tx_queue *txq;
-	socklen_t optlen;
-	int i, ret, fd;
+	unsigned int i;
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		optlen = sizeof(struct xdp_statistics);
-		rxq = &internals->rx_queues[i];
-		txq = rxq->pair;
-		stats->q_ipackets[i] = rxq->stats.rx_pkts;
-		stats->q_ibytes[i] = rxq->stats.rx_bytes;
+	rte_eth_counters_stats_get(dev, offsetof(struct pkt_tx_queue, stats),
+				   offsetof(struct pkt_rx_queue, stats), stats);
 
-		stats->q_opackets[i] = txq->stats.tx_pkts;
-		stats->q_obytes[i] = txq->stats.tx_bytes;
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct xdp_statistics xdp_stats;
+		socklen_t optlen = sizeof(xdp_stats);
+		int fd;
 
-		stats->ipackets += stats->q_ipackets[i];
-		stats->ibytes += stats->q_ibytes[i];
-		stats->imissed += rxq->stats.rx_dropped;
-		stats->oerrors += txq->stats.tx_dropped;
 		fd = process_private->rxq_xsk_fds[i];
-		ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS,
-					   &xdp_stats, &optlen) : -1;
-		if (ret != 0) {
+		if (fd < 0)
+			continue;
+		if (getsockopt(fd, SOL_XDP, XDP_STATISTICS,
+			       &xdp_stats, &optlen) < 0) {
 			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
 			return -1;
 		}
 		stats->imissed += xdp_stats.rx_dropped;
-
-		stats->opackets += stats->q_opackets[i];
-		stats->obytes += stats->q_obytes[i];
 	}
 
 	return 0;
@@ -888,17 +856,8 @@  eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static int
 eth_stats_reset(struct rte_eth_dev *dev)
 {
-	struct pmd_internals *internals = dev->data->dev_private;
-	int i;
-
-	for (i = 0; i < internals->queue_cnt; i++) {
-		memset(&internals->rx_queues[i].stats, 0,
-					sizeof(struct rx_stats));
-		memset(&internals->tx_queues[i].stats, 0,
-					sizeof(struct tx_stats));
-	}
-
-	return 0;
+	return rte_eth_counters_reset(dev, offsetof(struct pkt_tx_queue, stats),
+				      offsetof(struct pkt_rx_queue, stats));
 }
 
 #ifdef RTE_NET_AF_XDP_LIBBPF_XDP_ATTACH