@@ -91,6 +91,10 @@ struct gve_tx_queue {
struct gve_queue_page_list *qpl;
struct gve_tx_iovec *iov_ring;
+ /* Stats */
+ uint64_t packets;
+ uint64_t bytes;
+
uint16_t port_id;
uint16_t queue_id;
@@ -129,6 +133,12 @@ struct gve_rx_queue {
/* only valid for GQI_QPL queue format */
struct gve_queue_page_list *qpl;
+ /* Stats */
+ uint64_t no_mbufs;
+ uint64_t errors;
+ uint64_t packets;
+ uint64_t bytes;
+
struct gve_priv *hw;
const struct rte_memzone *qres_mz;
struct gve_queue_resources *qres;
@@ -326,6 +326,73 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
return 0;
}
+static int
+gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct gve_tx_queue *txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ stats->opackets += txq->packets;
+ stats->obytes += txq->bytes;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txq->packets;
+ stats->q_obytes[i] = txq->bytes;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ stats->ipackets += rxq->packets;
+ stats->ibytes += rxq->bytes;
+ stats->ierrors += rxq->errors;
+ stats->rx_nombuf += rxq->no_mbufs;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxq->packets;
+ stats->q_ibytes[i] = rxq->bytes;
+ stats->q_errors[i] = rxq->errors;
+ }
+ }
+
+ return 0;
+}
+
+static int
+gve_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct gve_tx_queue *txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ txq->packets = 0;
+ txq->bytes = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ rxq->packets = 0;
+ rxq->bytes = 0;
+ rxq->no_mbufs = 0;
+ rxq->errors = 0;
+ }
+
+ return 0;
+}
+
static int
gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
@@ -363,6 +430,8 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
.rx_queue_setup = gve_rx_queue_setup,
.tx_queue_setup = gve_tx_queue_setup,
.link_update = gve_link_update,
+ .stats_get = gve_dev_stats_get,
+ .stats_reset = gve_dev_stats_reset,
.mtu_set = gve_dev_mtu_set,
};
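
With .stats_get and .stats_reset wired into eth_dev_ops, applications reach
these counters through the standard ethdev API. Below is a minimal
consumer-side sketch; the helper name dump_gve_basic_stats and the port_id
parameter are hypothetical, everything else is the stock ethdev API. Note
that rte_eth_stats_get() zeroes the rte_eth_stats structure before invoking
the PMD callback, which is why gve_dev_stats_get() can accumulate with +=.

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Hypothetical helper: print the basic stats this patch populates, then
 * clear them. "port_id" is assumed to refer to a started gve port.
 */
static void
dump_gve_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	printf("rx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64
	       " errors, %" PRIu64 " mbuf alloc failures\n",
	       stats.ipackets, stats.ibytes, stats.ierrors, stats.rx_nombuf);
	printf("tx: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
	       stats.opackets, stats.obytes);

	/* Per-queue counters (q_ipackets and friends) are only filled for
	 * the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues, matching the bounds
	 * checks in gve_dev_stats_get() above.
	 */
	rte_eth_stats_reset(port_id);	/* invokes gve_dev_stats_reset() */
}
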
@@ -26,8 +26,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
break;
rxq->sw_ring[idx + i] = nmb;
}
- if (i != nb_alloc)
+ if (i != nb_alloc) {
+ rxq->no_mbufs += nb_alloc - i;
nb_alloc = i;
+ }
}
rxq->nb_avail -= nb_alloc;
next_avail += nb_alloc;
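
For context, the hunk above sits inside gve_rx_refill()'s bulk-allocation
loop. Its surrounding shape is roughly the sketch below; the function name
refill_sketch and its parameter list are hypothetical, and the loop body is
an assumption inferred from the visible context, not the exact driver code:

#include <stdint.h>
#include <rte_mbuf.h>

/* Assumed shape of the refill loop: allocate up to nb_alloc mbufs, stop at
 * the first failure, and record the shortfall so gve_dev_stats_get() can
 * report it via stats->rx_nombuf.
 */
static uint16_t
refill_sketch(struct rte_mempool *mp, struct rte_mbuf **ring,
	      uint16_t idx, uint16_t nb_alloc, uint64_t *no_mbufs)
{
	uint16_t i;

	for (i = 0; i < nb_alloc; i++) {
		struct rte_mbuf *nmb = rte_pktmbuf_alloc(mp);
		if (nmb == NULL)
			break;		/* mempool exhausted */
		ring[idx + i] = nmb;
	}
	if (i != nb_alloc)
		*no_mbufs += nb_alloc - i;	/* shortfall -> rx_nombuf */
	return i;	/* caller trims nb_alloc to what was actually got */
}
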
@@ -88,6 +90,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t rx_id = rxq->rx_tail;
struct rte_mbuf *rxe;
uint16_t nb_rx, len;
+ uint64_t bytes = 0;
uint64_t addr;
rxr = rxq->rx_desc_ring;
@@ -97,8 +100,10 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
break;
- if (rxd->flags_seq & GVE_RXF_ERR)
+ if (rxd->flags_seq & GVE_RXF_ERR) {
+ rxq->errors++;
continue;
+ }
len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
rxe = rxq->sw_ring[rx_id];
@@ -137,6 +142,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_id = 0;
rx_pkts[nb_rx] = rxe;
+ bytes += len;
}
rxq->nb_avail += nb_rx;
@@ -145,6 +151,11 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (rxq->nb_avail > rxq->free_thresh)
gve_rx_refill(rxq);
+ if (nb_rx) {
+ rxq->packets += nb_rx;
+ rxq->bytes += bytes;
+ }
+
return nb_rx;
}
@@ -260,6 +260,7 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct rte_mbuf *tx_pkt, *first;
uint16_t sw_id = txq->sw_tail;
uint16_t nb_used, i;
+ uint64_t bytes = 0;
uint16_t nb_tx = 0;
uint32_t hlen;
@@ -352,6 +353,8 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->nb_free -= nb_used;
txq->sw_nb_free -= first->nb_segs;
tx_tail += nb_used;
+
+ bytes += first->pkt_len;
}
end_of_tx:
@@ -359,6 +362,9 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
txq->tx_tail = tx_tail;
txq->sw_tail = sw_id;
+
+ txq->packets += nb_tx;
+ txq->bytes += bytes;
}
return nb_tx;
@@ -377,6 +383,7 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct rte_mbuf *tx_pkt, *first;
uint16_t nb_used, hlen, i;
uint64_t ol_flags, addr;
+ uint64_t bytes = 0;
uint16_t nb_tx = 0;
txr = txq->tx_desc_ring;
@@ -435,12 +442,17 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->nb_free -= nb_used;
tx_tail += nb_used;
+
+ bytes += first->pkt_len;
}
end_of_tx:
if (nb_tx) {
rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
txq->tx_tail = tx_tail;
+
+ txq->packets += nb_tx;
+ txq->bytes += bytes;
}
return nb_tx;
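
All three burst paths above use the same accounting pattern: bytes are
summed in a burst-local variable and folded into the queue counter once per
burst, and the counters are plain uint64_t rather than atomics. This is
safe under DPDK's usual single-lcore-per-queue polling model; a concurrent
rte_eth_stats_get() may observe a slightly stale value, but aligned 64-bit
loads and stores do not tear on the 64-bit targets DPDK supports. A minimal
sketch of the pattern, with hypothetical names (q_sw_stats, count_burst):

#include <stdint.h>
#include <rte_mbuf.h>

/* Hypothetical per-queue counters, mirroring the fields added above. */
struct q_sw_stats {
	uint64_t packets;
	uint64_t bytes;
};

/* Sketch of the per-burst accounting used by gve_rx_burst() and the two
 * TX burst functions: one write-back to the shared counters per burst
 * instead of one per packet.
 */
static inline void
count_burst(struct q_sw_stats *st, struct rte_mbuf **pkts, uint16_t nb)
{
	uint64_t bytes = 0;	/* burst-local accumulator */
	uint16_t i;

	for (i = 0; i < nb; i++)
		bytes += pkts[i]->pkt_len;

	if (nb) {	/* single writer: the lcore polling this queue */
		st->packets += nb;
		st->bytes += bytes;
	}
}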