@@ -369,6 +369,64 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	return 0;
 }
 
+static int
+gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct gve_tx_queue *txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		stats->opackets += txq->packets;
+		stats->obytes += txq->bytes;
+		stats->oerrors += txq->errors;
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		stats->ipackets += rxq->packets;
+		stats->ibytes += rxq->bytes;
+		stats->ierrors += rxq->errors;
+		stats->rx_nombuf += rxq->no_mbufs;
+	}
+
+	return 0;
+}
+
+static int
+gve_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct gve_tx_queue *txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		txq->packets = 0;
+		txq->bytes = 0;
+		txq->errors = 0;
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		rxq->packets = 0;
+		rxq->bytes = 0;
+		rxq->errors = 0;
+		rxq->no_mbufs = 0;
+	}
+
+	return 0;
+}
+
 static int
 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -407,6 +465,8 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
 	.rx_queue_release = gve_rx_queue_release,
 	.tx_queue_release = gve_tx_queue_release,
 	.link_update = gve_link_update,
+	.stats_get = gve_dev_stats_get,
+	.stats_reset = gve_dev_stats_reset,
 	.mtu_set = gve_dev_mtu_set,
 };
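
With the two callbacks wired into gve_eth_dev_ops, applications consume the counters through the generic ethdev API; rte_eth_stats_get() zeroes the output struct before dispatching to the PMD callback, which is why gve_dev_stats_get() can simply accumulate with "+=" across queues. A minimal usage sketch; print_basic_stats() is a hypothetical helper, and port_id is assumed to name a started gve port:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Sketch only, not part of the patch. */
static void
print_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	/* Zeroes stats, then calls gve_dev_stats_get() via dev_ops. */
	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	printf("rx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64 " mbuf alloc failures\n",
	       stats.ipackets, stats.ibytes, stats.rx_nombuf);
	printf("tx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64 " errors\n",
	       stats.opackets, stats.obytes, stats.oerrors);

	rte_eth_stats_reset(port_id);	/* calls gve_dev_stats_reset() */
}
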
@@ -105,6 +105,11 @@ struct gve_tx_queue {
 	struct gve_queue_page_list *qpl;
 	struct gve_tx_iovec *iov_ring;
 
+	/* stats items */
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -156,6 +161,12 @@ struct gve_rx_queue {
 	/* only valid for GQI_QPL queue format */
 	struct gve_queue_page_list *qpl;
 
+	/* stats items */
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+	uint64_t no_mbufs;
+
 	struct gve_priv *hw;
 	const struct rte_memzone *qres_mz;
 	struct gve_queue_resources *qres;
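
The counters added to the driver header (gve_ethdev.h) are plain uint64_t fields. Under DPDK's usual one-lcore-per-queue polling model, the burst functions below are the only writers, so no atomics are needed; gve_dev_stats_get() may read values a few packets stale, which the basic-stats API tolerates. A hypothetical debug helper, not part of the patch, showing how the new Tx fields read:

#include <stdio.h>
#include <inttypes.h>

#include "gve_ethdev.h"	/* struct gve_tx_queue with the new fields */

/* Hypothetical helper: dump the per-queue Tx counters directly. */
static void
gve_dump_tx_stats(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct gve_tx_queue *txq = dev->data->tx_queues[i];

		if (txq == NULL)
			continue;
		printf("txq[%u]: packets=%" PRIu64 " bytes=%" PRIu64 " errors=%" PRIu64 "\n",
		       i, txq->packets, txq->bytes, txq->errors);
	}
}
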
@@ -37,6 +37,7 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
 				next_avail = 0;
 				rxq->nb_rx_hold -= delta;
 			} else {
+				rxq->no_mbufs += nb_desc - next_avail;
 				dev = &rte_eth_devices[rxq->port_id];
 				dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
 				PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
@@ -57,6 +58,7 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
 			next_avail += nb_refill;
 			rxq->nb_rx_hold -= nb_refill;
 		} else {
+			rxq->no_mbufs += nb_refill;
 			dev = &rte_eth_devices[rxq->port_id];
 			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
 			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
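
Each refill failure path charges no_mbufs with the number of buffers the failed bulk allocation actually requested: the first attempt fills only up to the ring wrap (delta == nb_desc - next_avail mbufs), the second asks for the remaining nb_refill, so the counter adds nb_refill there. A worked example with assumed values:

/* Assumed: nb_desc = 512, next_avail = 500 on entry, and
 * nb_refill starts at free_thresh = 64.
 *
 * Attempt 1 (fill to the wrap): delta = 512 - 500 = 12 mbufs;
 *   on failure, nb_desc - next_avail == 12, exactly what was requested.
 * Attempt 2 (after a successful wrap): next_avail = 0 and
 *   nb_refill = 64 - 12 = 52, so the bulk alloc asks for 52 mbufs;
 *   here nb_desc - next_avail == 512, so the accurate charge is
 *   nb_refill, matching the size of the failed request.
 */
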
@@ -80,7 +82,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint16_t pkt_len;
 	uint16_t rx_id;
 	uint16_t nb_rx;
+	uint64_t bytes;
 
+	bytes = 0;
 	nb_rx = 0;
 	rxq = rx_queue;
@@ -94,8 +98,10 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (rx_desc->generation != rxq->cur_gen_bit)
 			break;
 
-		if (unlikely(rx_desc->rx_error))
+		if (unlikely(rx_desc->rx_error)) {
+			rxq->errors++;
 			continue;
+		}
 
 		pkt_len = rx_desc->packet_len;
@@ -120,6 +126,7 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
 
 		rx_pkts[nb_rx++] = rxm;
+		bytes += pkt_len;
 	}
 
 	if (nb_rx > 0) {
@@ -128,6 +135,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxq->next_avail = rx_id_bufq;
 		gve_rx_refill_dqo(rxq);
+
+		rxq->packets += nb_rx;
+		rxq->bytes += bytes;
 	}
 
 	return nb_rx;
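
On the Rx side, nb_rx and bytes accumulate in locals and are committed to rxq->packets / rxq->bytes once per burst, keeping the hot loop to one local add per packet; completions with rx_error set are counted in rxq->errors and skipped. Since the counters are cumulative, converting them to rates is an application-side job; a sketch with a hypothetical helper, assuming a valid started port:

#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <rte_ethdev.h>

/* Sketch, not part of the patch: derive Rx rates from two samples. */
static void
report_rx_rate(uint16_t port_id)
{
	struct rte_eth_stats a, b;

	if (rte_eth_stats_get(port_id, &a) != 0)
		return;
	sleep(1);
	if (rte_eth_stats_get(port_id, &b) != 0)
		return;

	printf("rx: %" PRIu64 " pps, %" PRIu64 " bytes/s, %" PRIu64 " errors/s\n",
	       b.ipackets - a.ipackets, b.ibytes - a.ibytes,
	       b.ierrors - a.ierrors);
}
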
@@ -80,10 +80,12 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t nb_used;
 	uint16_t tx_id;
 	uint16_t sw_id;
+	uint64_t bytes;
 
 	sw_ring = txq->sw_ring;
 	txr = txq->tx_ring;
 
+	bytes = 0;
 	mask = txq->nb_tx_desc - 1;
 	sw_mask = txq->sw_size - 1;
 	tx_id = txq->tx_tail;
@@ -118,6 +120,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			tx_id = (tx_id + 1) & mask;
 			sw_id = (sw_id + 1) & sw_mask;
 
+			bytes += tx_pkt->data_len;
 			tx_pkt = tx_pkt->next;
 		} while (tx_pkt);
@@ -141,6 +144,9 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		rte_write32(tx_id, txq->qtx_tail);
 		txq->tx_tail = tx_id;
 		txq->sw_tail = sw_id;
+
+		txq->packets += nb_tx;
+		txq->bytes += bytes;
 	}
 
 	return nb_tx;
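
The Tx loop emits one descriptor per mbuf segment, so the byte counter adds data_len segment by segment; pkt_len holds the full frame length and is only meaningful on the head mbuf, so adding it inside the segment loop would multiply-count chained packets. A self-contained illustration with assumed values:

#include <stdint.h>
#include <rte_mbuf.h>

/* A 3000-byte frame split into two segments (values assumed):
 *   head: data_len = 2048, pkt_len = 3000
 *   tail: data_len =  952  (pkt_len is undefined on non-head segments)
 * Summing data_len yields 2048 + 952 == 3000 == head->pkt_len, while
 * summing pkt_len per segment would overcount.
 */
static uint64_t
chain_bytes(const struct rte_mbuf *m)
{
	uint64_t bytes = 0;

	for (; m != NULL; m = m->next)
		bytes += m->data_len;
	return bytes;
}
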