@@ -9,6 +9,18 @@
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
+static const struct gve_xstats_name_offset tx_xstats_name_offset[] = {
+ { "packets", offsetof(struct gve_tx_stats, packets) },
+ { "bytes", offsetof(struct gve_tx_stats, bytes) },
+ { "errors", offsetof(struct gve_tx_stats, errors) },
+};
+
+static const struct gve_xstats_name_offset rx_xstats_name_offset[] = {
+ { "packets", offsetof(struct gve_rx_stats, packets) },
+ { "bytes", offsetof(struct gve_rx_stats, bytes) },
+ { "errors", offsetof(struct gve_rx_stats, errors) },
+};
+
static void
gve_write_version(uint8_t *driver_version_register)
{
@@ -328,9 +340,9 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (txq == NULL)
continue;
- stats->opackets += txq->packets;
- stats->obytes += txq->bytes;
- stats->oerrors += txq->errors;
+ stats->opackets += txq->stats.packets;
+ stats->obytes += txq->stats.bytes;
+ stats->oerrors += txq->stats.errors;
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -338,10 +350,10 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (rxq == NULL)
continue;
- stats->ipackets += rxq->packets;
- stats->ibytes += rxq->bytes;
- stats->ierrors += rxq->errors;
- stats->rx_nombuf += rxq->no_mbufs;
+ stats->ipackets += rxq->stats.packets;
+ stats->ibytes += rxq->stats.bytes;
+ stats->ierrors += rxq->stats.errors;
+ stats->rx_nombuf += rxq->stats.no_mbufs;
}
return 0;
@@ -357,9 +369,7 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
if (txq == NULL)
continue;
- txq->packets = 0;
- txq->bytes = 0;
- txq->errors = 0;
+ memset(&txq->stats, 0, sizeof(txq->stats));
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -367,10 +377,7 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
if (rxq == NULL)
continue;
- rxq->packets = 0;
- rxq->bytes = 0;
- rxq->errors = 0;
- rxq->no_mbufs = 0;
+ memset(&rxq->stats, 0, sizeof(rxq->stats));
}
return 0;
@@ -403,6 +410,112 @@ gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
+static int
+gve_xstats_count(struct rte_eth_dev *dev)
+{
+ uint16_t i, count = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (dev->data->tx_queues[i])
+ count += RTE_DIM(tx_xstats_name_offset);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (dev->data->rx_queues[i])
+ count += RTE_DIM(rx_xstats_name_offset);
+ }
+
+ return count;
+}
+
+static int
+gve_xstats_get(struct rte_eth_dev *dev,
+	       struct rte_eth_xstat *xstats,
+	       unsigned int size)
+{
+	uint16_t i, count = gve_xstats_count(dev);
+
+	if (!xstats)
+		return (size == 0) ? count : -EINVAL;
+
+	/* Per ethdev contract: buffer too small -> report required size. */
+	if (size < count)
+		return count;
+
+	/* Buffer holds every entry, so no per-write bounds check is needed. */
+	count = 0;
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		const struct gve_tx_queue *txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+
+		const char *stats = (const char *)&txq->stats;
+		uint16_t j;
+		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++, count++) {
+			xstats[count].id = count;
+			xstats[count].value = *(const uint64_t *)
+				(stats + tx_xstats_name_offset[j].offset);
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		const struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+
+		const char *stats = (const char *)&rxq->stats;
+		uint16_t j;
+		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++, count++) {
+			xstats[count].id = count;
+			xstats[count].value = *(const uint64_t *)
+				(stats + rx_xstats_name_offset[j].offset);
+		}
+	}
+
+	return count;
+}
+
+static int
+gve_xstats_get_names(struct rte_eth_dev *dev,
+		     struct rte_eth_xstat_name *xstats_names,
+		     unsigned int size)
+{
+	uint16_t i, count = gve_xstats_count(dev);
+
+	if (!xstats_names)
+		return (size == 0) ? count : -EINVAL;
+
+	/* Per ethdev contract: buffer too small -> report required size. */
+	if (size < count)
+		return count;
+
+	/* Names must be emitted in the same order as gve_xstats_get(). */
+	count = 0;
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (!dev->data->tx_queues[i])
+			continue;
+
+		uint16_t j;
+		for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++)
+			snprintf(xstats_names[count++].name,
+				 RTE_ETH_XSTATS_NAME_SIZE,
+				 "tx_q%u_%s", i, tx_xstats_name_offset[j].name);
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!dev->data->rx_queues[i])
+			continue;
+
+		uint16_t j;
+		for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++)
+			snprintf(xstats_names[count++].name,
+				 RTE_ETH_XSTATS_NAME_SIZE,
+				 "rx_q%u_%s", i, rx_xstats_name_offset[j].name);
+	}
+
+	return count;
+}
+
static const struct eth_dev_ops gve_eth_dev_ops = {
.dev_configure = gve_dev_configure,
.dev_start = gve_dev_start,
@@ -417,6 +530,8 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
.stats_get = gve_dev_stats_get,
.stats_reset = gve_dev_stats_reset,
.mtu_set = gve_dev_mtu_set,
+ .xstats_get = gve_xstats_get,
+ .xstats_get_names = gve_xstats_get_names,
};
static void
@@ -67,6 +67,24 @@ struct gve_tx_iovec {
uint32_t iov_len;
};
+struct gve_tx_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+};
+
+struct gve_rx_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+ uint64_t no_mbufs;
+};
+
+struct gve_xstats_name_offset {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
struct gve_tx_queue {
volatile union gve_tx_desc *tx_desc_ring;
const struct rte_memzone *mz;
@@ -93,9 +111,7 @@ struct gve_tx_queue {
struct gve_tx_iovec *iov_ring;
/* stats items */
- uint64_t packets;
- uint64_t bytes;
- uint64_t errors;
+ struct gve_tx_stats stats;
uint16_t port_id;
uint16_t queue_id;
@@ -136,10 +152,7 @@ struct gve_rx_queue {
struct gve_queue_page_list *qpl;
/* stats items */
- uint64_t packets;
- uint64_t bytes;
- uint64_t errors;
- uint64_t no_mbufs;
+ struct gve_rx_stats stats;
struct gve_priv *hw;
const struct rte_memzone *qres_mz;
@@ -27,7 +27,7 @@ gve_rx_refill(struct gve_rx_queue *rxq)
rxq->sw_ring[idx + i] = nmb;
}
if (i != nb_alloc) {
- rxq->no_mbufs += nb_alloc - i;
+ rxq->stats.no_mbufs += nb_alloc - i;
nb_alloc = i;
}
}
@@ -62,7 +62,7 @@ gve_rx_refill(struct gve_rx_queue *rxq)
rxq->sw_ring[idx + i] = nmb;
}
if (i != nb_alloc) {
- rxq->no_mbufs += nb_alloc - i;
+ rxq->stats.no_mbufs += nb_alloc - i;
nb_alloc = i;
}
}
@@ -106,7 +106,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
if (rxd->flags_seq & GVE_RXF_ERR) {
- rxq->errors++;
+ rxq->stats.errors++;
continue;
}
@@ -154,8 +154,8 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
gve_rx_refill(rxq);
if (nb_rx) {
- rxq->packets += nb_rx;
- rxq->bytes += bytes;
+ rxq->stats.packets += nb_rx;
+ rxq->stats.bytes += bytes;
}
return nb_rx;
@@ -366,9 +366,9 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->tx_tail = tx_tail;
txq->sw_tail = sw_id;
- txq->packets += nb_tx;
- txq->bytes += bytes;
- txq->errors += nb_pkts - nb_tx;
+ txq->stats.packets += nb_tx;
+ txq->stats.bytes += bytes;
+ txq->stats.errors += nb_pkts - nb_tx;
}
return nb_tx;
@@ -455,8 +455,9 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
txq->tx_tail = tx_tail;
- txq->packets += nb_tx;
- txq->bytes += bytes;
+ txq->stats.packets += nb_tx;
+ txq->stats.bytes += bytes;
+ txq->stats.errors += nb_pkts - nb_tx;
}
return nb_tx;