[v4,9/9] net/gve: add support for stats
Checks
Commit Message
Update stats: add support for dev_ops stats_get/reset.
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
doc/guides/nics/features/gve.ini | 2 +
drivers/net/gve/gve_ethdev.c | 71 ++++++++++++++++++++++++++++++++
drivers/net/gve/gve_ethdev.h | 11 +++++
drivers/net/gve/gve_rx.c | 15 ++++++-
drivers/net/gve/gve_tx.c | 13 ++++++
5 files changed, 110 insertions(+), 2 deletions(-)
Comments
On 9/27/2022 8:32 AM, Junfeng Guo wrote:
>
> Update stats add support of dev_ops stats_get/reset.
>
> Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> ---
> doc/guides/nics/features/gve.ini | 2 +
> drivers/net/gve/gve_ethdev.c | 71 ++++++++++++++++++++++++++++++++
> drivers/net/gve/gve_ethdev.h | 11 +++++
> drivers/net/gve/gve_rx.c | 15 ++++++-
> drivers/net/gve/gve_tx.c | 13 ++++++
> 5 files changed, 110 insertions(+), 2 deletions(-)
>
> diff --git a/doc/guides/nics/features/gve.ini b/doc/guides/nics/features/gve.ini
> index cdc46b08a3..180408aa80 100644
> --- a/doc/guides/nics/features/gve.ini
> +++ b/doc/guides/nics/features/gve.ini
> @@ -10,6 +10,8 @@ MTU update = Y
> TSO = Y
> RSS hash = Y
> L4 checksum offload = Y
> +Basic stats = Y
> +Stats per queue = Y
"stats per queue" is something else, agree that it is bad naming, please
check features.rst file.
> Linux = Y
> x86-32 = Y
> x86-64 = Y
> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
> index b9b8e51b02..cd474b8128 100644
> --- a/drivers/net/gve/gve_ethdev.c
> +++ b/drivers/net/gve/gve_ethdev.c
> @@ -328,6 +328,75 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> return 0;
> }
>
> +static int
> +gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
> +{
> + uint16_t i;
> +
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + struct gve_tx_queue *txq = dev->data->tx_queues[i];
> + if (txq == NULL)
> + continue;
> +
> + stats->opackets += txq->packets;
> + stats->obytes += txq->bytes;
> + stats->oerrors += txq->errors;
> +
> + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
> + stats->q_opackets[i] = txq->packets;
> + stats->q_obytes[i] = txq->bytes;
Queue stats update is moved to xstats [1]; we are waiting for existing PMDs
to adopt it, but for new drivers it is better to implement the new method.
Can you please either drop queue stats completely, or implement them via
xstats?
[1]
https://elixir.bootlin.com/dpdk/v22.07/source/doc/guides/rel_notes/deprecation.rst#L118
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@amd.com>
> Sent: Thursday, October 6, 2022 22:26
> To: Guo, Junfeng <junfeng.guo@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: ferruh.yigit@xilinx.com; dev@dpdk.org; Li, Xiaoyun
> <xiaoyun.li@intel.com>; awogbemila@google.com; Richardson, Bruce
> <bruce.richardson@intel.com>; Lin, Xueqin <xueqin.lin@intel.com>
> Subject: Re: [PATCH v4 9/9] net/gve: add support for stats
>
> On 9/27/2022 8:32 AM, Junfeng Guo wrote:
>
> >
> > Update stats add support of dev_ops stats_get/reset.
> >
> > Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> > Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> > ---
> > doc/guides/nics/features/gve.ini | 2 +
> > drivers/net/gve/gve_ethdev.c | 71
> ++++++++++++++++++++++++++++++++
> > drivers/net/gve/gve_ethdev.h | 11 +++++
> > drivers/net/gve/gve_rx.c | 15 ++++++-
> > drivers/net/gve/gve_tx.c | 13 ++++++
> > 5 files changed, 110 insertions(+), 2 deletions(-)
> >
> > diff --git a/doc/guides/nics/features/gve.ini
> b/doc/guides/nics/features/gve.ini
> > index cdc46b08a3..180408aa80 100644
> > --- a/doc/guides/nics/features/gve.ini
> > +++ b/doc/guides/nics/features/gve.ini
> > @@ -10,6 +10,8 @@ MTU update = Y
> > TSO = Y
> > RSS hash = Y
> > L4 checksum offload = Y
> > +Basic stats = Y
> > +Stats per queue = Y
>
> "stats per queue" is something else, agree that it is bad naming, please
> check features.rst file.
Sure, will check the features file and update accordingly. Thanks!
>
> > Linux = Y
> > x86-32 = Y
> > x86-64 = Y
> > diff --git a/drivers/net/gve/gve_ethdev.c
> b/drivers/net/gve/gve_ethdev.c
> > index b9b8e51b02..cd474b8128 100644
> > --- a/drivers/net/gve/gve_ethdev.c
> > +++ b/drivers/net/gve/gve_ethdev.c
> > @@ -328,6 +328,75 @@ gve_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> > return 0;
> > }
> >
> > +static int
> > +gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
> > +{
> > + uint16_t i;
> > +
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > + struct gve_tx_queue *txq = dev->data->tx_queues[i];
> > + if (txq == NULL)
> > + continue;
> > +
> > + stats->opackets += txq->packets;
> > + stats->obytes += txq->bytes;
> > + stats->oerrors += txq->errors;
> > +
> > + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
> > + stats->q_opackets[i] = txq->packets;
> > + stats->q_obytes[i] = txq->bytes;
>
> Queue stats update is moved to xstat [1], we are waiting existing PMDs
> to adopt it but for new drivers it is better to implement new method.
>
> Can you please either drop queue stats completely, or implement it via
> xstats?
>
> [1]
> https://elixir.bootlin.com/dpdk/v22.07/source/doc/guides/rel_notes/dep
> recation.rst#L118
Sure, thanks for reminding this!
It seems that it would be better to drop the stats feature at this point, due to lack
of time and bandwidth... But we can still measure the performance via other
tools like TRex. And we can plan the implementation of the new method in
the coming release. Thanks!
@@ -10,6 +10,8 @@ MTU update = Y
TSO = Y
RSS hash = Y
L4 checksum offload = Y
+Basic stats = Y
+Stats per queue = Y
Linux = Y
x86-32 = Y
x86-64 = Y
@@ -328,6 +328,75 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
return 0;
}
+static int
+gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct gve_tx_queue *txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ stats->opackets += txq->packets;
+ stats->obytes += txq->bytes;
+ stats->oerrors += txq->errors;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txq->packets;
+ stats->q_obytes[i] = txq->bytes;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ stats->ipackets += rxq->packets;
+ stats->ibytes += rxq->bytes;
+ stats->ierrors += rxq->errors;
+ stats->rx_nombuf += rxq->no_mbufs;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxq->packets;
+ stats->q_ibytes[i] = rxq->bytes;
+ stats->q_errors[i] = rxq->errors;
+ }
+ }
+
+ return 0;
+}
+
+static int
+gve_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct gve_tx_queue *txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ txq->packets = 0;
+ txq->bytes = 0;
+ txq->errors = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ rxq->packets = 0;
+ rxq->bytes = 0;
+ rxq->no_mbufs = 0;
+ rxq->errors = 0;
+ }
+
+ return 0;
+}
+
static int
gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
@@ -365,6 +434,8 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
.rx_queue_setup = gve_rx_queue_setup,
.tx_queue_setup = gve_tx_queue_setup,
.link_update = gve_link_update,
+ .stats_get = gve_dev_stats_get,
+ .stats_reset = gve_dev_stats_reset,
.mtu_set = gve_dev_mtu_set,
};
@@ -76,6 +76,11 @@ struct gve_tx_queue {
struct gve_queue_page_list *qpl;
struct gve_tx_iovec *iov_ring;
+ /* Stats */
+ uint64_t errors;
+ uint64_t packets;
+ uint64_t bytes;
+
uint16_t port_id;
uint16_t queue_id;
@@ -114,6 +119,12 @@ struct gve_rx_queue {
/* only valid for GQI_QPL queue format */
struct gve_queue_page_list *qpl;
+ /* stats */
+ uint64_t no_mbufs;
+ uint64_t errors;
+ uint64_t packets;
+ uint64_t bytes;
+
struct gve_priv *hw;
const struct rte_memzone *qres_mz;
struct gve_queue_resources *qres;
@@ -26,8 +26,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
break;
rxq->sw_ring[idx + i] = nmb;
}
- if (i != nb_alloc)
+ if (i != nb_alloc) {
+ rxq->no_mbufs += nb_alloc - i;
nb_alloc = i;
+ }
}
rxq->nb_avail -= nb_alloc;
next_avail += nb_alloc;
@@ -88,6 +90,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t rx_id = rxq->rx_tail;
struct rte_mbuf *rxe;
uint16_t nb_rx, len;
+ uint64_t bytes = 0;
uint64_t addr;
rxr = rxq->rx_desc_ring;
@@ -97,8 +100,10 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
break;
- if (rxd->flags_seq & GVE_RXF_ERR)
+ if (rxd->flags_seq & GVE_RXF_ERR) {
+ rxq->errors++;
continue;
+ }
len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
rxe = rxq->sw_ring[rx_id];
@@ -137,6 +142,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_id = 0;
rx_pkts[nb_rx] = rxe;
+ bytes += len;
}
rxq->nb_avail += nb_rx;
@@ -145,6 +151,11 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (rxq->nb_avail > rxq->free_thresh)
gve_rx_refill(rxq);
+ if (nb_rx) {
+ rxq->packets += nb_rx;
+ rxq->bytes += bytes;
+ }
+
return nb_rx;
}
@@ -260,6 +260,7 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct rte_mbuf *tx_pkt, *first;
uint16_t sw_id = txq->sw_tail;
uint16_t nb_used, i;
+ uint64_t bytes = 0;
uint16_t nb_tx = 0;
uint32_t hlen;
@@ -355,6 +356,8 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->nb_free -= nb_used;
txq->sw_nb_free -= first->nb_segs;
tx_tail += nb_used;
+
+ bytes += first->pkt_len;
}
end_of_tx:
@@ -362,6 +365,10 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
txq->tx_tail = tx_tail;
txq->sw_tail = sw_id;
+
+ txq->errors += nb_pkts - nb_tx;
+ txq->packets += nb_tx;
+ txq->bytes += bytes;
}
return nb_tx;
@@ -380,6 +387,7 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct rte_mbuf *tx_pkt, *first;
uint16_t nb_used, hlen, i;
uint64_t ol_flags, addr;
+ uint64_t bytes = 0;
uint16_t nb_tx = 0;
txr = txq->tx_desc_ring;
@@ -438,12 +446,17 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->nb_free -= nb_used;
tx_tail += nb_used;
+
+ bytes += first->pkt_len;
}
end_of_tx:
if (nb_tx) {
rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
txq->tx_tail = tx_tail;
+
+ txq->packets += nb_tx;
+ txq->bytes += bytes;
}
return nb_tx;