[dpdk-dev,v3,2/2] net/vhost: add pmd xstats

Message ID 1474364205-111569-3-git-send-email-zhiyong.yang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Yuanhan Liu

Commit Message

Yang, Zhiyong Sept. 20, 2016, 9:36 a.m. UTC
  This feature adds vhost pmd extended statistics on a per-queue basis,
in order to meet the requirements of applications such as OVS.

The statistics counters are based on RFC 2819 and RFC 2863 as follows:

rx/tx_good_packets
rx/tx_total_bytes
rx/tx_missed_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_undersize_packets
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets
rx/tx_size_128_to_255_packets
rx/tx_size_256_to_511_packets
rx/tx_size_512_to_1023_packets
rx/tx_size_1024_to_1522_packets
rx/tx_size_1523_to_max_packets
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets

No API is changed or added. The standard ethdev APIs are used:
rte_eth_xstats_get_names() retrieves the names of the supported vhost
xstats, rte_eth_xstats_get() retrieves the vhost extended statistics,
and rte_eth_xstats_reset() resets the vhost extended statistics.
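
For example, an application could dump and then reset all vhost xstats
along these lines (a minimal sketch; port_id, the includes, and error
handling are left as placeholders):

	int i, len;
	struct rte_eth_xstat *xstats;
	struct rte_eth_xstat_name *names;

	/* first call with NULL returns the number of xstats */
	len = rte_eth_xstats_get_names(port_id, NULL, 0);
	names = malloc(sizeof(*names) * len);
	xstats = malloc(sizeof(*xstats) * len);
	rte_eth_xstats_get_names(port_id, names, len);
	rte_eth_xstats_get(port_id, xstats, len);
	for (i = 0; i < len; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);
	rte_eth_xstats_reset(port_id);
	free(names);
	free(xstats);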

The usage of vhost pmd xstats is the same as for virtio pmd xstats.
For example, when the test-pmd application is running in interactive
mode, vhost pmd xstats supports the two following commands:

show port xstats all|<port_id>   will show vhost xstats
clear port xstats all|<port_id>  will reset vhost xstats

net/virtio pmd xstats (the function virtio_update_packet_stats) was
used as a reference when implementing this feature.

Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---

Changes in v3:
1. Rework vhost_update_packet_xstats and split it into two parts: one
   function handles the generic packet counters; the other increments
   the broadcast and multicast counters, including failed packets,
   according to RFC 2863 page 42 (ifHCOutMulticastPkts,
   ifHCOutBroadcastPkts).
2. Define enum vhost_xstats_pkts to replace the magic numbers and
   enhance code readability.
3. Remove some unnecessary type casts and fix one format issue.

Changes in v2:
1. Remove the compiling switch.
2. Fix two code bugs.

 drivers/net/vhost/rte_eth_vhost.c | 262 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 262 insertions(+)
  

Comments

Yuanhan Liu Sept. 20, 2016, 10:56 a.m. UTC | #1
On Tue, Sep 20, 2016 at 05:36:45PM +0800, Zhiyong Yang wrote:
> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
> index 9157bf1..c3f404d 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -78,6 +78,7 @@ struct vhost_stats {
>  	uint64_t missed_pkts;
>  	uint64_t rx_bytes;
>  	uint64_t tx_bytes;
> +	uint64_t xstats[16];
>  };
>  
>  struct vhost_queue {
> @@ -131,6 +132,252 @@ struct rte_vhost_vring_state {
>  
>  static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
>  
> +enum vhost_xstats_pkts {
> +	VHOST_UNDERSIZE_PKT = 0,
> +	VHOST_64_PKT,
> +	VHOST_65_TO_127_PKT,
> +	VHOST_128_TO_255_PKT,
> +	VHOST_256_TO_511_PKT,
> +	VHOST_512_TO_1023_PKT,
> +	VHOST_1024_TO_1522_PKT,
> +	VHOST_1523_TO_MAX_PKT,
> +	VHOST_BROADCAST_PKT,
> +	VHOST_MULTICAST_PKT,
> +	VHOST_ERRORS_PKT,
> +	VHOST_ERRORS_FRAGMENTED,
> +	VHOST_ERRORS_JABBER,
> +	VHOST_UNKNOWN_PROTOCOL,
> +};

The definition should go before "struct vhost_stats".

And VHOST_UNKNOWN_PROTOCOL should be VHOST_XSTATS_MAX. With that, the
hardcoded number "16" could be avoided and replaced by it.
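
Something like this (an illustrative sketch of the suggestion; the
final enum entry exists only to size the array, and the field order in
vhost_stats is assumed):

	enum vhost_xstats_pkts {
		VHOST_UNDERSIZE_PKT = 0,
		VHOST_64_PKT,
		VHOST_65_TO_127_PKT,
		VHOST_128_TO_255_PKT,
		VHOST_256_TO_511_PKT,
		VHOST_512_TO_1023_PKT,
		VHOST_1024_TO_1522_PKT,
		VHOST_1523_TO_MAX_PKT,
		VHOST_BROADCAST_PKT,
		VHOST_MULTICAST_PKT,
		VHOST_ERRORS_PKT,
		VHOST_ERRORS_FRAGMENTED,
		VHOST_ERRORS_JABBER,
		VHOST_UNKNOWN_PROTOCOL,
		VHOST_XSTATS_MAX,	/* keep last: number of xstats counters */
	};

	struct vhost_stats {
		uint64_t rx_pkts;
		uint64_t tx_pkts;
		uint64_t missed_pkts;
		uint64_t rx_bytes;
		uint64_t tx_bytes;
		uint64_t xstats[VHOST_XSTATS_MAX];	/* was: xstats[16] */
	};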

> +
> +#define VHOST_XSTATS_NAME_SIZE 64
> +
> +struct vhost_xstats_name_off {
> +	char name[VHOST_XSTATS_NAME_SIZE];
> +	uint64_t offset;
> +};
> +
> +/* [rt]_qX_ is prepended to the name string here */
> +static const struct vhost_xstats_name_off vhost_rxq_stat_strings[] = {
> +	{"good_packets",
> +	 offsetof(struct vhost_queue, stats.rx_pkts)},
> +	{"total_bytes",
> +	 offsetof(struct vhost_queue, stats.rx_bytes)},
> +	{"missed_pkts",
> +	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	{"broadcast_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
> +	{"multicast_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
> +	{"undersize_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
> +	{"size_64_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> +	{"size_65_to_127_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
> +	{"size_128_to_255_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
> +	{"size_256_to_511_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
> +	{"size_512_to_1023_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
> +	{"size_1024_to_1522_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
> +	{"size_1523_to_max_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
> +	{"errors_with_bad_CRC",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> +	{"fragmented_errors",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
> +	{"jabber_errors",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
> +	{"unknown_protos_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},

Hmm, is "unknown_protos_packets" a valid stats? Why we should handle it
here? Besides, it will always not be accessed, right? I mean,
VHOST_NB_RXQ_XSTATS makes sure that.

> +};
> +
> +/* [tx]_qX_ is prepended to the name string here */
> +static const struct vhost_xstats_name_off vhost_txq_stat_strings[] = {
> +	{"good_packets",
> +	 offsetof(struct vhost_queue, stats.tx_pkts)},
> +	{"total_bytes",
> +	 offsetof(struct vhost_queue, stats.tx_bytes)},
> +	{"missed_pkts",
> +	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	{"broadcast_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
> +	{"multicast_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
> +	{"size_64_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> +	{"size_65_to_127_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
> +	{"size_128_to_255_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
> +	{"size_256_to_511_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
> +	{"size_512_to_1023_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
> +	{"size_1024_to_1522_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
> +	{"size_1523_to_max_packets",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
> +	{"errors_with_bad_CRC",
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> +};
> +
> +#define VHOST_NB_RXQ_XSTATS (sizeof(vhost_rxq_stat_strings) / \
> +			     sizeof(vhost_rxq_stat_strings[0]))
> +
> +#define VHOST_NB_TXQ_XSTATS (sizeof(vhost_txq_stat_strings) / \
> +			     sizeof(vhost_txq_stat_strings[0]))
> +
> +static void
> +vhost_dev_xstats_reset(struct rte_eth_dev *dev)
> +{
> +	struct vhost_queue *vqrx = NULL;
> +	struct vhost_queue *vqtx = NULL;

I would not introduce two vars here: I'd just define one, vq.

> +	unsigned int i = 0;
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		vqrx = dev->data->rx_queues[i];
> +		if (!vqrx)
> +			continue;
> +		memset(&vqrx->stats, 0, sizeof(vqrx->stats));
> +	}
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		vqtx = dev->data->tx_queues[i];
> +		if (!vqtx)
> +			continue;
> +		memset(&vqtx->stats, 0, sizeof(vqtx->stats));
> +	}
> +}
> +
> +static int
> +vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
> +			   struct rte_eth_xstat_name *xstats_names,
> +			   unsigned int limit __rte_unused)
> +{
> +	unsigned int i = 0;
> +	unsigned int t = 0;
> +	int count = 0;
> +	int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> +			+ dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> +
> +	if (!xstats_names)
> +		return nstats;
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		if (!dev->data->rx_queues[i])
> +			continue;
> +		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> +			snprintf(xstats_names[count].name,
> +				 sizeof(xstats_names[count].name),
> +				 "rx_q%u_%s", i,
> +				 vhost_rxq_stat_strings[t].name);
> +				 count++;
> +		}
> +	}
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		if (!dev->data->tx_queues[i])
> +			continue;
> +		for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> +			snprintf(xstats_names[count].name,
> +				 sizeof(xstats_names[count].name),
> +				 "tx_q%u_%s", i,
> +				 vhost_txq_stat_strings[t].name);
> +				 count++;
> +		}
> +	}
> +	return count;
> +}
> +
> +static int
> +vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
> +		     unsigned int n)
> +{
> +	unsigned int i;
> +	unsigned int t;
> +	unsigned int count = 0;
> +	unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> +			     + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> +
> +	if (n < nxstats)
> +		return nxstats;
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		if (!dev->data->rx_queues[i])
> +			continue;
> +		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> +			xstats[count].value =
> +				*(uint64_t *)(((char *)dev->data->rx_queues[i])
> +				+ vhost_rxq_stat_strings[t].offset);
> +			count++;
> +		}
> +	}
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		if (!dev->data->tx_queues[i])
> +			continue;
> +		for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> +			xstats[count].value =
> +				*(uint64_t *)(((char *)dev->data->tx_queues[i])
> +				+ vhost_txq_stat_strings[t].offset);
> +			count++;
> +		}
> +	}
> +	return count;
> +}

> +static void
> +vhost_count_multicast_broadcast(struct vhost_queue *vq,
> +				struct rte_mbuf **bufs,

I would let this function count one mbuf, so that --->

> +				uint16_t begin,
> +				uint16_t end)
> +{
> +	uint64_t i = 0;
> +	struct ether_addr *ea = NULL;
> +	struct vhost_stats *pstats = &vq->stats;
> +
> +	for (i = begin; i < end; i++) {
> +		ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
> +		if (is_multicast_ether_addr(ea)) {
> +			if (is_broadcast_ether_addr(ea))
> +				pstats->xstats[VHOST_BROADCAST_PKT]++;
> +			else
> +				pstats->xstats[VHOST_MULTICAST_PKT]++;
> +		}
> +	}
> +}
> +
> +static void
> +vhost_update_packet_xstats(struct vhost_queue *vq,
> +			   struct rte_mbuf **bufs,
> +			   uint16_t nb_rxtx)
> +{
> +	uint32_t pkt_len = 0;
> +	uint64_t i = 0;
> +	uint64_t index;
> +	struct vhost_stats *pstats = &vq->stats;
> +
> +	for (i = 0; i < nb_rxtx ; i++) {
> +		pkt_len = bufs[i]->pkt_len;
> +		if (pkt_len == 64) {
> +			pstats->xstats[VHOST_64_PKT]++;
> +		} else if (pkt_len > 64 && pkt_len < 1024) {
> +			index = (sizeof(pkt_len) * 8)
> +				- __builtin_clz(pkt_len) - 5;
> +			pstats->xstats[index]++;
> +		} else {
> +			if (pkt_len < 64)
> +				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
> +			else if (pkt_len <= 1522)
> +				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
> +			else if (pkt_len > 1522)
> +				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
> +		}

... you could put it here, and save the extra for() loop introduced by
vhost_count_multicast_broadcast().

BTW, I would use a simpler var name, say "count", instead of "nb_rxtx".
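
Something like this (an untested sketch: a one-mbuf helper called from
the same loop, with the counter var renamed):

	static void
	vhost_count_multicast_broadcast(struct vhost_queue *vq,
					struct rte_mbuf *mbuf)
	{
		struct ether_addr *ea;
		struct vhost_stats *pstats = &vq->stats;

		ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
		if (is_multicast_ether_addr(ea)) {
			if (is_broadcast_ether_addr(ea))
				pstats->xstats[VHOST_BROADCAST_PKT]++;
			else
				pstats->xstats[VHOST_MULTICAST_PKT]++;
		}
	}

	static void
	vhost_update_packet_xstats(struct vhost_queue *vq,
				   struct rte_mbuf **bufs,
				   uint16_t count)
	{
		uint32_t pkt_len;
		uint64_t index;
		uint64_t i;
		struct vhost_stats *pstats = &vq->stats;

		for (i = 0; i < count; i++) {
			pkt_len = bufs[i]->pkt_len;
			if (pkt_len == 64) {
				pstats->xstats[VHOST_64_PKT]++;
			} else if (pkt_len > 64 && pkt_len < 1024) {
				/* 32 - clz(pkt_len) is the bit width of
				 * pkt_len, so widths 7..10 map to the
				 * 65_TO_127 .. 512_TO_1023 buckets
				 * (enum indexes 2..5).
				 */
				index = (sizeof(pkt_len) * 8)
					- __builtin_clz(pkt_len) - 5;
				pstats->xstats[index]++;
			} else if (pkt_len < 64) {
				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
			} else if (pkt_len <= 1522) {
				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
			} else {
				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
			}
			/* no second pass over the burst needed */
			vhost_count_multicast_broadcast(vq, bufs[i]);
		}
	}

The tx path could then invoke the one-mbuf helper directly for the
failed [nb_tx, nb_bufs) range.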

	--yliu
  
Yuanhan Liu Sept. 20, 2016, 11:50 a.m. UTC | #2
On Tue, Sep 20, 2016 at 05:36:45PM +0800, Zhiyong Yang wrote:
> +enum vhost_xstats_pkts {
> +	VHOST_UNDERSIZE_PKT = 0,
> +	VHOST_64_PKT,
> +	VHOST_65_TO_127_PKT,
> +	VHOST_128_TO_255_PKT,
> +	VHOST_256_TO_511_PKT,
> +	VHOST_512_TO_1023_PKT,
> +	VHOST_1024_TO_1522_PKT,
> +	VHOST_1523_TO_MAX_PKT,
> +	VHOST_BROADCAST_PKT,
> +	VHOST_MULTICAST_PKT,

Another thing I noted is that you dropped the unicast counting here. I
think the comment I gave in my last email was that you don't have to
update it every time we rx/tx packets; instead, you could calculate it
on query (at vhost_dev_xstats_get). I was not asking you to drop it.
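
For example, something along these lines at query time (an untested
sketch, assuming a VHOST_UNICAST_PKT entry is added back to the enum;
shown for an rx queue):

	/* derive unicast from counters we already maintain */
	vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.rx_pkts
		- vq->stats.xstats[VHOST_BROADCAST_PKT]
		- vq->stats.xstats[VHOST_MULTICAST_PKT];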

	--yliu
  
Yang, Zhiyong Sept. 21, 2016, 6:15 a.m. UTC | #3
Hi, Yuanhan:

> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Tuesday, September 20, 2016 7:50 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>
> Subject: Re: [PATCH v3 2/2] net/vhost: add pmd xstats
> 
> On Tue, Sep 20, 2016 at 05:36:45PM +0800, Zhiyong Yang wrote:
> > +enum vhost_xstats_pkts {
> > +	VHOST_UNDERSIZE_PKT = 0,
> > +	VHOST_64_PKT,
> > +	VHOST_65_TO_127_PKT,
> > +	VHOST_128_TO_255_PKT,
> > +	VHOST_256_TO_511_PKT,
> > +	VHOST_512_TO_1023_PKT,
> > +	VHOST_1024_TO_1522_PKT,
> > +	VHOST_1523_TO_MAX_PKT,
> > +	VHOST_BROADCAST_PKT,
> > +	VHOST_MULTICAST_PKT,
> 
> Another thing I noted is that you dropped the unicast counting here. I think
> the comment I gave in my last email was that you don't have to update it
> every time we rx/tx packets; instead, you could calculate it on query (at
> vhost_dev_xstats_get). I was not asking you to drop it.
> 

OK, I'm sorry I misunderstood you; I will restore it in the next v4
patch as you suggest.

> 	--yliu
  
Yang, Zhiyong Sept. 21, 2016, 7:22 a.m. UTC | #4
Hi, Yuanhan:

I will address your comments in the v4 patch.

Thanks
--Zhiyong
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Tuesday, September 20, 2016 6:57 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>
> Subject: Re: [PATCH v3 2/2] net/vhost: add pmd xstats
> 
> On Tue, Sep 20, 2016 at 05:36:45PM +0800, Zhiyong Yang wrote:
> > diff --git a/drivers/net/vhost/rte_eth_vhost.c
> > b/drivers/net/vhost/rte_eth_vhost.c
> > index 9157bf1..c3f404d 100644
> > --- a/drivers/net/vhost/rte_eth_vhost.c
> > +++ b/drivers/net/vhost/rte_eth_vhost.c
> > @@ -78,6 +78,7 @@ struct vhost_stats {
> >  	uint64_t missed_pkts;
> >  	uint64_t rx_bytes;
> >  	uint64_t tx_bytes;
> > +	uint64_t xstats[16];
> >  };
> >
> >  struct vhost_queue {
> > @@ -131,6 +132,252 @@ struct rte_vhost_vring_state {
> >
> >  static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
> >
> > +enum vhost_xstats_pkts {
> > +	VHOST_UNDERSIZE_PKT = 0,
> > +	VHOST_64_PKT,
> > +	VHOST_65_TO_127_PKT,
> > +	VHOST_128_TO_255_PKT,
> > +	VHOST_256_TO_511_PKT,
> > +	VHOST_512_TO_1023_PKT,
> > +	VHOST_1024_TO_1522_PKT,
> > +	VHOST_1523_TO_MAX_PKT,
> > +	VHOST_BROADCAST_PKT,
> > +	VHOST_MULTICAST_PKT,
> > +	VHOST_ERRORS_PKT,
> > +	VHOST_ERRORS_FRAGMENTED,
> > +	VHOST_ERRORS_JABBER,
> > +	VHOST_UNKNOWN_PROTOCOL,
> > +};
> 
> The definition should go before "struct vhost_stats".
> 
> And VHOST_UNKNOWN_PROTOCOL should be VHOST_XSTATS_MAX. With
> that, the hardcoded number "16" could be avoided and replaced by it.
> 

Ok.

> > +
> > +#define VHOST_XSTATS_NAME_SIZE 64
> > +
> > +struct vhost_xstats_name_off {
> > +	char name[VHOST_XSTATS_NAME_SIZE];
> > +	uint64_t offset;
> > +};
> > +
> > +/* [rt]_qX_ is prepended to the name string here */
> > +static const struct vhost_xstats_name_off vhost_rxq_stat_strings[] = {
> > +	{"good_packets",
> > +	 offsetof(struct vhost_queue, stats.rx_pkts)},
> > +	{"total_bytes",
> > +	 offsetof(struct vhost_queue, stats.rx_bytes)},
> > +	{"missed_pkts",
> > +	 offsetof(struct vhost_queue, stats.missed_pkts)},
> > +	{"broadcast_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
> > +	{"multicast_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
> > +	{"undersize_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
> > +	{"size_64_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> > +	{"size_65_to_127_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
> > +	{"size_128_to_255_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
> > +	{"size_256_to_511_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
> > +	{"size_512_to_1023_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
> > +	{"size_1024_to_1522_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
> > +	{"size_1523_to_max_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
> > +	{"errors_with_bad_CRC",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> > +	{"fragmented_errors",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
> > +	{"jabber_errors",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
> > +	{"unknown_protos_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
> 
> Hmm, is "unknown_protos_packets" a valid stats? Why we should handle it
> here? Besides, it will always not be accessed, right? I mean,
> VHOST_NB_RXQ_XSTATS makes sure that.
> 

Yes, unknown_protos_packets is never accessed and is always zero.
I checked the code: the i40e driver implements a similar counter via a
NIC register. I introduced it because I want vhost xstats to look like
a physical NIC's to the applications, according to RFC 2863, section
ifInUnknownProtos:

"For packet-oriented interfaces, the number of packets received via
the interface which were discarded because of an unknown or
unsupported protocol. For character-oriented or fixed-length
interfaces that support protocol multiplexing the number of
transmission units received via the interface which were discarded
because of an unknown or unsupported protocol. For any interface
that does not support protocol multiplexing, this counter will always
be 0."

> > +
> > +/* [tx]_qX_ is prepended to the name string here */
> > +static const struct vhost_xstats_name_off vhost_txq_stat_strings[] = {
> > +	{"good_packets",
> > +	 offsetof(struct vhost_queue, stats.tx_pkts)},
> > +	{"total_bytes",
> > +	 offsetof(struct vhost_queue, stats.tx_bytes)},
> > +	{"missed_pkts",
> > +	 offsetof(struct vhost_queue, stats.missed_pkts)},
> > +	{"broadcast_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
> > +	{"multicast_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
> > +	{"size_64_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> > +	{"size_65_to_127_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
> > +	{"size_128_to_255_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
> > +	{"size_256_to_511_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
> > +	{"size_512_to_1023_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
> > +	{"size_1024_to_1522_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
> > +	{"size_1523_to_max_packets",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
> > +	{"errors_with_bad_CRC",
> > +	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> > +};
> > +
> > +#define VHOST_NB_RXQ_XSTATS (sizeof(vhost_rxq_stat_strings) / \
> > +			     sizeof(vhost_rxq_stat_strings[0]))
> > +
> > +#define VHOST_NB_TXQ_XSTATS (sizeof(vhost_txq_stat_strings) / \
> > +			     sizeof(vhost_txq_stat_strings[0]))
> > +
> > +static void
> > +vhost_dev_xstats_reset(struct rte_eth_dev *dev)
> > +{
> > +	struct vhost_queue *vqrx = NULL;
> > +	struct vhost_queue *vqtx = NULL;
> 
> I would not introduce two vars here: I'd just define one, vq.
> 

Ok

> > +	unsigned int i = 0;
> > +
> > +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > +		vqrx = dev->data->rx_queues[i];
> > +		if (!vqrx)
> > +			continue;
> > +		memset(&vqrx->stats, 0, sizeof(vqrx->stats));
> > +	}
> > +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > +		vqtx = dev->data->tx_queues[i];
> > +		if (!vqtx)
> > +			continue;
> > +		memset(&vqtx->stats, 0, sizeof(vqtx->stats));
> > +	}
> > +}
> > +
> > +static int
> > +vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
> > +			   struct rte_eth_xstat_name *xstats_names,
> > +			   unsigned int limit __rte_unused)
> > +{
> > +	unsigned int i = 0;
> > +	unsigned int t = 0;
> > +	int count = 0;
> > +	int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> > +			+ dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> > +
> > +	if (!xstats_names)
> > +		return nstats;
> > +
> > +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > +		if (!dev->data->rx_queues[i])
> > +			continue;
> > +		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> > +			snprintf(xstats_names[count].name,
> > +				 sizeof(xstats_names[count].name),
> > +				 "rx_q%u_%s", i,
> > +				 vhost_rxq_stat_strings[t].name);
> > +				 count++;
> > +		}
> > +	}
> > +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > +		if (!dev->data->tx_queues[i])
> > +			continue;
> > +		for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> > +			snprintf(xstats_names[count].name,
> > +				 sizeof(xstats_names[count].name),
> > +				 "tx_q%u_%s", i,
> > +				 vhost_txq_stat_strings[t].name);
> > +				 count++;
> > +		}
> > +	}
> > +	return count;
> > +}
> > +
> > +static int
> > +vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
> > +		     unsigned int n)
> > +{
> > +	unsigned int i;
> > +	unsigned int t;
> > +	unsigned int count = 0;
> > +	unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> > +			     + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> > +
> > +	if (n < nxstats)
> > +		return nxstats;
> > +
> > +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > +		if (!dev->data->rx_queues[i])
> > +			continue;
> > +		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> > +			xstats[count].value =
> > +				*(uint64_t *)(((char *)dev->data->rx_queues[i])
> > +				+ vhost_rxq_stat_strings[t].offset);
> > +			count++;
> > +		}
> > +	}
> > +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > +		if (!dev->data->tx_queues[i])
> > +			continue;
> > +		for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> > +			xstats[count].value =
> > +				*(uint64_t *)(((char *)dev->data->tx_queues[i])
> > +				+ vhost_txq_stat_strings[t].offset);
> > +			count++;
> > +		}
> > +	}
> > +	return count;
> > +}
> 
> > +static void
> > +vhost_count_multicast_broadcast(struct vhost_queue *vq,
> > +				struct rte_mbuf **bufs,
> 
> I would let this function count one mbuf, so that --->
> 
> > +				uint16_t begin,
> > +				uint16_t end)
> > +{
> > +	uint64_t i = 0;
> > +	struct ether_addr *ea = NULL;
> > +	struct vhost_stats *pstats = &vq->stats;
> > +
> > +	for (i = begin; i < end; i++) {
> > +		ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
> > +		if (is_multicast_ether_addr(ea)) {
> > +			if (is_broadcast_ether_addr(ea))
> > +				pstats->xstats[VHOST_BROADCAST_PKT]++;
> > +			else
> > +				pstats->xstats[VHOST_MULTICAST_PKT]++;
> > +		}
> > +	}
> > +}
> > +
> > +static void
> > +vhost_update_packet_xstats(struct vhost_queue *vq,
> > +			   struct rte_mbuf **bufs,
> > +			   uint16_t nb_rxtx)
> > +{
> > +	uint32_t pkt_len = 0;
> > +	uint64_t i = 0;
> > +	uint64_t index;
> > +	struct vhost_stats *pstats = &vq->stats;
> > +
> > +	for (i = 0; i < nb_rxtx ; i++) {
> > +		pkt_len = bufs[i]->pkt_len;
> > +		if (pkt_len == 64) {
> > +			pstats->xstats[VHOST_64_PKT]++;
> > +		} else if (pkt_len > 64 && pkt_len < 1024) {
> > +			index = (sizeof(pkt_len) * 8)
> > +				- __builtin_clz(pkt_len) - 5;
> > +			pstats->xstats[index]++;
> > +		} else {
> > +			if (pkt_len < 64)
> > +				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
> > +			else if (pkt_len <= 1522)
> > +				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
> > +			else if (pkt_len > 1522)
> > +				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
> > +		}
> 
> ... you could put it here, and save the extra for() loop introduced by
> vhost_count_multicast_broadcast().
> 
> BTW, I would use a simpler var name, say "count", instead of "nb_rxtx".

Ok.

> 
> 	--yliu
  

Patch

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 9157bf1..c3f404d 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -78,6 +78,7 @@  struct vhost_stats {
 	uint64_t missed_pkts;
 	uint64_t rx_bytes;
 	uint64_t tx_bytes;
+	uint64_t xstats[16];
 };
 
 struct vhost_queue {
@@ -131,6 +132,252 @@  struct rte_vhost_vring_state {
 
 static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
 
+enum vhost_xstats_pkts {
+	VHOST_UNDERSIZE_PKT = 0,
+	VHOST_64_PKT,
+	VHOST_65_TO_127_PKT,
+	VHOST_128_TO_255_PKT,
+	VHOST_256_TO_511_PKT,
+	VHOST_512_TO_1023_PKT,
+	VHOST_1024_TO_1522_PKT,
+	VHOST_1523_TO_MAX_PKT,
+	VHOST_BROADCAST_PKT,
+	VHOST_MULTICAST_PKT,
+	VHOST_ERRORS_PKT,
+	VHOST_ERRORS_FRAGMENTED,
+	VHOST_ERRORS_JABBER,
+	VHOST_UNKNOWN_PROTOCOL,
+};
+
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+	char name[VHOST_XSTATS_NAME_SIZE];
+	uint64_t offset;
+};
+
+/* [rt]_qX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxq_stat_strings[] = {
+	{"good_packets",
+	 offsetof(struct vhost_queue, stats.rx_pkts)},
+	{"total_bytes",
+	 offsetof(struct vhost_queue, stats.rx_bytes)},
+	{"missed_pkts",
+	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	{"broadcast_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+	{"multicast_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+	{"undersize_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+	{"size_64_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+	{"size_65_to_127_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+	{"size_128_to_255_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+	{"size_256_to_511_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+	{"size_512_to_1023_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+	{"size_1024_to_1522_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+	{"size_1523_to_max_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+	{"errors_with_bad_CRC",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+	{"fragmented_errors",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+	{"jabber_errors",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+	{"unknown_protos_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_qX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txq_stat_strings[] = {
+	{"good_packets",
+	 offsetof(struct vhost_queue, stats.tx_pkts)},
+	{"total_bytes",
+	 offsetof(struct vhost_queue, stats.tx_bytes)},
+	{"missed_pkts",
+	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	{"broadcast_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+	{"multicast_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+	{"size_64_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+	{"size_65_to_127_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+	{"size_128_to_255_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+	{"size_256_to_511_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+	{"size_512_to_1023_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+	{"size_1024_to_1522_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+	{"size_1523_to_max_packets",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+	{"errors_with_bad_CRC",
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_RXQ_XSTATS (sizeof(vhost_rxq_stat_strings) / \
+			     sizeof(vhost_rxq_stat_strings[0]))
+
+#define VHOST_NB_TXQ_XSTATS (sizeof(vhost_txq_stat_strings) / \
+			     sizeof(vhost_txq_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct vhost_queue *vqrx = NULL;
+	struct vhost_queue *vqtx = NULL;
+	unsigned int i = 0;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vqrx = dev->data->rx_queues[i];
+		if (!vqrx)
+			continue;
+		memset(&vqrx->stats, 0, sizeof(vqrx->stats));
+	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vqtx = dev->data->tx_queues[i];
+		if (!vqtx)
+			continue;
+		memset(&vqtx->stats, 0, sizeof(vqtx->stats));
+	}
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
+			   struct rte_eth_xstat_name *xstats_names,
+			   unsigned int limit __rte_unused)
+{
+	unsigned int i = 0;
+	unsigned int t = 0;
+	int count = 0;
+	int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+			+ dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+	if (!xstats_names)
+		return nstats;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!dev->data->rx_queues[i])
+			continue;
+		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+			snprintf(xstats_names[count].name,
+				 sizeof(xstats_names[count].name),
+				 "rx_q%u_%s", i,
+				 vhost_rxq_stat_strings[t].name);
+				 count++;
+		}
+	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (!dev->data->tx_queues[i])
+			continue;
+		for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+			snprintf(xstats_names[count].name,
+				 sizeof(xstats_names[count].name),
+				 "tx_q%u_%s", i,
+				 vhost_txq_stat_strings[t].name);
+				 count++;
+		}
+	}
+	return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		     unsigned int n)
+{
+	unsigned int i;
+	unsigned int t;
+	unsigned int count = 0;
+	unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+			     + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+	if (n < nxstats)
+		return nxstats;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!dev->data->rx_queues[i])
+			continue;
+		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+			xstats[count].value =
+				*(uint64_t *)(((char *)dev->data->rx_queues[i])
+				+ vhost_rxq_stat_strings[t].offset);
+			count++;
+		}
+	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (!dev->data->tx_queues[i])
+			continue;
+		for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+			xstats[count].value =
+				*(uint64_t *)(((char *)dev->data->tx_queues[i])
+				+ vhost_txq_stat_strings[t].offset);
+			count++;
+		}
+	}
+	return count;
+}
+
+static void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+				struct rte_mbuf **bufs,
+				uint16_t begin,
+				uint16_t end)
+{
+	uint64_t i = 0;
+	struct ether_addr *ea = NULL;
+	struct vhost_stats *pstats = &vq->stats;
+
+	for (i = begin; i < end; i++) {
+		ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
+		if (is_multicast_ether_addr(ea)) {
+			if (is_broadcast_ether_addr(ea))
+				pstats->xstats[VHOST_BROADCAST_PKT]++;
+			else
+				pstats->xstats[VHOST_MULTICAST_PKT]++;
+		}
+	}
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+			   struct rte_mbuf **bufs,
+			   uint16_t nb_rxtx)
+{
+	uint32_t pkt_len = 0;
+	uint64_t i = 0;
+	uint64_t index;
+	struct vhost_stats *pstats = &vq->stats;
+
+	for (i = 0; i < nb_rxtx ; i++) {
+		pkt_len = bufs[i]->pkt_len;
+		if (pkt_len == 64) {
+			pstats->xstats[VHOST_64_PKT]++;
+		} else if (pkt_len > 64 && pkt_len < 1024) {
+			index = (sizeof(pkt_len) * 8)
+				- __builtin_clz(pkt_len) - 5;
+			pstats->xstats[index]++;
+		} else {
+			if (pkt_len < 64)
+				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+			else if (pkt_len <= 1522)
+				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+			else if (pkt_len > 1522)
+				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+		}
+	}
+
+	vhost_count_multicast_broadcast(vq, bufs, 0, nb_rxtx);
+}
+
 static uint16_t
 eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
@@ -156,6 +403,8 @@  eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		r->stats.rx_bytes += bufs[i]->pkt_len;
 	}
 
+	vhost_update_packet_xstats(r, bufs, nb_rx);
+
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
 
@@ -186,8 +435,18 @@  eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	for (i = 0; likely(i < nb_tx); i++)
 		r->stats.tx_bytes += bufs[i]->pkt_len;
 
+	vhost_update_packet_xstats(r, bufs, nb_tx);
+
+	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
+	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
+	 * are increased when packets are not transmitted successfully.
+	 */
+	if ((nb_bufs - nb_tx) > 0)
+		vhost_count_multicast_broadcast(r, bufs, nb_tx, nb_bufs);
+
 	for (i = 0; likely(i < nb_tx); i++)
 		rte_pktmbuf_free(bufs[i]);
+
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
 
@@ -686,6 +945,9 @@  static const struct eth_dev_ops ops = {
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
+	.xstats_reset = vhost_dev_xstats_reset,
+	.xstats_get = vhost_dev_xstats_get,
+	.xstats_get_names = vhost_dev_xstats_get_names,
 };
 
 static int