[1/2] net/virtio: update stats when in order xmit done

Message ID 20190827102407.65106-1-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series [1/2] net/virtio: update stats when in order xmit done

Checks

Context                          Check    Description
ci/checkpatch                    success  coding style OK
ci/Intel-compilation             success  Compilation OK
ci/iol-Compile-Testing           success  Compile Testing PASS
ci/intel-Performance-Testing     success  Performance Testing PASS
ci/mellanox-Performance-Testing  success  Performance Testing PASS

Commit Message

Marvin Liu Aug. 27, 2019, 10:24 a.m. UTC
  When doing xmit in-order enqueue, packets are buffered and then flushed
into the avail ring. It is possible that there is no free room in the
avail ring, in which case some of the buffered packets cannot be
transmitted. So move the stats update to just after the successful avail
ring update.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
---
 drivers/net/virtio/virtio_rxtx.c | 86 ++++++++++++++++----------------
 1 file changed, 43 insertions(+), 43 deletions(-)
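
For context, below is a minimal, self-contained sketch (not the DPDK
implementation) of the ordering problem the patch fixes: if statistics are
bumped while packets are only buffered, a later flush that finds the avail
ring full leaves part of the burst untransmitted but already counted. Every
name in the sketch (fake_ring, fake_pkt, fake_stats, the two xmit_count_*
helpers) is hypothetical and invented only for illustration.

/*
 * Minimal, self-contained sketch (not the DPDK implementation) of why the
 * per-packet stats update has to follow the successful avail ring enqueue.
 * Every name here (fake_ring, fake_pkt, fake_stats, ...) is hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pkt   { uint32_t len; };
struct fake_stats { uint64_t packets; uint64_t bytes; };
struct fake_ring  { unsigned int free_slots; };

/* Buggy ordering: stats are bumped while packets are merely buffered,
 * even though the flush below may find no room left in the ring. */
static void
xmit_count_early(struct fake_ring *r, struct fake_stats *st,
		 const struct fake_pkt *burst, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		st->packets++;			/* counted here... */
		st->bytes += burst[i].len;
	}
	if (n > r->free_slots)			/* ...but only part of the */
		n = r->free_slots;		/* burst fits in the ring */
	r->free_slots -= n;
}

/* Fixed ordering: count a packet only once it actually got a ring slot. */
static void
xmit_count_after_enqueue(struct fake_ring *r, struct fake_stats *st,
			 const struct fake_pkt *burst, unsigned int n)
{
	if (n > r->free_slots)
		n = r->free_slots;		/* excess packets are not sent */
	for (unsigned int i = 0; i < n; i++) {
		st->packets++;
		st->bytes += burst[i].len;
	}
	r->free_slots -= n;
}

int
main(void)
{
	const struct fake_pkt burst[4] = { {64}, {64}, {64}, {64} };
	struct fake_stats a = {0, 0}, b = {0, 0};
	struct fake_ring r1 = { 2 }, r2 = { 2 };	/* room for only 2 */

	xmit_count_early(&r1, &a, burst, 4);
	xmit_count_after_enqueue(&r2, &b, burst, 4);
	/* a over-counts (4 packets); b matches what was really enqueued (2). */
	printf("early: %" PRIu64 " pkts, fixed: %" PRIu64 " pkts\n",
	       a.packets, b.packets);
	return 0;
}

The patch applies the same idea by calling virtio_update_packet_stats()
from virtqueue_enqueue_xmit_inorder() instead of from
virtio_xmit_pkts_inorder(), as shown in the diff below.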
  

Comments

Tiwei Bie Sept. 10, 2019, 5:45 a.m. UTC | #1
On Tue, Aug 27, 2019 at 06:24:06PM +0800, Marvin Liu wrote:
> When doing xmit in-order enqueue, packets are buffered and then flushed
> into the avail ring. It is possible that there is no free room in the
> avail ring, in which case some of the buffered packets cannot be
> transmitted. So move the stats update to just after the successful avail
> ring update.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> ---
>  drivers/net/virtio/virtio_rxtx.c | 86 ++++++++++++++++----------------
>  1 file changed, 43 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 27ead19fb..5d4ed524e 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -575,6 +575,48 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
>  	}
>  }
>  
> +static inline void
> +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
> +{
> +	uint32_t s = mbuf->pkt_len;
> +	struct rte_ether_addr *ea;
> +
> +	stats->bytes += s;
> +
> +	if (s == 64) {
> +		stats->size_bins[1]++;
> +	} else if (s > 64 && s < 1024) {
> +		uint32_t bin;
> +
> +		/* count zeros, and offset into correct bin */
> +		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> +		stats->size_bins[bin]++;
> +	} else {
> +		if (s < 64)
> +			stats->size_bins[0]++;
> +		else if (s < 1519)
> +			stats->size_bins[6]++;
> +		else
> +			stats->size_bins[7]++;
> +	}
> +
> +	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> +	if (rte_is_multicast_ether_addr(ea)) {
> +		if (rte_is_broadcast_ether_addr(ea))
> +			stats->broadcast++;
> +		else
> +			stats->multicast++;
> +	}
> +}
> +
> +static inline void
> +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> +{
> +	VIRTIO_DUMP_PACKET(m, m->data_len);
> +
> +	virtio_update_packet_stats(&rxvq->stats, m);
> +}

If we are moving these helpers anyway, it's better to just
move them to the top of this file.

Thanks,
Tiwei
  

Patch

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 27ead19fb..5d4ed524e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -575,6 +575,48 @@  virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 	}
 }
 
+static inline void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+	uint32_t s = mbuf->pkt_len;
+	struct rte_ether_addr *ea;
+
+	stats->bytes += s;
+
+	if (s == 64) {
+		stats->size_bins[1]++;
+	} else if (s > 64 && s < 1024) {
+		uint32_t bin;
+
+		/* count zeros, and offset into correct bin */
+		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+		stats->size_bins[bin]++;
+	} else {
+		if (s < 64)
+			stats->size_bins[0]++;
+		else if (s < 1519)
+			stats->size_bins[6]++;
+		else
+			stats->size_bins[7]++;
+	}
+
+	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+	if (rte_is_multicast_ether_addr(ea)) {
+		if (rte_is_broadcast_ether_addr(ea))
+			stats->broadcast++;
+		else
+			stats->multicast++;
+	}
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+	VIRTIO_DUMP_PACKET(m, m->data_len);
+
+	virtio_update_packet_stats(&rxvq->stats, m);
+}
+
 static inline void
 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 			struct rte_mbuf **cookies,
@@ -596,6 +638,7 @@  virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
 		dxp->cookie = (void *)cookies[i];
 		dxp->ndescs = 1;
+		virtio_update_packet_stats(&txvq->stats, cookies[i]);
 
 		hdr = (struct virtio_net_hdr *)
 			rte_pktmbuf_prepend(cookies[i], head_size);
@@ -1083,48 +1126,6 @@  virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
 	}
 }
 
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
-	uint32_t s = mbuf->pkt_len;
-	struct rte_ether_addr *ea;
-
-	stats->bytes += s;
-
-	if (s == 64) {
-		stats->size_bins[1]++;
-	} else if (s > 64 && s < 1024) {
-		uint32_t bin;
-
-		/* count zeros, and offset into correct bin */
-		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
-		stats->size_bins[bin]++;
-	} else {
-		if (s < 64)
-			stats->size_bins[0]++;
-		else if (s < 1519)
-			stats->size_bins[6]++;
-		else
-			stats->size_bins[7]++;
-	}
-
-	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
-	if (rte_is_multicast_ether_addr(ea)) {
-		if (rte_is_broadcast_ether_addr(ea))
-			stats->broadcast++;
-		else
-			stats->multicast++;
-	}
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
-	VIRTIO_DUMP_PACKET(m, m->data_len);
-
-	virtio_update_packet_stats(&rxvq->stats, m);
-}
-
 /* Optionally fill offload information in structure */
 static inline int
 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
@@ -2198,7 +2199,6 @@  virtio_xmit_pkts_inorder(void *tx_queue,
 			inorder_pkts[nb_inorder_pkts] = txm;
 			nb_inorder_pkts++;
 
-			virtio_update_packet_stats(&txvq->stats, txm);
 			continue;
 		}
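
For reference, here is a standalone illustration (not part of the patch) of
the size-bin expression used by the moved virtio_update_packet_stats()
helper. The size_bin() wrapper and the sample lengths below are invented for
this example and only mirror the formula shown in the diff above.

/*
 * Standalone illustration of the bin index computed in
 * virtio_update_packet_stats(): for 64 < s < 1024 the expression
 * (sizeof(s) * 8) - __builtin_clz(s) - 5 equals floor(log2(s)) - 4,
 * i.e. 65-127 -> bin 2, 128-255 -> bin 3, ..., 512-1023 -> bin 5.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
size_bin(uint32_t s)	/* hypothetical wrapper, mirrors the diff's logic */
{
	if (s == 64)
		return 1;
	if (s > 64 && s < 1024)
		return (sizeof(s) * 8) - __builtin_clz(s) - 5;
	if (s < 64)
		return 0;
	if (s < 1519)
		return 6;	/* 1024-1518 bytes */
	return 7;		/* 1519 bytes and above */
}

int
main(void)
{
	const uint32_t sizes[] = { 60, 64, 65, 127, 128, 512, 1023, 1024, 1518, 1519 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("pkt_len %4" PRIu32 " -> size_bins[%" PRIu32 "]\n",
		       sizes[i], size_bin(sizes[i]));
	return 0;
}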