diff mbox

[dpdk-dev,v2,02/24] virtio: Use weaker barriers

Message ID 1422326164-13697-3-git-send-email-changchun.ouyang@intel.com (mailing list archive)
State Superseded, archived
Headers show

Commit Message

Ouyang Changchun Jan. 27, 2015, 2:35 a.m. UTC
The DPDK driver only has to deal with the case of running on PCI
and with SMP. In this case, the code can use the weaker barriers
instead of using hard (fence) barriers. This will help performance.
The rationale is explained in Linux kernel virtio_ring.h.

To make it clearer that this is a virtio thing and not some generic
barrier, prefix the barrier calls with virtio_.

Add missing (and needed) barrier between updating ring data
structure and notifying host.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
---
 lib/librte_pmd_virtio/virtio_ethdev.c |  2 +-
 lib/librte_pmd_virtio/virtio_rxtx.c   |  8 +++++---
 lib/librte_pmd_virtio/virtqueue.h     | 19 ++++++++++++++-----
 3 files changed, 20 insertions(+), 9 deletions(-)

Comments

Huawei Xie Jan. 27, 2015, 7:03 a.m. UTC | #1
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ouyang Changchun
> Sent: Tuesday, January 27, 2015 10:36 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 02/24] virtio: Use weaker barriers
> 
> The DPDK driver only has to deal with the case of running on PCI
> and with SMP. In this case, the code can use the weaker barriers
> instead of using hard (fence) barriers. This will help performance.
> The rationale is explained in Linux kernel virtio_ring.h.
> 
> To make it clearer that this is a virtio thing and not some generic
> barrier, prefix the barrier calls with virtio_.
> 
> Add missing (and needed) barrier between updating ring data
> structure and notifying host.
> 
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
> Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> ---
>  lib/librte_pmd_virtio/virtio_ethdev.c |  2 +-
>  lib/librte_pmd_virtio/virtio_rxtx.c   |  8 +++++---
>  lib/librte_pmd_virtio/virtqueue.h     | 19 ++++++++++++++-----
>  3 files changed, 20 insertions(+), 9 deletions(-)
> 
> diff --git a/lib/librte_pmd_virtio/virtio_ethdev.c
> b/lib/librte_pmd_virtio/virtio_ethdev.c
> index 662a49c..dc47e72 100644
> --- a/lib/librte_pmd_virtio/virtio_ethdev.c
> +++ b/lib/librte_pmd_virtio/virtio_ethdev.c
> @@ -175,7 +175,7 @@ virtio_send_command(struct virtqueue *vq, struct
> virtio_pmd_ctrl *ctrl,
>  		uint32_t idx, desc_idx, used_idx;
>  		struct vring_used_elem *uep;
> 
> -		rmb();
> +		virtio_rmb();
> 
>  		used_idx = (uint32_t)(vq->vq_used_cons_idx
>  				& (vq->vq_nentries - 1));
> diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c
> b/lib/librte_pmd_virtio/virtio_rxtx.c
> index c013f97..78af334 100644
> --- a/lib/librte_pmd_virtio/virtio_rxtx.c
> +++ b/lib/librte_pmd_virtio/virtio_rxtx.c
> @@ -456,7 +456,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf
> **rx_pkts, uint16_t nb_pkts)
> 
>  	nb_used = VIRTQUEUE_NUSED(rxvq);
> 
> -	rmb();
> +	virtio_rmb();
> 
>  	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
>  	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num :
> VIRTIO_MBUF_BURST_SZ);
> @@ -516,6 +516,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf
> **rx_pkts, uint16_t nb_pkts)
>  	}
> 
>  	if (likely(nb_enqueued)) {
> +		virtio_wmb();
>  		if (unlikely(virtqueue_kick_prepare(rxvq))) {
>  			virtqueue_notify(rxvq);
>  			PMD_RX_LOG(DEBUG, "Notified\n");
> @@ -547,7 +548,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
> 
>  	nb_used = VIRTQUEUE_NUSED(rxvq);
> 
> -	rmb();
> +	virtio_rmb();
> 
>  	if (nb_used == 0)
>  		return 0;
> @@ -694,7 +695,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
>  	nb_used = VIRTQUEUE_NUSED(txvq);
> 
> -	rmb();
> +	virtio_rmb();
> 
>  	num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used :
> VIRTIO_MBUF_BURST_SZ);
> 
> @@ -735,6 +736,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  		}
>  	}
>  	vq_update_avail_idx(txvq);
> +	virtio_wmb();
> 
>  	txvq->packets += nb_tx;
> 
> diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
> index fdee054..f6ad98d 100644
> --- a/lib/librte_pmd_virtio/virtqueue.h
> +++ b/lib/librte_pmd_virtio/virtqueue.h
> @@ -46,9 +46,18 @@
>  #include "virtio_ring.h"
>  #include "virtio_logs.h"
> 
> -#define mb()  rte_mb()
> -#define wmb() rte_wmb()
> -#define rmb() rte_rmb()
> +/*
> + * Per virtio_config.h in Linux.
> + *     For virtio_pci on SMP, we don't need to order with respect to MMIO
> + *     accesses through relaxed memory I/O windows, so smp_mb() et al are
> + *     sufficient.
> + *
> + * This driver is for virtio_pci on SMP and therefore can assume
> + * weaker (compiler barriers)
> + */
> +#define virtio_mb()	rte_mb()
> +#define virtio_rmb()	rte_compiler_barrier()
> +#define virtio_wmb()	rte_compiler_barrier()
> 
>  #ifdef RTE_PMD_PACKET_PREFETCH
>  #define rte_packet_prefetch(p)  rte_prefetch1(p)
> @@ -225,7 +234,7 @@ virtqueue_full(const struct virtqueue *vq)
>  static inline void
>  vq_update_avail_idx(struct virtqueue *vq)
>  {
> -	rte_compiler_barrier();
> +	virtio_rmb();

I recall our original code is virtio_wmb(). 
Use store fence to ensure all updates to entries before updating the index.
Why do we need virtio_rmb() here and add virtio_wmb after vq_update_avail_idx()?

>  	vq->vq_ring.avail->idx = vq->vq_avail_idx;
>  }
> 
> @@ -255,7 +264,7 @@ static inline void
>  virtqueue_notify(struct virtqueue *vq)
>  {
>  	/*
> -	 * Ensure updated avail->idx is visible to host. mb() necessary?
> +	 * Ensure updated avail->idx is visible to host.
>  	 * For virtio on IA, the notificaiton is through io port operation
>  	 * which is a serialization instruction itself.
>  	 */
> --
> 1.8.4.2
Huawei Xie Jan. 27, 2015, 7:56 a.m. UTC | #2
>-------if (likely(nb_enqueued)) {
>------->-------virtio_wmb();
>------->-------if (unlikely(virtqueue_kick_prepare(rxvq))) {
>------->------->-------virtqueue_notify(rxvq);
>------->------->-------PMD_RX_LOG(DEBUG, "Notified\n");
>------->-------}
>-------}
>-------vq_update_avail_idx(rxvq);


Two points of confusion about the modification here:

1.
why notify host without updating avail idx?
Will this cause potential deadlock?

2.
Why update avail index even no packets are enqueued?
Ouyang Changchun Jan. 27, 2015, 8:04 a.m. UTC | #3
Hi Stephen,
Although this is the original code logic,
we can move vq_update_avail_idx(rxvq) into the if block to resolve it.
What do you think of it?

Thanks
Changchun

-----Original Message-----
From: Xie, Huawei 
Sent: Tuesday, January 27, 2015 3:57 PM
To: Ouyang, Changchun; dev@dpdk.org
Cc: Stephen Hemminger (stephen@networkplumber.org)
Subject: RE: [dpdk-dev] [PATCH v2 02/24] virtio: Use weaker barriers

>-------if (likely(nb_enqueued)) {
>------->-------virtio_wmb();
>------->-------if (unlikely(virtqueue_kick_prepare(rxvq))) {
>------->------->-------virtqueue_notify(rxvq);
>------->------->-------PMD_RX_LOG(DEBUG, "Notified\n");
>------->-------}
>-------}
>-------vq_update_avail_idx(rxvq);


Two points of confusion about the modification here:

1.
why notify host without updating avail idx?
Will this cause potential deadlock?

2.
Why update avail index even no packets are enqueued?
Stephen Hemminger Jan. 27, 2015, 9:58 a.m. UTC | #4
> I recall our original code is virtio_wmb(). 
> Use store fence to ensure all updates to entries before updating the index.
> Why do we need virtio_rmb() here and add virtio_wmb after vq_update_avail_idx()?

Store fence is unnecessary; Intel CPUs are cache coherent. Please read
the virtio Linux ring header file for the explanation. A full fence WMB
is more expensive and causes a CPU stall.

> >  	vq->vq_ring.avail->idx = vq->vq_avail_idx;
> >  }
> > 
> > @@ -255,7 +264,7 @@ static inline void
> >  virtqueue_notify(struct virtqueue *vq)
> >  {
> >  	/*
> > -	 * Ensure updated avail->idx is visible to host. mb() necessary?
> > +	 * Ensure updated avail->idx is visible to host.
> >  	 * For virtio on IA, the notificaiton is through io port operation
> >  	 * which is a serialization instruction itself.
> >  	 */
> > --
> > 1.8.4.2
>
Huawei Xie Jan. 27, 2015, 4:16 p.m. UTC | #5
> -----Original Message-----
> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> Sent: Tuesday, January 27, 2015 5:59 PM
> To: Xie, Huawei
> Cc: Ouyang, Changchun; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 02/24] virtio: Use weaker barriers
> 
> 
> > I recall our original code is virtio_wmb().
> > Use store fence to ensure all updates to entries before updating the index.
> > Why do we need virtio_rmb() here and add virtio_wmb after
> vq_update_avail_idx()?
> 
> Store fence is unnecessary, Intel CPU's are cache coherent, please read
> the virtio Linux ring header file for explanation. A full fence WMB
> is more expensive and causes CPU stall
> 


I mean virtio_wmb rather than virtio_rmb should be used here, 
and both of them are defined as compiler barrier.

The following code is linux virtio driver for adding buffer to vring.
/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;

> > >  	vq->vq_ring.avail->idx = vq->vq_avail_idx;
> > >  }
> > >
> > > @@ -255,7 +264,7 @@ static inline void
> > >  virtqueue_notify(struct virtqueue *vq)
> > >  {
> > >  	/*
> > > -	 * Ensure updated avail->idx is visible to host. mb() necessary?
> > > +	 * Ensure updated avail->idx is visible to host.
> > >  	 * For virtio on IA, the notificaiton is through io port operation
> > >  	 * which is a serialization instruction itself.
> > >  	 */
> > > --
> > > 1.8.4.2
> >
Ouyang Changchun Jan. 28, 2015, 6:12 a.m. UTC | #6
> -----Original Message-----
> From: Xie, Huawei
> Sent: Wednesday, January 28, 2015 12:16 AM
> To: Stephen Hemminger
> Cc: Ouyang, Changchun; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 02/24] virtio: Use weaker barriers
> 
> 
> 
> > -----Original Message-----
> > From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> > Sent: Tuesday, January 27, 2015 5:59 PM
> > To: Xie, Huawei
> > Cc: Ouyang, Changchun; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v2 02/24] virtio: Use weaker barriers
> >
> >
> > > I recall our original code is virtio_wmb().
> > > Use store fence to ensure all updates to entries before updating the
> index.
> > > Why do we need virtio_rmb() here and add virtio_wmb after
> > vq_update_avail_idx()?
> >
> > Store fence is unnecessary, Intel CPU's are cache coherent, please
> > read the virtio Linux ring header file for explanation. A full fence
> > WMB is more expensive and causes CPU stall
> >
> 
> 
> I mean virtio_wmb rather than virtio_rmb should be used here, and both of
> them are defined as compiler barrier.
> 
> The following code is linux virtio driver for adding buffer to vring.
> /* Put entry in available array (but don't update avail->idx until they
> 	 * do sync). */
> 	avail = (vq->vring.avail->idx & (vq->vring.num-1));
> 	vq->vring.avail->ring[avail] = head;
> 
> 	/* Descriptors and available array need to be set before we expose
> the
> 	 * new available array entries. */
> 	virtio_wmb(vq->weak_barriers);
> 	vq->vring.avail->idx++;
> 

Yes, using virtio_wmb is better here; I will change it in the next version.

Thanks
Changchun
diff mbox

Patch

diff --git a/lib/librte_pmd_virtio/virtio_ethdev.c b/lib/librte_pmd_virtio/virtio_ethdev.c
index 662a49c..dc47e72 100644
--- a/lib/librte_pmd_virtio/virtio_ethdev.c
+++ b/lib/librte_pmd_virtio/virtio_ethdev.c
@@ -175,7 +175,7 @@  virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
 		uint32_t idx, desc_idx, used_idx;
 		struct vring_used_elem *uep;
 
-		rmb();
+		virtio_rmb();
 
 		used_idx = (uint32_t)(vq->vq_used_cons_idx
 				& (vq->vq_nentries - 1));
diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c
index c013f97..78af334 100644
--- a/lib/librte_pmd_virtio/virtio_rxtx.c
+++ b/lib/librte_pmd_virtio/virtio_rxtx.c
@@ -456,7 +456,7 @@  virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 	nb_used = VIRTQUEUE_NUSED(rxvq);
 
-	rmb();
+	virtio_rmb();
 
 	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
 	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
@@ -516,6 +516,7 @@  virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	}
 
 	if (likely(nb_enqueued)) {
+		virtio_wmb();
 		if (unlikely(virtqueue_kick_prepare(rxvq))) {
 			virtqueue_notify(rxvq);
 			PMD_RX_LOG(DEBUG, "Notified\n");
@@ -547,7 +548,7 @@  virtio_recv_mergeable_pkts(void *rx_queue,
 
 	nb_used = VIRTQUEUE_NUSED(rxvq);
 
-	rmb();
+	virtio_rmb();
 
 	if (nb_used == 0)
 		return 0;
@@ -694,7 +695,7 @@  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
 	nb_used = VIRTQUEUE_NUSED(txvq);
 
-	rmb();
+	virtio_rmb();
 
 	num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
 
@@ -735,6 +736,7 @@  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 	}
 	vq_update_avail_idx(txvq);
+	virtio_wmb();
 
 	txvq->packets += nb_tx;
 
diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
index fdee054..f6ad98d 100644
--- a/lib/librte_pmd_virtio/virtqueue.h
+++ b/lib/librte_pmd_virtio/virtqueue.h
@@ -46,9 +46,18 @@ 
 #include "virtio_ring.h"
 #include "virtio_logs.h"
 
-#define mb()  rte_mb()
-#define wmb() rte_wmb()
-#define rmb() rte_rmb()
+/*
+ * Per virtio_config.h in Linux.
+ *     For virtio_pci on SMP, we don't need to order with respect to MMIO
+ *     accesses through relaxed memory I/O windows, so smp_mb() et al are
+ *     sufficient.
+ *
+ * This driver is for virtio_pci on SMP and therefore can assume
+ * weaker (compiler barriers)
+ */
+#define virtio_mb()	rte_mb()
+#define virtio_rmb()	rte_compiler_barrier()
+#define virtio_wmb()	rte_compiler_barrier()
 
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p)  rte_prefetch1(p)
@@ -225,7 +234,7 @@  virtqueue_full(const struct virtqueue *vq)
 static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
-	rte_compiler_barrier();
+	virtio_rmb();
 	vq->vq_ring.avail->idx = vq->vq_avail_idx;
 }
 
@@ -255,7 +264,7 @@  static inline void
 virtqueue_notify(struct virtqueue *vq)
 {
 	/*
-	 * Ensure updated avail->idx is visible to host. mb() necessary?
+	 * Ensure updated avail->idx is visible to host.
 	 * For virtio on IA, the notificaiton is through io port operation
 	 * which is a serialization instruction itself.
 	 */