[dpdk-dev,v3,13/14] eal: introduce rte_smp_*mb() for memory barriers to use between lcores
Commit Message
This commit introduces rte_smp_mb(), rte_smp_wmb() and rte_smp_rmb(), in
order to enable memory barriers between lcores.
The patch does not provide any functional change for IA; the goal is to
put the infrastructure in place for weakly ordered machines like ARM to
work on DPDK.
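As an illustration (a sketch only, not part of the diff), a lockless
handoff between two lcores would use the new barriers like this:

    /* Illustrative sketch: publish a payload from one lcore to
     * another using the new SMP barriers. */
    static uint64_t data;
    static volatile int ready;

    /* lcore A (producer) */
    data = 42;
    rte_smp_wmb();      /* payload must be visible before the flag */
    ready = 1;

    /* lcore B (consumer) */
    while (!ready)
            ;
    rte_smp_rmb();      /* flag must be read before the payload */
    /* data is guaranteed to read 42 here */

On IA both SMP barriers compile down to a compiler barrier, so the
generated code does not change.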
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
drivers/net/virtio/virtqueue.h | 8 +++----
drivers/net/xenvirt/rte_eth_xenvirt.c | 4 ++--
drivers/net/xenvirt/virtqueue.h | 2 +-
.../common/include/arch/ppc_64/rte_atomic.h | 6 +++++
.../common/include/arch/tile/rte_atomic.h | 6 +++++
.../common/include/arch/x86/rte_atomic.h | 6 +++++
lib/librte_eal/common/include/generic/rte_atomic.h | 27 ++++++++++++++++++++++
lib/librte_ring/rte_ring.h | 8 +++----
8 files changed, 55 insertions(+), 12 deletions(-)
@@ -53,12 +53,10 @@ struct rte_mbuf;
* accesses through relaxed memory I/O windows, so smp_mb() et al are
* sufficient.
*
- * This driver is for virtio_pci on SMP and therefore can assume
- * weaker (compiler barriers)
*/
-#define virtio_mb() rte_mb()
-#define virtio_rmb() rte_compiler_barrier()
-#define virtio_wmb() rte_compiler_barrier()
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
@@ -99,7 +99,7 @@ eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_used = VIRTQUEUE_NUSED(rxvq);
- rte_compiler_barrier(); /* rmb */
+ rte_smp_rmb();
num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
if (unlikely(num == 0)) return 0;
@@ -150,7 +150,7 @@ eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(txvq);
- rte_compiler_barrier(); /* rmb */
+ rte_smp_rmb();
num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);
@@ -151,7 +151,7 @@ vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
*/
avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
vq->vq_ring.avail->ring[avail_idx] = desc_idx;
- rte_compiler_barrier(); /* wmb , for IA memory model barrier is enough*/
+ rte_smp_wmb();
vq->vq_ring.avail->idx++;
}
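The sequence in vq_ring_update_avail() is the usual publish pattern:
write the ring slot, order the store, then bump the index the other end
polls. Reduced to its core (illustrative, assuming the surrounding
virtqueue code):

    /* Publish pattern: the slot store must be globally visible
     * before the index store that advertises it. */
    vq->vq_ring.avail->ring[avail_idx] = desc_idx;
    rte_smp_wmb();                /* slot before index */
    vq->vq_ring.avail->idx++;     /* peer may now consume the slot */

The removed rte_compiler_barrier() was only sufficient under the IA
memory model; rte_smp_wmb() keeps the same behaviour on IA and becomes
a real store barrier on weakly ordered CPUs.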
@@ -72,6 +72,12 @@ extern "C" {
*/
#define rte_rmb() {asm volatile("sync" : : : "memory"); }
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_wmb()
+
+#define rte_smp_rmb() rte_rmb()
+
/*------------------------- 16 bit atomic operations -------------------------*/
/* To be compatible with Power7, use GCC built-in functions for 16 bit
* operations */
@@ -79,6 +79,12 @@ static inline void rte_rmb(void)
__sync_synchronize();
}
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_compiler_barrier()
+
+#define rte_smp_rmb() rte_compiler_barrier()
+
#ifdef __cplusplus
}
#endif
@@ -53,6 +53,12 @@ extern "C" {
#define rte_rmb() _mm_lfence()
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_compiler_barrier()
+
+#define rte_smp_rmb() rte_compiler_barrier()
+
/*------------------------- 16 bit atomic operations -------------------------*/
#ifndef RTE_FORCE_INTRINSICS
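On x86 only the full barrier needs a real instruction: the architecture
already keeps stores ordered with stores and loads with loads across
cores, so rte_smp_wmb() and rte_smp_rmb() merely have to stop compiler
reordering. A weakly ordered port must emit real barriers instead; a
hypothetical ARM mapping (not part of this series, shown for contrast
only) could look like:

    /* Hypothetical ARMv7-style mapping, for illustration only:
     * weakly ordered cores need actual barrier instructions. */
    #define rte_smp_mb()  do { asm volatile ("dmb ish"   : : : "memory"); } while (0)
    #define rte_smp_wmb() do { asm volatile ("dmb ishst" : : : "memory"); } while (0)
    #define rte_smp_rmb() do { asm volatile ("dmb ish"   : : : "memory"); } while (0)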
@@ -72,6 +72,33 @@ static inline void rte_wmb(void);
*/
static inline void rte_rmb(void);
+/**
+ * General memory barrier between lcores
+ *
+ * Guarantees that the LOAD and STORE operations that precede the
+ * rte_smp_mb() call are globally visible across the lcores
+ * before the LOAD and STORE operations that follow it.
+ */
+static inline void rte_smp_mb(void);
+
+/**
+ * Write memory barrier between lcores
+ *
+ * Guarantees that the STORE operations that precede the
+ * rte_smp_wmb() call are globally visible across the lcores
+ * before the STORE operations that follow it.
+ */
+static inline void rte_smp_wmb(void);
+
+/**
+ * Read memory barrier between lcores
+ *
+ * Guarantees that the LOAD operations that precede the
+ * rte_smp_rmb() call are globally visible across the lcores
+ * before the LOAD operations that follow it.
+ */
+static inline void rte_smp_rmb(void);
+
#endif /* __DOXYGEN__ */
/**
@@ -457,7 +457,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* write entries in ring */
ENQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_wmb();
/* if we exceed the watermark */
if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -552,7 +552,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* write entries in ring */
ENQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_wmb();
/* if we exceed the watermark */
if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -643,7 +643,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
/* copy in table */
DEQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_rmb();
/*
* If there are other dequeues in progress that preceded us,
@@ -727,7 +727,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
/* copy in table */
DEQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_rmb();
__RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
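Taken together, the ring barriers pair across lcores: the producer's
rte_smp_wmb() keeps its object stores ahead of its tail update, and the
consumer's rte_smp_rmb() keeps its object loads ahead of its own tail
update, so neither side can observe a tail that runs ahead of the data
it guards. Stripped of the ring bookkeeping (a simplified sketch of
where the new barriers sit):

    /* producer lcore */
    ENQUEUE_PTRS();             /* write objects into the ring */
    rte_smp_wmb();              /* objects visible before tail */
    r->prod.tail = prod_next;   /* consumers may read up to here */

    /* consumer lcore */
    DEQUEUE_PTRS();             /* read objects from the ring */
    rte_smp_rmb();              /* loads complete before tail */
    r->cons.tail = cons_next;   /* producers may reuse the slots */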