net/virtio: add platform memory ordering feature support
Checks
Commit Message
VIRTIO_F_ORDER_PLATFORM is required to use proper memory barriers
in case of HW vhost implementations like vDPA.
DMA barriers (rte_cio_*) are sufficient for that purpose.
Previously known as VIRTIO_F_IO_BARRIER.
Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
Based on "[RFC] net/virtio: use real barriers for vDPA".
RFC --> PATCH:
* Dropped vendor-specific hack to determine if we need real barriers.
* Added VIRTIO_F_ORDER_PLATFORM feature definition and checking.
Note: Patch to change the name of the feature from VIRTIO_F_IO_BARRIER
to VIRTIO_F_ORDER_PLATFORM is not merged yet:
https://www.mail-archive.com/virtio-dev@lists.oasis-open.org/msg04114.html
drivers/net/virtio/virtio_ethdev.c | 2 ++
drivers/net/virtio/virtio_ethdev.h | 3 ++-
drivers/net/virtio/virtio_pci.h | 7 ++++++
drivers/net/virtio/virtio_rxtx.c | 14 ++++++------
drivers/net/virtio/virtqueue.h | 35 +++++++++++++++++++++++++-----
5 files changed, 48 insertions(+), 13 deletions(-)
Comments
On Fri, Dec 14, 2018 at 06:38:12PM +0300, Ilya Maximets wrote:
> VIRTIO_F_ORDER_PLATFORM is required to use proper memory barriers
> in case of HW vhost implementations like vDPA.
>
> DMA barriers (rte_cio_*) are sufficient for that purpose.
>
> Previously known as VIRTIO_F_IO_BARRIER.
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>
> Based on "[RFC] net/virtio: use real barriers for vDPA".
>
> RFC --> PATCH:
> * Dropped vendor-specific hack to determine if we need real barriers.
> * Added VIRTIO_F_ORDER_PLATFORM feature definition and checking.
>
> Note: Patch to change the name of the feature from VIRTIO_F_IO_BARRIER
> to VIRTIO_F_ORDER_PLATFORM is not merged yet:
> https://www.mail-archive.com/virtio-dev@lists.oasis-open.org/msg04114.html
>
> drivers/net/virtio/virtio_ethdev.c | 2 ++
> drivers/net/virtio/virtio_ethdev.h | 3 ++-
> drivers/net/virtio/virtio_pci.h | 7 ++++++
> drivers/net/virtio/virtio_rxtx.c | 14 ++++++------
> drivers/net/virtio/virtqueue.h | 35 +++++++++++++++++++++++++-----
> 5 files changed, 48 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index cb2b2e0bf..5ae7a9650 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -1474,6 +1474,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
> if (virtio_negotiate_features(hw, req_features) < 0)
> return -1;
>
> + hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
> +
> if (!hw->virtio_user_dev) {
> pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
> rte_eth_copy_pci_info(eth_dev, pci_dev);
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index e0f80e5a4..c098e5ac0 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -34,7 +34,8 @@
> 1u << VIRTIO_RING_F_INDIRECT_DESC | \
> 1ULL << VIRTIO_F_VERSION_1 | \
> 1ULL << VIRTIO_F_IN_ORDER | \
> - 1ULL << VIRTIO_F_IOMMU_PLATFORM)
> + 1ULL << VIRTIO_F_IOMMU_PLATFORM | \
> + 1ULL << VIRTIO_F_ORDER_PLATFORM)
>
> #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
> (VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
> diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
> index e961a58ca..e2f096185 100644
> --- a/drivers/net/virtio/virtio_pci.h
> +++ b/drivers/net/virtio/virtio_pci.h
> @@ -128,6 +128,12 @@ struct virtnet_ctl;
> */
> #define VIRTIO_F_IN_ORDER 35
>
> +/*
> + * This feature indicates that memory accesses by the driver and the device
> + * are ordered in a way described by the platform.
> + */
> +#define VIRTIO_F_ORDER_PLATFORM 36
> +
> /* The Guest publishes the used index for which it expects an interrupt
> * at the end of the avail ring. Host should ignore the avail->flags field. */
> /* The Host publishes the avail index for which it expects a kick
> @@ -240,6 +246,7 @@ struct virtio_hw {
> uint8_t use_simple_rx;
> uint8_t use_inorder_rx;
> uint8_t use_inorder_tx;
> + uint8_t weak_barriers;
> bool has_tx_offload;
> bool has_rx_offload;
> uint16_t port_id;
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index cb8f89f18..66195bf47 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -906,7 +906,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>
> nb_used = VIRTQUEUE_NUSED(vq);
>
> - virtio_rmb();
> + virtio_rmb(hw->weak_barriers);
>
> num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
> if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
> @@ -1017,7 +1017,7 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue,
> nb_used = RTE_MIN(nb_used, nb_pkts);
> nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
>
> - virtio_rmb();
> + virtio_rmb(hw->weak_barriers);
>
> PMD_RX_LOG(DEBUG, "used:%d", nb_used);
>
> @@ -1202,7 +1202,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>
> nb_used = VIRTQUEUE_NUSED(vq);
>
> - virtio_rmb();
> + virtio_rmb(hw->weak_barriers);
>
> PMD_RX_LOG(DEBUG, "used:%d", nb_used);
>
> @@ -1365,7 +1365,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
> nb_used = VIRTQUEUE_NUSED(vq);
>
> - virtio_rmb();
> + virtio_rmb(hw->weak_barriers);
> if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> virtio_xmit_cleanup(vq, nb_used);
>
> @@ -1407,7 +1407,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> /* Positive value indicates it need free vring descriptors */
> if (unlikely(need > 0)) {
> nb_used = VIRTQUEUE_NUSED(vq);
> - virtio_rmb();
> + virtio_rmb(hw->weak_barriers);
> need = RTE_MIN(need, (int)nb_used);
>
> virtio_xmit_cleanup(vq, need);
> @@ -1463,7 +1463,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
> nb_used = VIRTQUEUE_NUSED(vq);
>
> - virtio_rmb();
> + virtio_rmb(hw->weak_barriers);
> if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> virtio_xmit_cleanup_inorder(vq, nb_used);
>
> @@ -1511,7 +1511,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> need = slots - vq->vq_free_cnt;
> if (unlikely(need > 0)) {
> nb_used = VIRTQUEUE_NUSED(vq);
> - virtio_rmb();
> + virtio_rmb(hw->weak_barriers);
> need = RTE_MIN(need, (int)nb_used);
>
> virtio_xmit_cleanup_inorder(vq, need);
> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
> index 26518ed98..6b9055a1f 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -19,15 +19,40 @@
> struct rte_mbuf;
>
> /*
> - * Per virtio_config.h in Linux.
> + * Per virtio_ring.h in Linux.
> * For virtio_pci on SMP, we don't need to order with respect to MMIO
> * accesses through relaxed memory I/O windows, so smp_mb() et al are
> * sufficient.
> *
> + * For using virtio to talk to real devices (eg. vDPA) we do need real
> + * barriers.
> */
> -#define virtio_mb() rte_smp_mb()
> -#define virtio_rmb() rte_smp_rmb()
> -#define virtio_wmb() rte_smp_wmb()
> +static inline void
> +virtio_mb(uint8_t weak_barriers)
> +{
> + if (weak_barriers)
> + rte_smp_mb();
> + else
> + rte_mb();
> +}
> +
Why doesn't rte_cio_rmb exist?
> +static inline void
> +virtio_rmb(uint8_t weak_barriers)
> +{
> + if (weak_barriers)
> + rte_smp_rmb();
> + else
> + rte_cio_rmb();
> +}
> +
> +static inline void
> +virtio_wmb(uint8_t weak_barriers)
> +{
> + if (weak_barriers)
> + rte_smp_wmb();
> + else
> + rte_cio_wmb();
> +}
>
> #ifdef RTE_PMD_PACKET_PREFETCH
> #define rte_packet_prefetch(p) rte_prefetch1(p)
> @@ -312,7 +337,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
> static inline void
> vq_update_avail_idx(struct virtqueue *vq)
> {
> - virtio_wmb();
> + virtio_wmb(vq->hw->weak_barriers);
> vq->vq_ring.avail->idx = vq->vq_avail_idx;
> }
>
> --
> 2.17.1
On 14.12.2018 20:00, Michael S. Tsirkin wrote:
> On Fri, Dec 14, 2018 at 06:38:12PM +0300, Ilya Maximets wrote:
>> VIRTIO_F_ORDER_PLATFORM is required to use proper memory barriers
>> in case of HW vhost implementations like vDPA.
>>
>> DMA barriers (rte_cio_*) are sufficient for that purpose.
>>
>> Previously known as VIRTIO_F_IO_BARRIER.
>>
>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>> ---
>>
>> Based on "[RFC] net/virtio: use real barriers for vDPA".
>>
>> RFC --> PATCH:
>> * Dropped vendor-specific hack to determine if we need real barriers.
>> * Added VIRTIO_F_ORDER_PLATFORM feature definition and checking.
>>
>> Note: Patch to change the name of the feature from VIRTIO_F_IO_BARRIER
>> to VIRTIO_F_ORDER_PLATFORM is not merged yet:
>> https://www.mail-archive.com/virtio-dev@lists.oasis-open.org/msg04114.html
>>
>> drivers/net/virtio/virtio_ethdev.c | 2 ++
>> drivers/net/virtio/virtio_ethdev.h | 3 ++-
>> drivers/net/virtio/virtio_pci.h | 7 ++++++
>> drivers/net/virtio/virtio_rxtx.c | 14 ++++++------
>> drivers/net/virtio/virtqueue.h | 35 +++++++++++++++++++++++++-----
>> 5 files changed, 48 insertions(+), 13 deletions(-)
>>
>> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>> index cb2b2e0bf..5ae7a9650 100644
>> --- a/drivers/net/virtio/virtio_ethdev.c
>> +++ b/drivers/net/virtio/virtio_ethdev.c
>> @@ -1474,6 +1474,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>> if (virtio_negotiate_features(hw, req_features) < 0)
>> return -1;
>>
>> + hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
>> +
>> if (!hw->virtio_user_dev) {
>> pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
>> rte_eth_copy_pci_info(eth_dev, pci_dev);
>> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
>> index e0f80e5a4..c098e5ac0 100644
>> --- a/drivers/net/virtio/virtio_ethdev.h
>> +++ b/drivers/net/virtio/virtio_ethdev.h
>> @@ -34,7 +34,8 @@
>> 1u << VIRTIO_RING_F_INDIRECT_DESC | \
>> 1ULL << VIRTIO_F_VERSION_1 | \
>> 1ULL << VIRTIO_F_IN_ORDER | \
>> - 1ULL << VIRTIO_F_IOMMU_PLATFORM)
>> + 1ULL << VIRTIO_F_IOMMU_PLATFORM | \
>> + 1ULL << VIRTIO_F_ORDER_PLATFORM)
>>
>> #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
>> (VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
>> diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
>> index e961a58ca..e2f096185 100644
>> --- a/drivers/net/virtio/virtio_pci.h
>> +++ b/drivers/net/virtio/virtio_pci.h
>> @@ -128,6 +128,12 @@ struct virtnet_ctl;
>> */
>> #define VIRTIO_F_IN_ORDER 35
>>
>> +/*
>> + * This feature indicates that memory accesses by the driver and the device
>> + * are ordered in a way described by the platform.
>> + */
>> +#define VIRTIO_F_ORDER_PLATFORM 36
>> +
>> /* The Guest publishes the used index for which it expects an interrupt
>> * at the end of the avail ring. Host should ignore the avail->flags field. */
>> /* The Host publishes the avail index for which it expects a kick
>> @@ -240,6 +246,7 @@ struct virtio_hw {
>> uint8_t use_simple_rx;
>> uint8_t use_inorder_rx;
>> uint8_t use_inorder_tx;
>> + uint8_t weak_barriers;
>> bool has_tx_offload;
>> bool has_rx_offload;
>> uint16_t port_id;
>> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
>> index cb8f89f18..66195bf47 100644
>> --- a/drivers/net/virtio/virtio_rxtx.c
>> +++ b/drivers/net/virtio/virtio_rxtx.c
>> @@ -906,7 +906,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>>
>> nb_used = VIRTQUEUE_NUSED(vq);
>>
>> - virtio_rmb();
>> + virtio_rmb(hw->weak_barriers);
>>
>> num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
>> if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
>> @@ -1017,7 +1017,7 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue,
>> nb_used = RTE_MIN(nb_used, nb_pkts);
>> nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
>>
>> - virtio_rmb();
>> + virtio_rmb(hw->weak_barriers);
>>
>> PMD_RX_LOG(DEBUG, "used:%d", nb_used);
>>
>> @@ -1202,7 +1202,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>>
>> nb_used = VIRTQUEUE_NUSED(vq);
>>
>> - virtio_rmb();
>> + virtio_rmb(hw->weak_barriers);
>>
>> PMD_RX_LOG(DEBUG, "used:%d", nb_used);
>>
>> @@ -1365,7 +1365,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>> PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
>> nb_used = VIRTQUEUE_NUSED(vq);
>>
>> - virtio_rmb();
>> + virtio_rmb(hw->weak_barriers);
>> if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
>> virtio_xmit_cleanup(vq, nb_used);
>>
>> @@ -1407,7 +1407,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>> /* Positive value indicates it need free vring descriptors */
>> if (unlikely(need > 0)) {
>> nb_used = VIRTQUEUE_NUSED(vq);
>> - virtio_rmb();
>> + virtio_rmb(hw->weak_barriers);
>> need = RTE_MIN(need, (int)nb_used);
>>
>> virtio_xmit_cleanup(vq, need);
>> @@ -1463,7 +1463,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>> PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
>> nb_used = VIRTQUEUE_NUSED(vq);
>>
>> - virtio_rmb();
>> + virtio_rmb(hw->weak_barriers);
>> if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
>> virtio_xmit_cleanup_inorder(vq, nb_used);
>>
>> @@ -1511,7 +1511,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>> need = slots - vq->vq_free_cnt;
>> if (unlikely(need > 0)) {
>> nb_used = VIRTQUEUE_NUSED(vq);
>> - virtio_rmb();
>> + virtio_rmb(hw->weak_barriers);
>> need = RTE_MIN(need, (int)nb_used);
>>
>> virtio_xmit_cleanup_inorder(vq, need);
>> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
>> index 26518ed98..6b9055a1f 100644
>> --- a/drivers/net/virtio/virtqueue.h
>> +++ b/drivers/net/virtio/virtqueue.h
>> @@ -19,15 +19,40 @@
>> struct rte_mbuf;
>>
>> /*
>> - * Per virtio_config.h in Linux.
>> + * Per virtio_ring.h in Linux.
>> * For virtio_pci on SMP, we don't need to order with respect to MMIO
>> * accesses through relaxed memory I/O windows, so smp_mb() et al are
>> * sufficient.
>> *
>> + * For using virtio to talk to real devices (eg. vDPA) we do need real
>> + * barriers.
>> */
>> -#define virtio_mb() rte_smp_mb()
>> -#define virtio_rmb() rte_smp_rmb()
>> -#define virtio_wmb() rte_smp_wmb()
>> +static inline void
>> +virtio_mb(uint8_t weak_barriers)
>> +{
>> + if (weak_barriers)
>> + rte_smp_mb();
>> + else
>> + rte_mb();
>> +}
>> +
>
> Why doesn't rte_cio_rmb exist?
Assuming your question was "Why doesn't rte_cio_mb exist?".
I guess 'cio' barriers were copied from 'dma_' barriers in the kernel.
And 'rte_cio_mb' does not exist because there is no 'dma_mb' in the kernel.
OTOH, maybe we can use 'rte_io_mb' here to be more consistent, but it
equals to the 'rte_mb' on all supported architectures. So, I'm not sure
if it's needed.
>
>> +static inline void
>> +virtio_rmb(uint8_t weak_barriers)
>> +{
>> + if (weak_barriers)
>> + rte_smp_rmb();
>> + else
>> + rte_cio_rmb();
>> +}
>> +
>> +static inline void
>> +virtio_wmb(uint8_t weak_barriers)
>> +{
>> + if (weak_barriers)
>> + rte_smp_wmb();
>> + else
>> + rte_cio_wmb();
>> +}
>>
>> #ifdef RTE_PMD_PACKET_PREFETCH
>> #define rte_packet_prefetch(p) rte_prefetch1(p)
>> @@ -312,7 +337,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
>> static inline void
>> vq_update_avail_idx(struct virtqueue *vq)
>> {
>> - virtio_wmb();
>> + virtio_wmb(vq->hw->weak_barriers);
>> vq->vq_ring.avail->idx = vq->vq_avail_idx;
>> }
>>
>> --
>> 2.17.1
>
>
@@ -1474,6 +1474,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
+ hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+
if (!hw->virtio_user_dev) {
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -34,7 +34,8 @@
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IN_ORDER | \
- 1ULL << VIRTIO_F_IOMMU_PLATFORM)
+ 1ULL << VIRTIO_F_IOMMU_PLATFORM | \
+ 1ULL << VIRTIO_F_ORDER_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
(VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
@@ -128,6 +128,12 @@ struct virtnet_ctl;
*/
#define VIRTIO_F_IN_ORDER 35
+/*
+ * This feature indicates that memory accesses by the driver and the device
+ * are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM 36
+
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
@@ -240,6 +246,7 @@ struct virtio_hw {
uint8_t use_simple_rx;
uint8_t use_inorder_rx;
uint8_t use_inorder_tx;
+ uint8_t weak_barriers;
bool has_tx_offload;
bool has_rx_offload;
uint16_t port_id;
@@ -906,7 +906,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
@@ -1017,7 +1017,7 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue,
nb_used = RTE_MIN(nb_used, nb_pkts);
nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
@@ -1202,7 +1202,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
@@ -1365,7 +1365,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup(vq, nb_used);
@@ -1407,7 +1407,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Positive value indicates it need free vring descriptors */
if (unlikely(need > 0)) {
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
need = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup(vq, need);
@@ -1463,7 +1463,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup_inorder(vq, nb_used);
@@ -1511,7 +1511,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
need = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup_inorder(vq, need);
@@ -19,15 +19,40 @@
struct rte_mbuf;
/*
- * Per virtio_config.h in Linux.
+ * Per virtio_ring.h in Linux.
* For virtio_pci on SMP, we don't need to order with respect to MMIO
* accesses through relaxed memory I/O windows, so smp_mb() et al are
* sufficient.
*
+ * For using virtio to talk to real devices (eg. vDPA) we do need real
+ * barriers.
*/
-#define virtio_mb() rte_smp_mb()
-#define virtio_rmb() rte_smp_rmb()
-#define virtio_wmb() rte_smp_wmb()
+static inline void
+virtio_mb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_mb();
+ else
+ rte_mb();
+}
+
+static inline void
+virtio_rmb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_rmb();
+ else
+ rte_cio_rmb();
+}
+
+static inline void
+virtio_wmb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_wmb();
+ else
+ rte_cio_wmb();
+}
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
@@ -312,7 +337,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
- virtio_wmb();
+ virtio_wmb(vq->hw->weak_barriers);
vq->vq_ring.avail->idx = vq->vq_avail_idx;
}