vhost: avoid potential null pointer access
Commit Message
If the user calls rte_vhost_vring_call() on a ring that has been
invalidated, we will encounter a SEGV.
We should check the pointer first before accessing it.
Signed-off-by: Li Feng <fengli@smartx.com>
---
lib/vhost/vhost.c | 7 ++++---
lib/vhost/vhost.h | 12 ++++++++++--
2 files changed, 14 insertions(+), 5 deletions(-)
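For context, a minimal caller-side sketch of how an application could react to the changed return value; kick_guest() is a hypothetical helper and the device id / queue index come from the application, not from this patch:

#include <rte_vhost.h>

/* Notify the guest for one virtqueue. With this patch,
 * rte_vhost_vring_call() returns -1 instead of dereferencing a NULL
 * avail ring or driver event area when the vring has been invalidated. */
static int
kick_guest(int vid, uint16_t vring_idx)
{
	int ret = rte_vhost_vring_call(vid, vring_idx);

	if (ret < 0) {
		/* Vring invalidated or vid/vring_idx invalid: skip the
		 * notification rather than crashing. */
		return ret;
	}
	return 0;
}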
Comments
Sorry, please ignore this patch; there is a rebase error.
I will fix it in v2.
> On 30 Aug 2023, at 4:47 PM, Li Feng <fengli@smartx.com> wrote:
>
> If the user calls rte_vhost_vring_call() on a ring that has been
> invalidated, we will encounter a SEGV.
>
> We should check the pointer first before accessing it.
>
> Signed-off-by: Li Feng <fengli@smartx.com>
> ---
> lib/vhost/vhost.c | 7 ++++---
> lib/vhost/vhost.h | 12 ++++++++++--
> 2 files changed, 14 insertions(+), 5 deletions(-)
>
> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
> index eb6309b681..3af0307cd6 100644
> --- a/lib/vhost/vhost.c
> +++ b/lib/vhost/vhost.c
> @@ -1327,6 +1327,7 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
> {
> struct virtio_net *dev;
> struct vhost_virtqueue *vq;
> + int ret = 0;
>
> dev = get_device(vid);
> if (!dev)
> @@ -1342,13 +1343,13 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
> rte_rwlock_read_lock(&vq->access_lock);
>
> if (vq_is_packed(dev))
> - vhost_vring_call_packed(dev, vq);
> + ret = vhost_vring_call_packed(dev, vq);
> else
> - vhost_vring_call_split(dev, vq);
> + ret = vhost_vring_call_split(dev, vq);
>
> rte_rwlock_read_unlock(&vq->access_lock);
>
> - return 0;
> + return ret;
> }
>
> int
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 9723429b1c..f38e6d16c9 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -930,7 +930,7 @@ vhost_vring_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq)
> dev->notify_ops->guest_notified(dev->vid);
> }
>
> -static __rte_always_inline void
> +static __rte_always_inline int
> vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
> {
> /* Flush used->idx update before we read avail->flags. */
> @@ -953,13 +953,17 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
> unlikely(!signalled_used_valid))
> vhost_vring_inject_irq(dev, vq);
> } else {
> + if (!vq->avail)
> + return -1;
> +
> /* Kick the guest if necessary. */
> if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
> vhost_vring_inject_irq(dev, vq);
> }
> + return 0;
> }
>
> -static __rte_always_inline void
> +static __rte_always_inline int
> vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
> {
> uint16_t old, new, off, off_wrap;
> @@ -968,6 +972,9 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
> /* Flush used desc update. */
> rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
>
> + if (!vq->driver_event)
> + return -1;
> +
> if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
> if (vq->driver_event->flags !=
> VRING_EVENT_F_DISABLE)
> @@ -1030,6 +1037,7 @@ restore_mbuf(struct rte_mbuf *m)
> rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
> m = m->next;
> }
> + return 0;
> }
>
> static __rte_always_inline bool
> --
> 2.41.0
>