[dpdk-dev,v5,resend,05/12] vhost: add VHOST_USER_SET_VRING_ENABLE message
Commit Message
From: Changchun Ouyang <changchun.ouyang@intel.com>
This message is used to enable/disable a specific vring queue pair.
The first queue pair is enabled by default.
Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
---
lib/librte_vhost/rte_virtio_net.h | 1 +
lib/librte_vhost/vhost_rxtx.c | 10 ++++++++++
lib/librte_vhost/vhost_user/vhost-net-user.c | 5 +++++
lib/librte_vhost/vhost_user/vhost-net-user.h | 1 +
lib/librte_vhost/vhost_user/virtio-net-user.c | 22 ++++++++++++++++++++++
lib/librte_vhost/vhost_user/virtio-net-user.h | 3 +++
lib/librte_vhost/virtio-net.c | 12 +++++++++---
7 files changed, 51 insertions(+), 3 deletions(-)
Comments
On Fri, Sep 18, 2015 at 11:10:54PM +0800, Yuanhan Liu wrote:
> From: Changchun Ouyang <changchun.ouyang@intel.com>
>
> This message is used to enable/disable a specific vring queue pair.
> The first queue pair is enabled by default.
>
> Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> ---
> lib/librte_vhost/rte_virtio_net.h | 1 +
> lib/librte_vhost/vhost_rxtx.c | 10 ++++++++++
> lib/librte_vhost/vhost_user/vhost-net-user.c | 5 +++++
> lib/librte_vhost/vhost_user/vhost-net-user.h | 1 +
> lib/librte_vhost/vhost_user/virtio-net-user.c | 22 ++++++++++++++++++++++
> lib/librte_vhost/vhost_user/virtio-net-user.h | 3 +++
> lib/librte_vhost/virtio-net.c | 12 +++++++++---
> 7 files changed, 51 insertions(+), 3 deletions(-)
>
> diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
> index 5dd6493..08b69df 100644
> --- a/lib/librte_vhost/rte_virtio_net.h
> +++ b/lib/librte_vhost/rte_virtio_net.h
> @@ -89,6 +89,7 @@ struct vhost_virtqueue {
> volatile uint16_t last_used_idx_res; /**< Used for multiple devices reserving buffers. */
> int callfd; /**< Used to notify the guest (trigger interrupt). */
> int kickfd; /**< Currently unused as polling mode is enabled. */
> + int enabled;
> struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
> } __rte_cache_aligned;
>
> diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
> index a4ab6ca..aa9ccda 100644
> --- a/lib/librte_vhost/vhost_rxtx.c
> +++ b/lib/librte_vhost/vhost_rxtx.c
> @@ -86,6 +86,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
> }
>
> vq = dev->virtqueue[queue_id];
> + if (unlikely(vq->enabled == 0))
> + return 0;
> +
> count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
>
> /*
> @@ -278,6 +281,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
> * (guest physical addr -> vhost virtual addr)
> */
> vq = dev->virtqueue[queue_id];
> +
> vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
> vb_hdr_addr = vb_addr;
>
> @@ -485,6 +489,9 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
> }
>
> vq = dev->virtqueue[queue_id];
> + if (unlikely(vq->enabled == 0))
> + return 0;
> +
> count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
>
> if (count == 0)
> @@ -583,6 +590,9 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
> }
>
> vq = dev->virtqueue[queue_id];
> + if (unlikely(vq->enabled == 0))
> + return 0;
> +
> avail_idx = *((volatile uint16_t *)&vq->avail->idx);
>
> /* If there are no available buffers then return. */
> diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.c b/lib/librte_vhost/vhost_user/vhost-net-user.c
> index 8675cd4..f681676 100644
> --- a/lib/librte_vhost/vhost_user/vhost-net-user.c
> +++ b/lib/librte_vhost/vhost_user/vhost-net-user.c
> @@ -99,6 +99,7 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
> [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
> [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
> [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
> + [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
> };
>
> /**
> @@ -428,6 +429,10 @@ vserver_message_handler(int connfd, void *dat, int *remove)
> send_vhost_message(connfd, &msg);
> break;
>
> + case VHOST_USER_SET_VRING_ENABLE:
> + user_set_vring_enable(ctx, &msg.payload.state);
> + break;
> +
> default:
> break;
>
> diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.h b/lib/librte_vhost/vhost_user/vhost-net-user.h
> index 389d21d..38637cc 100644
> --- a/lib/librte_vhost/vhost_user/vhost-net-user.h
> +++ b/lib/librte_vhost/vhost_user/vhost-net-user.h
> @@ -66,6 +66,7 @@ typedef enum VhostUserRequest {
> VHOST_USER_GET_PROTOCOL_FEATURES = 15,
> VHOST_USER_SET_PROTOCOL_FEATURES = 16,
> VHOST_USER_GET_QUEUE_NUM = 17,
> + VHOST_USER_SET_VRING_ENABLE = 18,
> VHOST_USER_MAX
> } VhostUserRequest;
>
> diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.c b/lib/librte_vhost/vhost_user/virtio-net-user.c
> index e83d279..9871f20 100644
> --- a/lib/librte_vhost/vhost_user/virtio-net-user.c
> +++ b/lib/librte_vhost/vhost_user/virtio-net-user.c
> @@ -306,6 +306,28 @@ user_get_vring_base(struct vhost_device_ctx ctx,
> return 0;
> }
>
> +/*
> + * when virtio queues are ready to work, qemu will send us to
> + * enable the virtio queue pair.
> + */
> +int
> +user_set_vring_enable(struct vhost_device_ctx ctx,
> + struct vhost_vring_state *state)
> +{
> + struct virtio_net *dev = get_device(ctx);
> + uint32_t base_idx = state->index;
> + int enabled = (int)state->num;
> +
> + RTE_LOG(INFO, VHOST_CONFIG,
> + "set queue enable: %d to qp idx: %d\n",
> + enabled, state->index);
> +
> + dev->virtqueue[base_idx + VIRTIO_RXQ]->enabled = enabled;
> + dev->virtqueue[base_idx + VIRTIO_TXQ]->enabled = enabled;
> +
> + return 0;
> +}
> +
> void
> user_destroy_device(struct vhost_device_ctx ctx)
> {
It might be a good idea to flush any packets being processed
on relevant cores at this point.
> diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.h b/lib/librte_vhost/vhost_user/virtio-net-user.h
> index e7a6ff4..d46057e 100644
> --- a/lib/librte_vhost/vhost_user/virtio-net-user.h
> +++ b/lib/librte_vhost/vhost_user/virtio-net-user.h
> @@ -50,5 +50,8 @@ void user_set_protocol_features(struct vhost_device_ctx ctx,
>
> int user_get_vring_base(struct vhost_device_ctx, struct vhost_vring_state *);
>
> +int user_set_vring_enable(struct vhost_device_ctx ctx,
> + struct vhost_vring_state *state);
> +
> void user_destroy_device(struct vhost_device_ctx);
> #endif
> diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
> index 643a92e..5fe1ad6 100644
> --- a/lib/librte_vhost/virtio-net.c
> +++ b/lib/librte_vhost/virtio-net.c
> @@ -254,7 +254,7 @@ rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
> }
>
> static void
> -init_vring_queue(struct vhost_virtqueue *vq)
> +init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
> {
> memset(vq, 0, sizeof(struct vhost_virtqueue));
>
> @@ -263,13 +263,19 @@ init_vring_queue(struct vhost_virtqueue *vq)
>
> /* Backends are set to -1 indicating an inactive device. */
> vq->backend = -1;
> +
> + /* always set the default vq pair to enabled */
> + if (qp_idx == 0)
> + vq->enabled = 1;
> }
>
> static void
> init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
> {
> - init_vring_queue(dev->virtqueue[qp_idx * VIRTIO_QNUM + VIRTIO_RXQ]);
> - init_vring_queue(dev->virtqueue[qp_idx * VIRTIO_QNUM + VIRTIO_TXQ]);
> + uint32_t base_idx = qp_idx * VIRTIO_QNUM;
> +
> + init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
> + init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
> }
>
> static int
> --
> 1.9.0
On Sun, Sep 20, 2015 at 12:37:35PM +0300, Michael S. Tsirkin wrote:
> On Fri, Sep 18, 2015 at 11:10:54PM +0800, Yuanhan Liu wrote:
> > From: Changchun Ouyang <changchun.ouyang@intel.com>
> >
> > This message is used to enable/disable a specific vring queue pair.
> > The first queue pair is enabled by default.
> >
> > Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> > Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> > ---
[snip...]
> > void
> > user_destroy_device(struct vhost_device_ctx ctx)
> > {
>
> It might be a good idea to flush any packets being processed
> on relevant cores at this point.
They are offloaded to the application (examples/vhost/vhost-switch in
this case).
user_destroy_device will invoke the application's "destroy_device()"
callback in the end, which, in our case, will set "remove" flag. The
core worker will then drain and free the RX queue and free TX queue
once the "remove" flag is set.
--yliu
On Mon, Sep 21, 2015 at 10:22:52AM +0800, Yuanhan Liu wrote:
> On Sun, Sep 20, 2015 at 12:37:35PM +0300, Michael S. Tsirkin wrote:
> > On Fri, Sep 18, 2015 at 11:10:54PM +0800, Yuanhan Liu wrote:
> > > From: Changchun Ouyang <changchun.ouyang@intel.com>
> > >
> > > This message is used to enable/disable a specific vring queue pair.
> > > The first queue pair is enabled by default.
> > >
> > > Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> > > Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> > > ---
> [snip...]
> > > void
> > > user_destroy_device(struct vhost_device_ctx ctx)
> > > {
> >
> > It might be a good idea to flush any packets being processed
> > on relevant cores at this point.
>
> They are offloaded to the application (examples/vhost/vhost-switch in
> this case).
>
> user_destroy_device will invoke the application's "destroy_device()"
> callback in the end, which, in our case, will set "remove" flag. The
> core worker will then drain and free the RX queue and free TX queue
> once the "remove" flag is set.
>
> --yliu
Oh, I really meant user_set_vring_enable.
Sorry about the confusion.
On Mon, Sep 21, 2015 at 12:02:20PM +0300, Michael S. Tsirkin wrote:
> On Mon, Sep 21, 2015 at 10:22:52AM +0800, Yuanhan Liu wrote:
> > On Sun, Sep 20, 2015 at 12:37:35PM +0300, Michael S. Tsirkin wrote:
> > > On Fri, Sep 18, 2015 at 11:10:54PM +0800, Yuanhan Liu wrote:
> > > > From: Changchun Ouyang <changchun.ouyang@intel.com>
> > > >
> > > > This message is used to enable/disable a specific vring queue pair.
> > > > The first queue pair is enabled by default.
> > > >
> > > > Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> > > > Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> > > > ---
> > [snip...]
> > > > void
> > > > user_destroy_device(struct vhost_device_ctx ctx)
> > > > {
> > >
> > > It might be a good idea to flush any packets being processed
> > > on relevant cores at this point.
> >
> > They are offloaded to the application (examples/vhost/vhost-switch in
> > this case).
> >
> > user_destroy_device will invoke the application's "destroy_device()"
> > callback in the end, which, in our case, will set "remove" flag. The
> > core worker will then drain and free the RX queue and free TX queue
> > once the "remove" flag is set.
> >
> > --yliu
>
>
> Oh, I really meant user_set_vring_enable.
Will a per-vring callback help then?
We may prototype the callback to "vring_state_changed(dev, vring_index)"
so that the application could either, as you suggested, flush any packets
that haven't been processed yet, or simply drop them.
--yliu
On Tue, Sep 22, 2015 at 10:21:22AM +0800, Yuanhan Liu wrote:
> On Mon, Sep 21, 2015 at 12:02:20PM +0300, Michael S. Tsirkin wrote:
> > On Mon, Sep 21, 2015 at 10:22:52AM +0800, Yuanhan Liu wrote:
> > > On Sun, Sep 20, 2015 at 12:37:35PM +0300, Michael S. Tsirkin wrote:
> > > > On Fri, Sep 18, 2015 at 11:10:54PM +0800, Yuanhan Liu wrote:
> > > > > From: Changchun Ouyang <changchun.ouyang@intel.com>
> > > > >
> > > > > This message is used to enable/disable a specific vring queue pair.
> > > > > The first queue pair is enabled by default.
> > > > >
> > > > > Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> > > > > Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> > > > > ---
> > > [snip...]
> > > > > void
> > > > > user_destroy_device(struct vhost_device_ctx ctx)
> > > > > {
> > > >
> > > > It might be a good idea to flush any packets being processed
> > > > on relevant cores at this point.
> > >
> > > They are offloaded to the application (examples/vhost/vhost-switch in
> > > this case).
> > >
> > > user_destroy_device will invoke the application's "destroy_device()"
> > > callback in the end, which, in our case, will set "remove" flag. The
> > > core worker will then drain and free the RX queue and free TX queue
> > > once the "remove" flag is set.
> > >
> > > --yliu
> >
> >
> > Oh, I really meant user_set_vring_enable.
>
> Will a per-vring callback help then?
>
> We may prototype the callback to "vring_state_changed(dev, vring_index)"
It should be "vring_state_changed(dev, vring_index, enable)".
> so that the application could either, as you suggested, flushes any packets
> haven't been processed yet, or simply drops them.
After giving it more thought, I guess it's also necessary to inform the
application that a specific vring must never be used for data transfer
before an "enabled" state has been received for it, since only the first
queue pair is enabled by default.
And I think we could do both in vring_state_changed() callback.
Michael, what do you think of it?
--yliu
@@ -89,6 +89,7 @@ struct vhost_virtqueue {
volatile uint16_t last_used_idx_res; /**< Used for multiple devices reserving buffers. */
int callfd; /**< Used to notify the guest (trigger interrupt). */
int kickfd; /**< Currently unused as polling mode is enabled. */
+ int enabled;
struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
} __rte_cache_aligned;
@@ -86,6 +86,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
}
vq = dev->virtqueue[queue_id];
+ if (unlikely(vq->enabled == 0))
+ return 0;
+
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/*
@@ -278,6 +281,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
* (guest physical addr -> vhost virtual addr)
*/
vq = dev->virtqueue[queue_id];
+
vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
vb_hdr_addr = vb_addr;
@@ -485,6 +489,9 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
}
vq = dev->virtqueue[queue_id];
+ if (unlikely(vq->enabled == 0))
+ return 0;
+
count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
if (count == 0)
@@ -583,6 +590,9 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
}
vq = dev->virtqueue[queue_id];
+ if (unlikely(vq->enabled == 0))
+ return 0;
+
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
/* If there are no available buffers then return. */
@@ -99,6 +99,7 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
[VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
[VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
+ [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
};
/**
@@ -428,6 +429,10 @@ vserver_message_handler(int connfd, void *dat, int *remove)
send_vhost_message(connfd, &msg);
break;
+ case VHOST_USER_SET_VRING_ENABLE:
+ user_set_vring_enable(ctx, &msg.payload.state);
+ break;
+
default:
break;
@@ -66,6 +66,7 @@ typedef enum VhostUserRequest {
VHOST_USER_GET_PROTOCOL_FEATURES = 15,
VHOST_USER_SET_PROTOCOL_FEATURES = 16,
VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
VHOST_USER_MAX
} VhostUserRequest;
@@ -306,6 +306,28 @@ user_get_vring_base(struct vhost_device_ctx ctx,
return 0;
}
+/*
+ * When the virtio queues are ready to work, QEMU will send this message
+ * to enable or disable a specific virtio queue pair.
+ */
+int
+user_set_vring_enable(struct vhost_device_ctx ctx,
+ struct vhost_vring_state *state)
+{
+ struct virtio_net *dev = get_device(ctx);
+ uint32_t base_idx = state->index;
+ int enabled = (int)state->num;
+
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "set queue enable: %d to qp idx: %d\n",
+ enabled, state->index);
+
+ dev->virtqueue[base_idx + VIRTIO_RXQ]->enabled = enabled;
+ dev->virtqueue[base_idx + VIRTIO_TXQ]->enabled = enabled;
+
+ return 0;
+}
+
void
user_destroy_device(struct vhost_device_ctx ctx)
{
@@ -50,5 +50,8 @@ void user_set_protocol_features(struct vhost_device_ctx ctx,
int user_get_vring_base(struct vhost_device_ctx, struct vhost_vring_state *);
+int user_set_vring_enable(struct vhost_device_ctx ctx,
+ struct vhost_vring_state *state);
+
void user_destroy_device(struct vhost_device_ctx);
#endif
@@ -254,7 +254,7 @@ rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
}
static void
-init_vring_queue(struct vhost_virtqueue *vq)
+init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
{
memset(vq, 0, sizeof(struct vhost_virtqueue));
@@ -263,13 +263,19 @@ init_vring_queue(struct vhost_virtqueue *vq)
/* Backends are set to -1 indicating an inactive device. */
vq->backend = -1;
+
+ /* always set the default vq pair to enabled */
+ if (qp_idx == 0)
+ vq->enabled = 1;
}
static void
init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
{
- init_vring_queue(dev->virtqueue[qp_idx * VIRTIO_QNUM + VIRTIO_RXQ]);
- init_vring_queue(dev->virtqueue[qp_idx * VIRTIO_QNUM + VIRTIO_TXQ]);
+ uint32_t base_idx = qp_idx * VIRTIO_QNUM;
+
+ init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
+ init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
}
static int