[v1,3/4] vhost: improve device ready definition

Message ID 1592497686-433697-4-git-send-email-matan@mellanox.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost: improve ready state

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail apply issues

Commit Message

Matan Azrad June 18, 2020, 4:28 p.m. UTC
  Some guest drivers may not configure disabled virtio queues.

In this case, the vhost management never triggers the vDPA device
configuration because it waits for the device to be ready.

The current ready state means that all the virtio queues should be
configured regardless of their enablement status.

In order to support this case, this patch changes the ready state:
the device is ready when at least one queue pair is configured and
enabled.

So, now, the vDPA driver will be configured when the first queue pair is
configured and enabled.

Also, the queue state operation is changed to follow these rules:
	1. queue becomes ready (enabled and fully configured) -
		set_vring_state(enabled).
	2. queue becomes not ready - set_vring_state(disabled).
	3. queue stays ready and a VHOST_USER_SET_VRING_ENABLE message was
		handled - set_vring_state(enabled).

The parallel operations for the application are adjusted too.

Signed-off-by: Matan Azrad <matan@mellanox.com>
---
 lib/librte_vhost/vhost_user.c | 51 ++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 18 deletions(-)
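
(For illustration only, not part of the patch: the three rules above, recast
as a per-queue helper. vq_is_ready() and the callback names are the ones used
in the diff below; was_ready and enable_msg_handled are hypothetical values
the caller would have to track.)

static void
notify_vring_state(struct virtio_net *dev, struct rte_vdpa_device *vdpa_dev,
		   uint32_t idx, bool was_ready, bool enable_msg_handled)
{
	bool cur_ready = vq_is_ready(dev, dev->virtqueue[idx]);

	/*
	 * Rules 1 and 2: notify on any ready <-> not-ready transition.
	 * Rule 3: a queue that stays ready is notified again as enabled
	 * when a VHOST_USER_SET_VRING_ENABLE message was handled for it.
	 */
	if (cur_ready != was_ready || (cur_ready && enable_msg_handled)) {
		if (vdpa_dev && vdpa_dev->ops->set_vring_state)
			vdpa_dev->ops->set_vring_state(dev->vid, idx,
							(int)cur_ready);

		if (dev->notify_ops->vring_state_changed)
			dev->notify_ops->vring_state_changed(dev->vid, idx,
							(int)cur_ready);
	}
}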
  

Comments

Maxime Coquelin June 19, 2020, 7:41 a.m. UTC | #1
On 6/18/20 6:28 PM, Matan Azrad wrote:
> Some guest drivers may not configure disabled virtio queues.
> 
> In this case, the vhost management never triggers the vDPA device
> configuration because it waits to the device to be ready.

This is not vDPA-only, even with SW datapath the application's
new_device callback never gets called.
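
(For context, a minimal sketch of the application-side callbacks involved
here, as an application would register them with
rte_vhost_driver_callback_register(); the app_* names are made up for the
example:)

#include <rte_vhost.h>

/* Called once the device is considered ready. */
static int
app_new_device(int vid)
{
	return 0;
}

static void
app_destroy_device(int vid)
{
}

/* Called on each queue ready-state transition. */
static int
app_vring_state_changed(int vid, uint16_t queue_id, int enable)
{
	return 0;
}

static const struct vhost_device_ops app_ops = {
	.new_device = app_new_device,
	.destroy_device = app_destroy_device,
	.vring_state_changed = app_vring_state_changed,
};

/* Registered with: rte_vhost_driver_callback_register(path, &app_ops); */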

> The current ready state means that all the virtio queues should be
> configured regardless the enablement status.
> 
> In order to support this case, this patch changes the ready state:
> The device is ready when at least 1 queue pair is configured and
> enabled.
> 
> So, now, the vDPA driver will be configured when the first queue pair is
> configured and enabled.
> 
> Also the queue state operation is change to the next rules:
> 	1. queue becomes ready (enabled and fully configured) -
> 		set_vring_state(enabled).
> 	2. queue becomes not ready - set_vring_state(disabled).
> 	3. queue stay ready and VHOST_USER_SET_VRING_ENABLE massage was
> 		handled - set_vring_state(enabled).
> 
> The parallel operations for the application are adjusted too.
> 
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> ---
>  lib/librte_vhost/vhost_user.c | 51 ++++++++++++++++++++++++++++---------------
>  1 file changed, 33 insertions(+), 18 deletions(-)
> 
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index b0849b9..cfd5f27 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -1295,7 +1295,7 @@
>  {
>  	bool rings_ok;
>  
> -	if (!vq)
> +	if (!vq || !vq->enabled)
>  		return false;
>  
>  	if (vq_is_packed(dev))
> @@ -1309,24 +1309,27 @@
>  	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
>  }
>  
> +#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
> +
>  static int
>  virtio_is_ready(struct virtio_net *dev)
>  {
>  	struct vhost_virtqueue *vq;
>  	uint32_t i;
>  
> -	if (dev->nr_vring == 0)
> +	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
>  		return 0;
>  
> -	for (i = 0; i < dev->nr_vring; i++) {
> +	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
>  		vq = dev->virtqueue[i];
>  
>  		if (!vq_is_ready(dev, vq))
>  			return 0;
>  	}
>  
> -	VHOST_LOG_CONFIG(INFO,
> -		"virtio is now ready for processing.\n");
> +	if (!(dev->flags & VIRTIO_DEV_READY))
> +		VHOST_LOG_CONFIG(INFO,
> +			"virtio is now ready for processing.\n");
>  	return 1;
>  }
>  
> @@ -1970,8 +1973,6 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
>  	struct virtio_net *dev = *pdev;
>  	int enable = (int)msg->payload.state.num;
>  	int index = (int)msg->payload.state.index;
> -	struct rte_vdpa_device *vdpa_dev;
> -	int did = -1;
>  
>  	if (validate_msg_fds(msg, 0) != 0)
>  		return RTE_VHOST_MSG_RESULT_ERR;
> @@ -1980,15 +1981,6 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
>  		"set queue enable: %d to qp idx: %d\n",
>  		enable, index);
>  
> -	did = dev->vdpa_dev_id;
> -	vdpa_dev = rte_vdpa_get_device(did);
> -	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
> -		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
> -
> -	if (dev->notify_ops->vring_state_changed)
> -		dev->notify_ops->vring_state_changed(dev->vid,
> -				index, enable);
> -
>  	/* On disable, rings have to be stopped being processed. */
>  	if (!enable && dev->dequeue_zero_copy)
>  		drain_zmbuf_list(dev->virtqueue[index]);
> @@ -2622,11 +2614,13 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
>  	struct virtio_net *dev;
>  	struct VhostUserMsg msg;
>  	struct rte_vdpa_device *vdpa_dev;
> +	bool ready[VHOST_MAX_VRING];
>  	int did = -1;
>  	int ret;
>  	int unlock_required = 0;
>  	bool handled;
>  	int request;
> +	uint32_t i;
>  
>  	dev = get_device(vid);
>  	if (dev == NULL)
> @@ -2668,6 +2662,10 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
>  		VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
>  	}
>  
> +	/* Save ready status for all the VQs before message handle. */
> +	for (i = 0; i < VHOST_MAX_VRING; i++)
> +		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);
> +

This big array can be avoided if you save the ready status in the
virtqueue once the message has been handled.

>  	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
>  	if (ret < 0) {
>  		VHOST_LOG_CONFIG(ERR,
> @@ -2802,6 +2800,25 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
>  		return -1;
>  	}
>  
> +	did = dev->vdpa_dev_id;
> +	vdpa_dev = rte_vdpa_get_device(did);
> +	/* Update ready status. */
> +	for (i = 0; i < VHOST_MAX_VRING; i++) {
> +		bool cur_ready = vq_is_ready(dev, dev->virtqueue[i]);
> +
> +		if ((cur_ready && request == VHOST_USER_SET_VRING_ENABLE &&
> +				i == msg.payload.state.index) ||

Couldn't we remove the above condition? Aren't the callbacks already called
in the set_vring_enable handler?

> +				cur_ready != ready[i]) {
> +			if (vdpa_dev && vdpa_dev->ops->set_vring_state)
> +				vdpa_dev->ops->set_vring_state(dev->vid, i,
> +								(int)cur_ready);
> +
> +			if (dev->notify_ops->vring_state_changed)
> +				dev->notify_ops->vring_state_changed(dev->vid,
> +							i, (int)cur_ready);
> +		}
> +	}

I think we should move this into a dedicated function, which we would
call in every message handler that can modify the ready state.

Doing so, we would not have to assume the master sent us a disable request
for the queue before, and we would also have proper synchronization if the
request uses the reply-ack feature, as the master could assume the backend
is no longer processing the ring once the reply-ack is received.

>  	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
>  		dev->flags |= VIRTIO_DEV_READY;
>  
> @@ -2816,8 +2833,6 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
>  		}
>  	}
>  
> -	did = dev->vdpa_dev_id;
> -	vdpa_dev = rte_vdpa_get_device(did);
>  	if (vdpa_dev && virtio_is_ready(dev) &&
>  			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
>  			msg.request.master == VHOST_USER_SET_VRING_CALL) {

Shouldn't the check on SET_VRING_CALL above be removed?
  
Maxime Coquelin June 19, 2020, 12:04 p.m. UTC | #2
Hi Matan,

On 6/19/20 9:41 AM, Maxime Coquelin wrote:
> 
> 
> On 6/18/20 6:28 PM, Matan Azrad wrote:
>> Some guest drivers may not configure disabled virtio queues.
>>
>> In this case, the vhost management never triggers the vDPA device
>> configuration because it waits to the device to be ready.
> 
> This is not vDPA-only, even with SW datapath the application's
> new_device callback never gets called.
> 
>> The current ready state means that all the virtio queues should be
>> configured regardless the enablement status.
>>
>> In order to support this case, this patch changes the ready state:
>> The device is ready when at least 1 queue pair is configured and
>> enabled.
>>
>> So, now, the vDPA driver will be configured when the first queue pair is
>> configured and enabled.
>>
>> Also the queue state operation is change to the next rules:
>> 	1. queue becomes ready (enabled and fully configured) -
>> 		set_vring_state(enabled).
>> 	2. queue becomes not ready - set_vring_state(disabled).
>> 	3. queue stay ready and VHOST_USER_SET_VRING_ENABLE massage was
>> 		handled - set_vring_state(enabled).
>>
>> The parallel operations for the application are adjusted too.
>>
>> Signed-off-by: Matan Azrad <matan@mellanox.com>
>> ---
>>  lib/librte_vhost/vhost_user.c | 51 ++++++++++++++++++++++++++++---------------
>>  1 file changed, 33 insertions(+), 18 deletions(-)
>>
>> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
>> index b0849b9..cfd5f27 100644
>> --- a/lib/librte_vhost/vhost_user.c
>> +++ b/lib/librte_vhost/vhost_user.c
>> @@ -1295,7 +1295,7 @@
>>  {
>>  	bool rings_ok;
>>  
>> -	if (!vq)
>> +	if (!vq || !vq->enabled)
>>  		return false;
>>  
>>  	if (vq_is_packed(dev))
>> @@ -1309,24 +1309,27 @@
>>  	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
>>  }
>>  
>> +#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
>> +
>>  static int
>>  virtio_is_ready(struct virtio_net *dev)
>>  {
>>  	struct vhost_virtqueue *vq;
>>  	uint32_t i;
>>  
>> -	if (dev->nr_vring == 0)
>> +	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
>>  		return 0;
>>  
>> -	for (i = 0; i < dev->nr_vring; i++) {
>> +	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
>>  		vq = dev->virtqueue[i];
>>  
>>  		if (!vq_is_ready(dev, vq))
>>  			return 0;
>>  	}
>>  
>> -	VHOST_LOG_CONFIG(INFO,
>> -		"virtio is now ready for processing.\n");
>> +	if (!(dev->flags & VIRTIO_DEV_READY))
>> +		VHOST_LOG_CONFIG(INFO,
>> +			"virtio is now ready for processing.\n");
>>  	return 1;
>>  }
>>  
>> @@ -1970,8 +1973,6 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
>>  	struct virtio_net *dev = *pdev;
>>  	int enable = (int)msg->payload.state.num;
>>  	int index = (int)msg->payload.state.index;
>> -	struct rte_vdpa_device *vdpa_dev;
>> -	int did = -1;
>>  
>>  	if (validate_msg_fds(msg, 0) != 0)
>>  		return RTE_VHOST_MSG_RESULT_ERR;
>> @@ -1980,15 +1981,6 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
>>  		"set queue enable: %d to qp idx: %d\n",
>>  		enable, index);
>>  
>> -	did = dev->vdpa_dev_id;
>> -	vdpa_dev = rte_vdpa_get_device(did);
>> -	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
>> -		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
>> -
>> -	if (dev->notify_ops->vring_state_changed)
>> -		dev->notify_ops->vring_state_changed(dev->vid,
>> -				index, enable);
>> -
>>  	/* On disable, rings have to be stopped being processed. */
>>  	if (!enable && dev->dequeue_zero_copy)
>>  		drain_zmbuf_list(dev->virtqueue[index]);
>> @@ -2622,11 +2614,13 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
>>  	struct virtio_net *dev;
>>  	struct VhostUserMsg msg;
>>  	struct rte_vdpa_device *vdpa_dev;
>> +	bool ready[VHOST_MAX_VRING];
>>  	int did = -1;
>>  	int ret;
>>  	int unlock_required = 0;
>>  	bool handled;
>>  	int request;
>> +	uint32_t i;
>>  
>>  	dev = get_device(vid);
>>  	if (dev == NULL)
>> @@ -2668,6 +2662,10 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
>>  		VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
>>  	}
>>  
>> +	/* Save ready status for all the VQs before message handle. */
>> +	for (i = 0; i < VHOST_MAX_VRING; i++)
>> +		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);
>> +
> 
> This big array can be avoided if you save the ready status in the
> virtqueue once message have been handled.
> 
>>  	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
>>  	if (ret < 0) {
>>  		VHOST_LOG_CONFIG(ERR,
>> @@ -2802,6 +2800,25 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
>>  		return -1;
>>  	}
>>  
>> +	did = dev->vdpa_dev_id;
>> +	vdpa_dev = rte_vdpa_get_device(did);
>> +	/* Update ready status. */
>> +	for (i = 0; i < VHOST_MAX_VRING; i++) {
>> +		bool cur_ready = vq_is_ready(dev, dev->virtqueue[i]);
>> +
>> +		if ((cur_ready && request == VHOST_USER_SET_VRING_ENABLE &&
>> +				i == msg.payload.state.index) ||
> 
> Couldn't we remove above condition? Aren't the callbacks already called
> in the set_vring_enable handler?
> 
>> +				cur_ready != ready[i]) {
>> +			if (vdpa_dev && vdpa_dev->ops->set_vring_state)
>> +				vdpa_dev->ops->set_vring_state(dev->vid, i,
>> +								(int)cur_ready);
>> +
>> +			if (dev->notify_ops->vring_state_changed)
>> +				dev->notify_ops->vring_state_changed(dev->vid,
>> +							i, (int)cur_ready);
>> +		}
>> +	}
> 
> I think we should move this into a dedicated function, which we would
> call in every message handler that can modify the ready state.
> 
> Doing so, we would not have to assume the master sent us disable request
> for the queue before, ans also would have proper synchronization if the
> request uses reply-ack feature as it could assume the backend is no more
> processing the ring once reply-ack is received.
> 
>>  	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
>>  		dev->flags |= VIRTIO_DEV_READY;
>>  
>> @@ -2816,8 +2833,6 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
>>  		}
>>  	}
>>  
>> -	did = dev->vdpa_dev_id;
>> -	vdpa_dev = rte_vdpa_get_device(did);
>>  	if (vdpa_dev && virtio_is_ready(dev) &&
>>  			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
>>  			msg.request.master == VHOST_USER_SET_VRING_CALL) {
> 
> Shouldn't check on SET_VRING_CALL above be removed?
> 

Thinking about it again, I think the ready state should include whether or
not the queue is enabled. And as soon as a request impacting the ring
addresses or the call or kick FDs is handled, we should reset the modified
value and notify the state change for the impacted queue. Then, once the
request has been handled, we can send state change updates if any queue's
state changed.

Doing that, we don't have to assume the Vhost-user master will have sent
the disable request before doing the state change. And if it did, the
'not ready' update won't be sent twice to the driver or application.

In case I am not clear enough, I have prototyped this idea (only
compile-tested). If it works for you, feel free to add it in your
series.

Thanks,
Maxime


diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index df98d15de6..48e8fcfbc0 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -150,6 +150,7 @@ struct vhost_virtqueue {
        /* Backend value to determine if device should started/stopped */
        int                     backend;
        int                     enabled;
+       bool                    ready;
        int                     access_ok;
        rte_spinlock_t          access_lock;

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index ea9cd107b9..f3cda536c6 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -228,6 +228,87 @@ vhost_backend_cleanup(struct virtio_net *dev)
        dev->postcopy_listening = 0;
 }

+
+static bool
+vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+       bool rings_ok;
+
+       if (!vq)
+               return false;
+
+       if (vq_is_packed(dev))
+               rings_ok = vq->desc_packed && vq->driver_event &&
+                       vq->device_event;
+       else
+               rings_ok = vq->desc && vq->avail && vq->used;
+
+       return rings_ok &&
+              vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
+              vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
+              vq->enabled;
+}
+
+static int
+virtio_is_ready(struct virtio_net *dev)
+{
+       struct vhost_virtqueue *vq;
+       uint32_t i;
+
+       if (dev->nr_vring == 0)
+               return 0;
+
+       for (i = 0; i < 2; i++) {
+               vq = dev->virtqueue[i];
+
+               if (!vq_is_ready(dev, vq))
+                       return 0;
+       }
+
+       VHOST_LOG_CONFIG(INFO,
+               "virtio is now ready for processing.\n");
+       return 1;
+}
+
+static void
+vhost_user_update_vring_state(struct virtio_net *dev, int idx)
+{
+       struct vhost_virtqueue *vq = dev->virtqueue[idx];
+       struct rte_vdpa_device *vdpa_dev;
+       int did;
+       bool was_ready = vq->ready;
+
+       vq->ready = vq_is_ready(dev, vq);
+       if (was_ready == vq->ready)
+               return;
+
+       if (dev->notify_ops->vring_state_changed)
+               dev->notify_ops->vring_state_changed(dev->vid, idx, vq->ready);
+
+       did = dev->vdpa_dev_id;
+       vdpa_dev = rte_vdpa_get_device(did);
+       if (vdpa_dev && vdpa_dev->ops->set_vring_state)
+               vdpa_dev->ops->set_vring_state(dev->vid, idx, vq->ready);
+}
+
+static void
+vhost_user_update_vring_state_all(struct virtio_net *dev)
+{
+       uint32_t i;
+
+       for (i = 0; i < dev->nr_vring; i++)
+               vhost_user_update_vring_state(dev, i);
+}
+
+static void
+vhost_user_invalidate_vring(struct virtio_net *dev, int index)
+{
+       struct vhost_virtqueue *vq = dev->virtqueue[index];
+
+       vring_invalidate(dev, vq);
+       vhost_user_update_vring_state(dev, index);
+}
+
 /*
  * This function just returns success at the moment unless
  * the device hasn't been initialised.
@@ -841,7 +922,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

-       vring_invalidate(dev, vq);
+       vhost_user_invalidate_vring(dev, msg->payload.addr.index);

        if ((vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
@@ -1267,7 +1348,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                         * need to be translated again as virtual addresses have
                         * changed.
                         */
-                       vring_invalidate(dev, vq);
+                       vhost_user_invalidate_vring(dev, i);

                        dev = translate_ring_addresses(dev, i);
                        if (!dev) {
@@ -1290,46 +1371,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
        return RTE_VHOST_MSG_RESULT_ERR;
 }

-static bool
-vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
-{
-       bool rings_ok;
-
-       if (!vq)
-               return false;
-
-       if (vq_is_packed(dev))
-               rings_ok = vq->desc_packed && vq->driver_event &&
-                       vq->device_event;
-       else
-               rings_ok = vq->desc && vq->avail && vq->used;
-
-       return rings_ok &&
-              vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
-              vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
-}
-
-static int
-virtio_is_ready(struct virtio_net *dev)
-{
-       struct vhost_virtqueue *vq;
-       uint32_t i;
-
-       if (dev->nr_vring == 0)
-               return 0;
-
-       for (i = 0; i < dev->nr_vring; i++) {
-               vq = dev->virtqueue[i];
-
-               if (!vq_is_ready(dev, vq))
-                       return 0;
-       }
-
-       VHOST_LOG_CONFIG(INFO,
-               "virtio is now ready for processing.\n");
-       return 1;
-}
-
 static void *
 inflight_mem_alloc(const char *name, size_t size, int *fd)
 {
@@ -1599,6 +1640,10 @@ vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
        if (vq->callfd >= 0)
                close(vq->callfd);

+       vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+
+       vhost_user_update_vring_state(dev, file.index);
+
        vq->callfd = file.fd;

        return RTE_VHOST_MSG_RESULT_OK;
@@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
-       if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
+       if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;
-               if (dev->notify_ops->vring_state_changed)
-                       dev->notify_ops->vring_state_changed(
-                               dev->vid, file.index, 1);
-       }

        if (vq->kickfd >= 0)
                close(vq->kickfd);
+
+       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+
+       vhost_user_update_vring_state(dev, file.index);
+
        vq->kickfd = file.fd;

        if (vq_is_packed(dev)) {
@@ -1953,6 +1999,10 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
        msg->size = sizeof(msg->payload.state);
        msg->fd_num = 0;

+       /*
+        * No need to call vhost_user_invalidate_vring here,
+        * device is destroyed.
+        */
        vring_invalidate(dev, vq);

        return RTE_VHOST_MSG_RESULT_REPLY;
@@ -1970,8 +2020,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
        struct virtio_net *dev = *pdev;
        int enable = (int)msg->payload.state.num;
        int index = (int)msg->payload.state.index;
-       struct rte_vdpa_device *vdpa_dev;
-       int did = -1;
+       struct vhost_virtqueue *vq = dev->virtqueue[index];

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;
@@ -1980,20 +2029,13 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);

-       did = dev->vdpa_dev_id;
-       vdpa_dev = rte_vdpa_get_device(did);
-       if (vdpa_dev && vdpa_dev->ops->set_vring_state)
-               vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
-
-       if (dev->notify_ops->vring_state_changed)
-               dev->notify_ops->vring_state_changed(dev->vid,
-                               index, enable);
-
        /* On disable, rings have to be stopped being processed. */
        if (!enable && dev->dequeue_zero_copy)
-               drain_zmbuf_list(dev->virtqueue[index]);
+               drain_zmbuf_list(vq);
+
+       vq->enabled = enable;

-       dev->virtqueue[index]->enabled = enable;
+       vhost_user_update_vring_state(dev, index);

        return RTE_VHOST_MSG_RESULT_OK;
 }
@@ -2332,7 +2374,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
                                        imsg->size);

                        if (is_vring_iotlb(dev, vq, imsg))
-                               vring_invalidate(dev, vq);
+                               vhost_user_invalidate_vring(dev, i);
                }
                break;
        default:
@@ -2791,6 +2833,8 @@ vhost_user_msg_handler(int vid, int fd)
                return -1;
        }

+       vhost_user_update_vring_state_all(dev);
+
        if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
                dev->flags |= VIRTIO_DEV_READY;

@@ -2808,8 +2852,7 @@ vhost_user_msg_handler(int vid, int fd)
        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && virtio_is_ready(dev) &&
-                       !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
-                       msg.request.master == VHOST_USER_SET_VRING_CALL) {
+                       !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
                if (vdpa_dev->ops->dev_conf)
                        vdpa_dev->ops->dev_conf(dev->vid);
                dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
  
Matan Azrad June 19, 2020, 1:11 p.m. UTC | #3
Hi Maxime

Thanks for the fast review.
This is the first version, let's review it carefully to be sure it is correct.
@Xiao Wang, it will be good to hear your idea too.
We also need to understand the effect on the IFC driver/device...
Just to update: I checked this code with the mlx5 adjustments I sent in this series.
It works well with the vDPA example application.

From: Maxime Coquelin:
> On 6/18/20 6:28 PM, Matan Azrad wrote:
> > Some guest drivers may not configure disabled virtio queues.
> >
> > In this case, the vhost management never triggers the vDPA device
> > configuration because it waits to the device to be ready.
> 
> This is not vDPA-only, even with SW datapath the application's new_device
> callback never gets called.
> 
Yes, I wrote it below, I can be more specific here too in the next version.

> > The current ready state means that all the virtio queues should be
> > configured regardless the enablement status.
> >
> > In order to support this case, this patch changes the ready state:
> > The device is ready when at least 1 queue pair is configured and
> > enabled.
> >
> > So, now, the vDPA driver will be configured when the first queue pair
> > is configured and enabled.
> >
> > Also the queue state operation is change to the next rules:
> > 	1. queue becomes ready (enabled and fully configured) -
> > 		set_vring_state(enabled).
> > 	2. queue becomes not ready - set_vring_state(disabled).
> > 	3. queue stay ready and VHOST_USER_SET_VRING_ENABLE massage
> was
> > 		handled - set_vring_state(enabled).
> >
> > The parallel operations for the application are adjusted too.
> >
> > Signed-off-by: Matan Azrad <matan@mellanox.com>
> > ---
> >  lib/librte_vhost/vhost_user.c | 51
> > ++++++++++++++++++++++++++++---------------
> >  1 file changed, 33 insertions(+), 18 deletions(-)
> >
> > diff --git a/lib/librte_vhost/vhost_user.c
> > b/lib/librte_vhost/vhost_user.c index b0849b9..cfd5f27 100644
> > --- a/lib/librte_vhost/vhost_user.c
> > +++ b/lib/librte_vhost/vhost_user.c
> > @@ -1295,7 +1295,7 @@
> >  {
> >  	bool rings_ok;
> >
> > -	if (!vq)
> > +	if (!vq || !vq->enabled)
> >  		return false;
> >
> >  	if (vq_is_packed(dev))
> > @@ -1309,24 +1309,27 @@
> >  	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;  }
> >
> > +#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
> > +
> >  static int
> >  virtio_is_ready(struct virtio_net *dev)  {
> >  	struct vhost_virtqueue *vq;
> >  	uint32_t i;
> >
> > -	if (dev->nr_vring == 0)
> > +	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
> >  		return 0;
> >
> > -	for (i = 0; i < dev->nr_vring; i++) {
> > +	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
> >  		vq = dev->virtqueue[i];
> >
> >  		if (!vq_is_ready(dev, vq))
> >  			return 0;
> >  	}
> >
> > -	VHOST_LOG_CONFIG(INFO,
> > -		"virtio is now ready for processing.\n");
> > +	if (!(dev->flags & VIRTIO_DEV_READY))
> > +		VHOST_LOG_CONFIG(INFO,
> > +			"virtio is now ready for processing.\n");
> >  	return 1;
> >  }
> >
> > @@ -1970,8 +1973,6 @@ static int vhost_user_set_vring_err(struct
> virtio_net **pdev __rte_unused,
> >  	struct virtio_net *dev = *pdev;
> >  	int enable = (int)msg->payload.state.num;
> >  	int index = (int)msg->payload.state.index;
> > -	struct rte_vdpa_device *vdpa_dev;
> > -	int did = -1;
> >
> >  	if (validate_msg_fds(msg, 0) != 0)
> >  		return RTE_VHOST_MSG_RESULT_ERR;
> > @@ -1980,15 +1981,6 @@ static int vhost_user_set_vring_err(struct
> virtio_net **pdev __rte_unused,
> >  		"set queue enable: %d to qp idx: %d\n",
> >  		enable, index);
> >
> > -	did = dev->vdpa_dev_id;
> > -	vdpa_dev = rte_vdpa_get_device(did);
> > -	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
> > -		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
> > -
> > -	if (dev->notify_ops->vring_state_changed)
> > -		dev->notify_ops->vring_state_changed(dev->vid,
> > -				index, enable);
> > -
> >  	/* On disable, rings have to be stopped being processed. */
> >  	if (!enable && dev->dequeue_zero_copy)
> >  		drain_zmbuf_list(dev->virtqueue[index]);
> > @@ -2622,11 +2614,13 @@ typedef int
> (*vhost_message_handler_t)(struct virtio_net **pdev,
> >  	struct virtio_net *dev;
> >  	struct VhostUserMsg msg;
> >  	struct rte_vdpa_device *vdpa_dev;
> > +	bool ready[VHOST_MAX_VRING];
> >  	int did = -1;
> >  	int ret;
> >  	int unlock_required = 0;
> >  	bool handled;
> >  	int request;
> > +	uint32_t i;
> >
> >  	dev = get_device(vid);
> >  	if (dev == NULL)
> > @@ -2668,6 +2662,10 @@ typedef int (*vhost_message_handler_t)(struct
> virtio_net **pdev,
> >  		VHOST_LOG_CONFIG(DEBUG, "External request %d\n",
> request);
> >  	}
> >
> > +	/* Save ready status for all the VQs before message handle. */
> > +	for (i = 0; i < VHOST_MAX_VRING; i++)
> > +		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);
> > +
> 
> This big array can be avoided if you save the ready status in the virtqueue
> once message have been handled.

You mean you prefer to save it in the virtqueue structure? Isn't it the same memory?
In any case I don't think 0x100 is so big 😊
 
> >  	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
> >  	if (ret < 0) {
> >  		VHOST_LOG_CONFIG(ERR,
> > @@ -2802,6 +2800,25 @@ typedef int (*vhost_message_handler_t)(struct
> virtio_net **pdev,
> >  		return -1;
> >  	}
> >
> > +	did = dev->vdpa_dev_id;
> > +	vdpa_dev = rte_vdpa_get_device(did);
> > +	/* Update ready status. */
> > +	for (i = 0; i < VHOST_MAX_VRING; i++) {
> > +		bool cur_ready = vq_is_ready(dev, dev->virtqueue[i]);
> > +
> > +		if ((cur_ready && request ==
> VHOST_USER_SET_VRING_ENABLE &&
> > +				i == msg.payload.state.index) ||
> 
> Couldn't we remove above condition? Aren't the callbacks already called in
> the set_vring_enable handler?

As we agreed in the design discussion:

" 3. Same handling of the requests, except that we won't notify the 
 vdpa driver and the application of vring state changes in the 
 VHOST_USER_SET_VRING_ENABLE handler."  

So, I removed it from the set_vring_enable handler.

Now, the ready state doesn't depend only on the VHOST_USER_SET_VRING_ENABLE message.
 
> > +				cur_ready != ready[i]) {
> > +			if (vdpa_dev && vdpa_dev->ops->set_vring_state)
> > +				vdpa_dev->ops->set_vring_state(dev->vid, i,
> > +
> 	(int)cur_ready);
> > +
> > +			if (dev->notify_ops->vring_state_changed)
> > +				dev->notify_ops->vring_state_changed(dev-
> >vid,
> > +							i, (int)cur_ready);
> > +		}
> > +	}
> 
> I think we should move this into a dedicated function, which we would call in
> every message handler that can modify the ready state.
>
> Doing so, we would not have to assume the master sent us disable request
> for the queue before, ans also would have proper synchronization if the
> request uses reply-ack feature as it could assume the backend is no more
> processing the ring once reply-ack is received.

Makes sense to do it before the reply-ack and to create a dedicated function for it.

Shouldn't the vDPA configuration be called before the reply-ack too, to be sure the queues are ready before the reply?

If so, we should also move the device ready code below (maybe also the vDPA configuration) to this function.

But maybe calling it directly from this function, and not from the specific message handlers, is better, something like the vhost_user_check_and_alloc_queue_pair function style.

What do you think?

> >  	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
> >  		dev->flags |= VIRTIO_DEV_READY;
> >
> > @@ -2816,8 +2833,6 @@ typedef int (*vhost_message_handler_t)(struct
> virtio_net **pdev,
> >  		}
> >  	}
> >
> > -	did = dev->vdpa_dev_id;
> > -	vdpa_dev = rte_vdpa_get_device(did);
> >  	if (vdpa_dev && virtio_is_ready(dev) &&
> >  			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)
> &&
> >  			msg.request.master ==
> VHOST_USER_SET_VRING_CALL) {
> 
> Shouldn't check on SET_VRING_CALL above be removed?

Isn't it a workaround for something?
  
Maxime Coquelin June 19, 2020, 1:54 p.m. UTC | #4
Hi Matan,

On 6/19/20 3:11 PM, Matan Azrad wrote:
> Hi Maxime
> 
> Thanks for the fast review.
> This is first version, let's review it carefully to be sure it is correct.
> @Xiao Wang, it will be good to hear your idea too.
> We also need to understand the effect on IFC driver/device...
> Just to update that I checked this code with the mlx5 adjustments and I sent in this series.
> It works well with the vDPA example application.

OK.

> From: Maxime Coquelin:
>> On 6/18/20 6:28 PM, Matan Azrad wrote:
>>> Some guest drivers may not configure disabled virtio queues.
>>>
>>> In this case, the vhost management never triggers the vDPA device
>>> configuration because it waits to the device to be ready.
>>
>> This is not vDPA-only, even with SW datapath the application's new_device
>> callback never gets called.
>>
> Yes, I wrote it below, I can be more specific here too in the next version.
> 
>>> The current ready state means that all the virtio queues should be
>>> configured regardless the enablement status.
>>>
>>> In order to support this case, this patch changes the ready state:
>>> The device is ready when at least 1 queue pair is configured and
>>> enabled.
>>>
>>> So, now, the vDPA driver will be configured when the first queue pair
>>> is configured and enabled.
>>>
>>> Also the queue state operation is change to the next rules:
>>> 	1. queue becomes ready (enabled and fully configured) -
>>> 		set_vring_state(enabled).
>>> 	2. queue becomes not ready - set_vring_state(disabled).
>>> 	3. queue stay ready and VHOST_USER_SET_VRING_ENABLE massage
>> was
>>> 		handled - set_vring_state(enabled).
>>>
>>> The parallel operations for the application are adjusted too.
>>>
>>> Signed-off-by: Matan Azrad <matan@mellanox.com>
>>> ---
>>>  lib/librte_vhost/vhost_user.c | 51
>>> ++++++++++++++++++++++++++++---------------
>>>  1 file changed, 33 insertions(+), 18 deletions(-)
>>>
>>> diff --git a/lib/librte_vhost/vhost_user.c
>>> b/lib/librte_vhost/vhost_user.c index b0849b9..cfd5f27 100644
>>> --- a/lib/librte_vhost/vhost_user.c
>>> +++ b/lib/librte_vhost/vhost_user.c
>>> @@ -1295,7 +1295,7 @@
>>>  {
>>>  	bool rings_ok;
>>>
>>> -	if (!vq)
>>> +	if (!vq || !vq->enabled)
>>>  		return false;
>>>
>>>  	if (vq_is_packed(dev))
>>> @@ -1309,24 +1309,27 @@
>>>  	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;  }
>>>
>>> +#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
>>> +
>>>  static int
>>>  virtio_is_ready(struct virtio_net *dev)  {
>>>  	struct vhost_virtqueue *vq;
>>>  	uint32_t i;
>>>
>>> -	if (dev->nr_vring == 0)
>>> +	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
>>>  		return 0;
>>>
>>> -	for (i = 0; i < dev->nr_vring; i++) {
>>> +	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
>>>  		vq = dev->virtqueue[i];
>>>
>>>  		if (!vq_is_ready(dev, vq))
>>>  			return 0;
>>>  	}
>>>
>>> -	VHOST_LOG_CONFIG(INFO,
>>> -		"virtio is now ready for processing.\n");
>>> +	if (!(dev->flags & VIRTIO_DEV_READY))
>>> +		VHOST_LOG_CONFIG(INFO,
>>> +			"virtio is now ready for processing.\n");
>>>  	return 1;
>>>  }
>>>
>>> @@ -1970,8 +1973,6 @@ static int vhost_user_set_vring_err(struct
>> virtio_net **pdev __rte_unused,
>>>  	struct virtio_net *dev = *pdev;
>>>  	int enable = (int)msg->payload.state.num;
>>>  	int index = (int)msg->payload.state.index;
>>> -	struct rte_vdpa_device *vdpa_dev;
>>> -	int did = -1;
>>>
>>>  	if (validate_msg_fds(msg, 0) != 0)
>>>  		return RTE_VHOST_MSG_RESULT_ERR;
>>> @@ -1980,15 +1981,6 @@ static int vhost_user_set_vring_err(struct
>> virtio_net **pdev __rte_unused,
>>>  		"set queue enable: %d to qp idx: %d\n",
>>>  		enable, index);
>>>
>>> -	did = dev->vdpa_dev_id;
>>> -	vdpa_dev = rte_vdpa_get_device(did);
>>> -	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
>>> -		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
>>> -
>>> -	if (dev->notify_ops->vring_state_changed)
>>> -		dev->notify_ops->vring_state_changed(dev->vid,
>>> -				index, enable);
>>> -
>>>  	/* On disable, rings have to be stopped being processed. */
>>>  	if (!enable && dev->dequeue_zero_copy)
>>>  		drain_zmbuf_list(dev->virtqueue[index]);
>>> @@ -2622,11 +2614,13 @@ typedef int
>> (*vhost_message_handler_t)(struct virtio_net **pdev,
>>>  	struct virtio_net *dev;
>>>  	struct VhostUserMsg msg;
>>>  	struct rte_vdpa_device *vdpa_dev;
>>> +	bool ready[VHOST_MAX_VRING];
>>>  	int did = -1;
>>>  	int ret;
>>>  	int unlock_required = 0;
>>>  	bool handled;
>>>  	int request;
>>> +	uint32_t i;
>>>
>>>  	dev = get_device(vid);
>>>  	if (dev == NULL)
>>> @@ -2668,6 +2662,10 @@ typedef int (*vhost_message_handler_t)(struct
>> virtio_net **pdev,
>>>  		VHOST_LOG_CONFIG(DEBUG, "External request %d\n",
>> request);
>>>  	}
>>>
>>> +	/* Save ready status for all the VQs before message handle. */
>>> +	for (i = 0; i < VHOST_MAX_VRING; i++)
>>> +		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);
>>> +
>>
>> This big array can be avoided if you save the ready status in the virtqueue
>> once message have been handled.
> 
> You mean you prefer to save it in virtqueue structure? Desn't it same memory ?
> In any case I don't think 0x100 is so big 😊

I mean on the stack.

And one advantage of saving it in the vq structure is, for example, when you
have memory hotplug: the vq is in ready state at the beginning and at the
end, but during the handling the ring host virtual addresses get changed
because of the munmap/mmap, and we need to notify the driver, otherwise it
will miss it.
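
(For illustration only, a rough sketch of that scenario in the
VHOST_USER_SET_MEM_TABLE path. The function name and the
notify_vring_disabled()/notify_vring_enabled() helpers are hypothetical and
stand for the set_vring_state/vring_state_changed notifications; the point is
that a before/after snapshot sees "ready" on both sides and skips the
notification, even though the addresses changed in between:)

static struct virtio_net *
hotplug_retranslate_rings(struct virtio_net *dev)
{
	uint32_t i;

	for (i = 0; i < dev->nr_vring; i++) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (!vq_is_ready(dev, vq))
			continue;

		/* Ring addresses become stale: tell the driver now... */
		notify_vring_disabled(dev, i);		/* hypothetical */
		vring_invalidate(dev, vq);

		dev = translate_ring_addresses(dev, i);

		/* ...and notify again once the new addresses are valid. */
		notify_vring_enabled(dev, i);		/* hypothetical */
	}

	return dev;
}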

>  
>>>  	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
>>>  	if (ret < 0) {
>>>  		VHOST_LOG_CONFIG(ERR,
>>> @@ -2802,6 +2800,25 @@ typedef int (*vhost_message_handler_t)(struct
>> virtio_net **pdev,
>>>  		return -1;
>>>  	}
>>>
>>> +	did = dev->vdpa_dev_id;
>>> +	vdpa_dev = rte_vdpa_get_device(did);
>>> +	/* Update ready status. */
>>> +	for (i = 0; i < VHOST_MAX_VRING; i++) {
>>> +		bool cur_ready = vq_is_ready(dev, dev->virtqueue[i]);
>>> +
>>> +		if ((cur_ready && request ==
>> VHOST_USER_SET_VRING_ENABLE &&
>>> +				i == msg.payload.state.index) ||
>>
>> Couldn't we remove above condition? Aren't the callbacks already called in
>> the set_vring_enable handler?
> 
> As we agreed in the design discussion:
> 
> " 3. Same handling of the requests, except that we won't notify the 
>  vdpa driver and the application of vring state changes in the 
>  VHOST_USER_SET_VRING_ENABLE handler."  
> 
> So, I removed it from the set_vring_enable handler.

My bad, the patch context where it is removed made me think it was in
vhost_user_set_vring_err(), so I missed it.

Thinking about it again since the last time we discussed it, we have to send

> Now, the ready state doesn't depend only in VHOST_USER_SET_VRING_ENABLE massage.
>  
>>> +				cur_ready != ready[i]) {
>>> +			if (vdpa_dev && vdpa_dev->ops->set_vring_state)
>>> +				vdpa_dev->ops->set_vring_state(dev->vid, i,
>>> +
>> 	(int)cur_ready);
>>> +
>>> +			if (dev->notify_ops->vring_state_changed)
>>> +				dev->notify_ops->vring_state_changed(dev-
>>> vid,
>>> +							i, (int)cur_ready);
>>> +		}
>>> +	}
>>
>> I think we should move this into a dedicated function, which we would call in
>> every message handler that can modify the ready state.
>>
>> Doing so, we would not have to assume the master sent us disable request
>> for the queue before, ans also would have proper synchronization if the
>> request uses reply-ack feature as it could assume the backend is no more
>> processing the ring once reply-ack is received.
> 
> Makes sense to do it before reply-ack and to create dedicated function to it.
> 
> Doen't the vDPA conf should be called before reply-ack too to be sure queues are ready before reply?

I don't think so, because the backend can start processing the ring
after. What we don't want is the backend continuing to process the
rings after the guest has asked it to stop.

> If so, we should move also the device ready code below (maybe also vdpa conf) to this function too.

So I don't think it is needed.

> But maybe call it directly from this function and not from the specific massage handlers is better, something like the vhost_user_check_and_alloc_queue_pair function style.
> 
> What do you think?
> 
>>>  	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
>>>  		dev->flags |= VIRTIO_DEV_READY;
>>>
>>> @@ -2816,8 +2833,6 @@ typedef int (*vhost_message_handler_t)(struct
>> virtio_net **pdev,
>>>  		}
>>>  	}
>>>
>>> -	did = dev->vdpa_dev_id;
>>> -	vdpa_dev = rte_vdpa_get_device(did);
>>>  	if (vdpa_dev && virtio_is_ready(dev) &&
>>>  			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)
>> &&
>>>  			msg.request.master ==
>> VHOST_USER_SET_VRING_CALL) {
>>
>> Shouldn't check on SET_VRING_CALL above be removed?
> 
> Isn't it is a workaround for something?
> 

Normally, we should no longer need it, as a state change notification will
be sent if the callfd changes.
  
Matan Azrad June 21, 2020, 6:20 a.m. UTC | #5
Hi Maxime

From: Maxime Coquelin:
> Hi Matan,
> 
> On 6/19/20 3:11 PM, Matan Azrad wrote:
> > Hi Maxime
> >
> > Thanks for the fast review.
> > This is first version, let's review it carefully to be sure it is correct.
> > @Xiao Wang, it will be good to hear your idea too.
> > We also need to understand the effect on IFC driver/device...
> > Just to update that I checked this code with the mlx5 adjustments and I
> sent in this series.
> > It works well with the vDPA example application.
> 
> OK.
> 
> > From: Maxime Coquelin:
> >> On 6/18/20 6:28 PM, Matan Azrad wrote:
> >>> Some guest drivers may not configure disabled virtio queues.
> >>>
> >>> In this case, the vhost management never triggers the vDPA device
> >>> configuration because it waits to the device to be ready.
> >>
> >> This is not vDPA-only, even with SW datapath the application's
> >> new_device callback never gets called.
> >>
> > Yes, I wrote it below, I can be more specific here too in the next version.
> >
> >>> The current ready state means that all the virtio queues should be
> >>> configured regardless the enablement status.
> >>>
> >>> In order to support this case, this patch changes the ready state:
> >>> The device is ready when at least 1 queue pair is configured and
> >>> enabled.
> >>>
> >>> So, now, the vDPA driver will be configured when the first queue
> >>> pair is configured and enabled.
> >>>
> >>> Also the queue state operation is change to the next rules:
> >>> 	1. queue becomes ready (enabled and fully configured) -
> >>> 		set_vring_state(enabled).
> >>> 	2. queue becomes not ready - set_vring_state(disabled).
> >>> 	3. queue stay ready and VHOST_USER_SET_VRING_ENABLE massage
> >> was
> >>> 		handled - set_vring_state(enabled).
> >>>
> >>> The parallel operations for the application are adjusted too.
> >>>
> >>> Signed-off-by: Matan Azrad <matan@mellanox.com>
> >>> ---
> >>>  lib/librte_vhost/vhost_user.c | 51
> >>> ++++++++++++++++++++++++++++---------------
> >>>  1 file changed, 33 insertions(+), 18 deletions(-)
> >>>
> >>> diff --git a/lib/librte_vhost/vhost_user.c
> >>> b/lib/librte_vhost/vhost_user.c index b0849b9..cfd5f27 100644
> >>> --- a/lib/librte_vhost/vhost_user.c
> >>> +++ b/lib/librte_vhost/vhost_user.c
> >>> @@ -1295,7 +1295,7 @@
> >>>  {
> >>>  	bool rings_ok;
> >>>
> >>> -	if (!vq)
> >>> +	if (!vq || !vq->enabled)
> >>>  		return false;
> >>>
> >>>  	if (vq_is_packed(dev))
> >>> @@ -1309,24 +1309,27 @@
> >>>  	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;  }
> >>>
> >>> +#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
> >>> +
> >>>  static int
> >>>  virtio_is_ready(struct virtio_net *dev)  {
> >>>  	struct vhost_virtqueue *vq;
> >>>  	uint32_t i;
> >>>
> >>> -	if (dev->nr_vring == 0)
> >>> +	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
> >>>  		return 0;
> >>>
> >>> -	for (i = 0; i < dev->nr_vring; i++) {
> >>> +	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
> >>>  		vq = dev->virtqueue[i];
> >>>
> >>>  		if (!vq_is_ready(dev, vq))
> >>>  			return 0;
> >>>  	}
> >>>
> >>> -	VHOST_LOG_CONFIG(INFO,
> >>> -		"virtio is now ready for processing.\n");
> >>> +	if (!(dev->flags & VIRTIO_DEV_READY))
> >>> +		VHOST_LOG_CONFIG(INFO,
> >>> +			"virtio is now ready for processing.\n");
> >>>  	return 1;
> >>>  }
> >>>
> >>> @@ -1970,8 +1973,6 @@ static int vhost_user_set_vring_err(struct
> >> virtio_net **pdev __rte_unused,
> >>>  	struct virtio_net *dev = *pdev;
> >>>  	int enable = (int)msg->payload.state.num;
> >>>  	int index = (int)msg->payload.state.index;
> >>> -	struct rte_vdpa_device *vdpa_dev;
> >>> -	int did = -1;
> >>>
> >>>  	if (validate_msg_fds(msg, 0) != 0)
> >>>  		return RTE_VHOST_MSG_RESULT_ERR;
> >>> @@ -1980,15 +1981,6 @@ static int vhost_user_set_vring_err(struct
> >> virtio_net **pdev __rte_unused,
> >>>  		"set queue enable: %d to qp idx: %d\n",
> >>>  		enable, index);
> >>>
> >>> -	did = dev->vdpa_dev_id;
> >>> -	vdpa_dev = rte_vdpa_get_device(did);
> >>> -	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
> >>> -		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
> >>> -
> >>> -	if (dev->notify_ops->vring_state_changed)
> >>> -		dev->notify_ops->vring_state_changed(dev->vid,
> >>> -				index, enable);
> >>> -
> >>>  	/* On disable, rings have to be stopped being processed. */
> >>>  	if (!enable && dev->dequeue_zero_copy)
> >>>  		drain_zmbuf_list(dev->virtqueue[index]);
> >>> @@ -2622,11 +2614,13 @@ typedef int
> >> (*vhost_message_handler_t)(struct virtio_net **pdev,
> >>>  	struct virtio_net *dev;
> >>>  	struct VhostUserMsg msg;
> >>>  	struct rte_vdpa_device *vdpa_dev;
> >>> +	bool ready[VHOST_MAX_VRING];
> >>>  	int did = -1;
> >>>  	int ret;
> >>>  	int unlock_required = 0;
> >>>  	bool handled;
> >>>  	int request;
> >>> +	uint32_t i;
> >>>
> >>>  	dev = get_device(vid);
> >>>  	if (dev == NULL)
> >>> @@ -2668,6 +2662,10 @@ typedef int
> (*vhost_message_handler_t)(struct
> >> virtio_net **pdev,
> >>>  		VHOST_LOG_CONFIG(DEBUG, "External request %d\n",
> >> request);
> >>>  	}
> >>>
> >>> +	/* Save ready status for all the VQs before message handle. */
> >>> +	for (i = 0; i < VHOST_MAX_VRING; i++)
> >>> +		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);
> >>> +
> >>
> >> This big array can be avoided if you save the ready status in the
> >> virtqueue once message have been handled.
> >
> > You mean you prefer to save it in virtqueue structure? Desn't it same
> memory ?
> > In any case I don't think 0x100 is so big 😊
> 
> I mean in the stack.

Do you think that 256B is too much for the stack?
 
> And one advantage of saving it in the vq structure is for example you have
> memory hotplug. The vq is in ready state in the beginning and in the end, but
> during the handling the ring host virtual addresses get changed because of
> the munmap/mmap and we need to notify the driver otherwise it will miss it.

Do you mean a VHOST_USER_SET_MEM_TABLE call after the first configuration?

I don't understand what the issue is with saving it on the stack here...

But one advantage of saving it in the virtqueue structure is that the message handler does not need to check the ready state before each message.

I will change it in the next version.

> >
> >>>  	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
> >>>  	if (ret < 0) {
> >>>  		VHOST_LOG_CONFIG(ERR,
> >>> @@ -2802,6 +2800,25 @@ typedef int
> (*vhost_message_handler_t)(struct
> >> virtio_net **pdev,
> >>>  		return -1;
> >>>  	}
> >>>
> >>> +	did = dev->vdpa_dev_id;
> >>> +	vdpa_dev = rte_vdpa_get_device(did);
> >>> +	/* Update ready status. */
> >>> +	for (i = 0; i < VHOST_MAX_VRING; i++) {
> >>> +		bool cur_ready = vq_is_ready(dev, dev->virtqueue[i]);
> >>> +
> >>> +		if ((cur_ready && request ==
> >> VHOST_USER_SET_VRING_ENABLE &&
> >>> +				i == msg.payload.state.index) ||
> >>
> >> Couldn't we remove above condition? Aren't the callbacks already
> >> called in the set_vring_enable handler?
> >
> > As we agreed in the design discussion:
> >
> > " 3. Same handling of the requests, except that we won't notify the
> > vdpa driver and the application of vring state changes in the
> > VHOST_USER_SET_VRING_ENABLE handler."
> >
> > So, I removed it from the set_vring_enable handler.
> 
> My bad, the patch context where it is removed made to think it was in
> vhost_user_set_vring_err(), so I missed it.
> 
> Thinking at it again since last time we discussed it, we have to send the
> notification from the handler in the case
> 
> > Now, the ready state doesn't depend only in
> VHOST_USER_SET_VRING_ENABLE massage.
> >
> >>> +				cur_ready != ready[i]) {
> >>> +			if (vdpa_dev && vdpa_dev->ops->set_vring_state)
> >>> +				vdpa_dev->ops->set_vring_state(dev->vid, i,
> >>> +
> >> 	(int)cur_ready);
> >>> +
> >>> +			if (dev->notify_ops->vring_state_changed)
> >>> +				dev->notify_ops->vring_state_changed(dev-
> >>> vid,
> >>> +							i, (int)cur_ready);
> >>> +		}
> >>> +	}
> >>
> >> I think we should move this into a dedicated function, which we would
> >> call in every message handler that can modify the ready state.
> >>
> >> Doing so, we would not have to assume the master sent us disable
> >> request for the queue before, ans also would have proper
> >> synchronization if the request uses reply-ack feature as it could
> >> assume the backend is no more processing the ring once reply-ack is
> received.
> >
> > Makes sense to do it before reply-ack and to create dedicated function to
> it.
> >
> > Doen't the vDPA conf should be called before reply-ack too to be sure
> queues are ready before reply?
> 
> I don't think so, because the backend can start processing the ring after.
> What we don't want is that the backend continues to process the rings when
> the guest asked to stop doing it.

But "doing configuration after reply" may cause that the a guest kicks a queue while app \ vDPA driver is being configured.
It may lead to some order dependencies in configuration....

In addition, now, the device ready state becomes on only in the same time that a queue becomes on,
so we can do the device ready check (for new_device \ dev_conf calls) only when a queue becomes ready in the same function.

> > If so, we should move also the device ready code below (maybe also vdpa
> conf) to this function too.
> 
> So I don't think it is needed.
> > But maybe call it directly from this function and not from the specific
> massage handlers is better, something like the
> vhost_user_check_and_alloc_queue_pair function style.
> >
> > What do you think?

Any answer here?

> >
> >>>  	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
> >>>  		dev->flags |= VIRTIO_DEV_READY;
> >>>
> >>> @@ -2816,8 +2833,6 @@ typedef int
> (*vhost_message_handler_t)(struct
> >> virtio_net **pdev,
> >>>  		}
> >>>  	}
> >>>
> >>> -	did = dev->vdpa_dev_id;
> >>> -	vdpa_dev = rte_vdpa_get_device(did);
> >>>  	if (vdpa_dev && virtio_is_ready(dev) &&
> >>>  			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)
> >> &&
> >>>  			msg.request.master ==
> >> VHOST_USER_SET_VRING_CALL) {
> >>
> >> Shouldn't check on SET_VRING_CALL above be removed?
> >
> > Isn't it is a workaround for something?
> >
> 
> Normally, we should no more need it, as state change notification will be
> sent if callfd came to change.

Ok, will remove it.
  
Maxime Coquelin June 22, 2020, 8:04 a.m. UTC | #6
Hi,

On 6/21/20 8:20 AM, Matan Azrad wrote:
> Hi Maxime
> 
> From: Maxime Coquelin:
>> Hi Matan,
>>
>> On 6/19/20 3:11 PM, Matan Azrad wrote:
>>> Hi Maxime
>>>
>>> Thanks for the fast review.
>>> This is first version, let's review it carefully to be sure it is correct.
>>> @Xiao Wang, it will be good to hear your idea too.
>>> We also need to understand the effect on IFC driver/device...
>>> Just to update that I checked this code with the mlx5 adjustments and I
>> sent in this series.
>>> It works well with the vDPA example application.
>>
>> OK.
>>
>>> From: Maxime Coquelin:
>>>> On 6/18/20 6:28 PM, Matan Azrad wrote:
>>>>> Some guest drivers may not configure disabled virtio queues.
>>>>>
>>>>> In this case, the vhost management never triggers the vDPA device
>>>>> configuration because it waits to the device to be ready.
>>>>
>>>> This is not vDPA-only, even with SW datapath the application's
>>>> new_device callback never gets called.
>>>>
>>> Yes, I wrote it below, I can be more specific here too in the next version.
>>>
>>>>> The current ready state means that all the virtio queues should be
>>>>> configured regardless the enablement status.
>>>>>
>>>>> In order to support this case, this patch changes the ready state:
>>>>> The device is ready when at least 1 queue pair is configured and
>>>>> enabled.
>>>>>
>>>>> So, now, the vDPA driver will be configured when the first queue
>>>>> pair is configured and enabled.
>>>>>
>>>>> Also the queue state operation is change to the next rules:
>>>>> 	1. queue becomes ready (enabled and fully configured) -
>>>>> 		set_vring_state(enabled).
>>>>> 	2. queue becomes not ready - set_vring_state(disabled).
>>>>> 	3. queue stay ready and VHOST_USER_SET_VRING_ENABLE massage
>>>> was
>>>>> 		handled - set_vring_state(enabled).
>>>>>
>>>>> The parallel operations for the application are adjusted too.
>>>>>
>>>>> Signed-off-by: Matan Azrad <matan@mellanox.com>
>>>>> ---
>>>>>  lib/librte_vhost/vhost_user.c | 51
>>>>> ++++++++++++++++++++++++++++---------------
>>>>>  1 file changed, 33 insertions(+), 18 deletions(-)
>>>>>
>>>>> diff --git a/lib/librte_vhost/vhost_user.c
>>>>> b/lib/librte_vhost/vhost_user.c index b0849b9..cfd5f27 100644
>>>>> --- a/lib/librte_vhost/vhost_user.c
>>>>> +++ b/lib/librte_vhost/vhost_user.c
>>>>> @@ -1295,7 +1295,7 @@
>>>>>  {
>>>>>  	bool rings_ok;
>>>>>
>>>>> -	if (!vq)
>>>>> +	if (!vq || !vq->enabled)
>>>>>  		return false;
>>>>>
>>>>>  	if (vq_is_packed(dev))
>>>>> @@ -1309,24 +1309,27 @@
>>>>>  	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;  }
>>>>>
>>>>> +#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
>>>>> +
>>>>>  static int
>>>>>  virtio_is_ready(struct virtio_net *dev)  {
>>>>>  	struct vhost_virtqueue *vq;
>>>>>  	uint32_t i;
>>>>>
>>>>> -	if (dev->nr_vring == 0)
>>>>> +	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
>>>>>  		return 0;
>>>>>
>>>>> -	for (i = 0; i < dev->nr_vring; i++) {
>>>>> +	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
>>>>>  		vq = dev->virtqueue[i];
>>>>>
>>>>>  		if (!vq_is_ready(dev, vq))
>>>>>  			return 0;
>>>>>  	}
>>>>>
>>>>> -	VHOST_LOG_CONFIG(INFO,
>>>>> -		"virtio is now ready for processing.\n");
>>>>> +	if (!(dev->flags & VIRTIO_DEV_READY))
>>>>> +		VHOST_LOG_CONFIG(INFO,
>>>>> +			"virtio is now ready for processing.\n");
>>>>>  	return 1;
>>>>>  }
>>>>>
>>>>> @@ -1970,8 +1973,6 @@ static int vhost_user_set_vring_err(struct
>>>> virtio_net **pdev __rte_unused,
>>>>>  	struct virtio_net *dev = *pdev;
>>>>>  	int enable = (int)msg->payload.state.num;
>>>>>  	int index = (int)msg->payload.state.index;
>>>>> -	struct rte_vdpa_device *vdpa_dev;
>>>>> -	int did = -1;
>>>>>
>>>>>  	if (validate_msg_fds(msg, 0) != 0)
>>>>>  		return RTE_VHOST_MSG_RESULT_ERR;
>>>>> @@ -1980,15 +1981,6 @@ static int vhost_user_set_vring_err(struct
>>>> virtio_net **pdev __rte_unused,
>>>>>  		"set queue enable: %d to qp idx: %d\n",
>>>>>  		enable, index);
>>>>>
>>>>> -	did = dev->vdpa_dev_id;
>>>>> -	vdpa_dev = rte_vdpa_get_device(did);
>>>>> -	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
>>>>> -		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
>>>>> -
>>>>> -	if (dev->notify_ops->vring_state_changed)
>>>>> -		dev->notify_ops->vring_state_changed(dev->vid,
>>>>> -				index, enable);
>>>>> -
>>>>>  	/* On disable, rings have to be stopped being processed. */
>>>>>  	if (!enable && dev->dequeue_zero_copy)
>>>>>  		drain_zmbuf_list(dev->virtqueue[index]);
>>>>> @@ -2622,11 +2614,13 @@ typedef int
>>>> (*vhost_message_handler_t)(struct virtio_net **pdev,
>>>>>  	struct virtio_net *dev;
>>>>>  	struct VhostUserMsg msg;
>>>>>  	struct rte_vdpa_device *vdpa_dev;
>>>>> +	bool ready[VHOST_MAX_VRING];
>>>>>  	int did = -1;
>>>>>  	int ret;
>>>>>  	int unlock_required = 0;
>>>>>  	bool handled;
>>>>>  	int request;
>>>>> +	uint32_t i;
>>>>>
>>>>>  	dev = get_device(vid);
>>>>>  	if (dev == NULL)
>>>>> @@ -2668,6 +2662,10 @@ typedef int
>> (*vhost_message_handler_t)(struct
>>>> virtio_net **pdev,
>>>>>  		VHOST_LOG_CONFIG(DEBUG, "External request %d\n",
>>>> request);
>>>>>  	}
>>>>>
>>>>> +	/* Save ready status for all the VQs before message handle. */
>>>>> +	for (i = 0; i < VHOST_MAX_VRING; i++)
>>>>> +		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);
>>>>> +
>>>>
>>>> This big array can be avoided if you save the ready status in the
>>>> virtqueue once message have been handled.
>>>
>>> You mean you prefer to save it in virtqueue structure? Desn't it same
>> memory ?
>>> In any case I don't think 0x100 is so big 😊
>>
>> I mean in the stack.
> 
> Do you think that 256B is too much for stack?
>  
>> And one advantage of saving it in the vq structure is for example you have
>> memory hotplug. The vq is in ready state in the beginning and in the end, but
>> during the handling the ring host virtual addresses get changed because of
>> the munmap/mmap and we need to notify the driver otherwise it will miss it.
> 
> Do you mean VHOST_USER_SET_MEM_TABLE call after first configuration?
> 
> I don't understand what is the issue of saving it in stack here....

The issue is that if you only check the ready state before and after the
message affecting the ring is handled, it can be ready at both stages,
while the rings have changed and the state change callback should have
been called.

Please check the example patch I sent on Friday; it takes care of
invalidating the ring state and calling the state change callback.

> But one advantage of saving it in virtqueue structure is that the message handler should not check the ready state before each message.
> 
> I will change it in next version.
> 
>>>
>>>>>  	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
>>>>>  	if (ret < 0) {
>>>>>  		VHOST_LOG_CONFIG(ERR,
>>>>> @@ -2802,6 +2800,25 @@ typedef int
>> (*vhost_message_handler_t)(struct
>>>> virtio_net **pdev,
>>>>>  		return -1;
>>>>>  	}
>>>>>
>>>>> +	did = dev->vdpa_dev_id;
>>>>> +	vdpa_dev = rte_vdpa_get_device(did);
>>>>> +	/* Update ready status. */
>>>>> +	for (i = 0; i < VHOST_MAX_VRING; i++) {
>>>>> +		bool cur_ready = vq_is_ready(dev, dev->virtqueue[i]);
>>>>> +
>>>>> +		if ((cur_ready && request ==
>>>> VHOST_USER_SET_VRING_ENABLE &&
>>>>> +				i == msg.payload.state.index) ||
>>>>
>>>> Couldn't we remove above condition? Aren't the callbacks already
>>>> called in the set_vring_enable handler?
>>>
>>> As we agreed in the design discussion:
>>>
>>> " 3. Same handling of the requests, except that we won't notify the
>>> vdpa driver and the application of vring state changes in the
>>> VHOST_USER_SET_VRING_ENABLE handler."
>>>
>>> So, I removed it from the set_vring_enable handler.
>>
>> My bad, the patch context where it is removed made to think it was in
>> vhost_user_set_vring_err(), so I missed it.
>>
>> Thinking at it again since last time we discussed it, we have to send the
>> notification from the handler in the case
>>
>>> Now, the ready state doesn't depend only in
>> VHOST_USER_SET_VRING_ENABLE massage.
>>>
>>>>> +				cur_ready != ready[i]) {
>>>>> +			if (vdpa_dev && vdpa_dev->ops->set_vring_state)
>>>>> +				vdpa_dev->ops->set_vring_state(dev->vid, i,
>>>>> +
>>>> 	(int)cur_ready);
>>>>> +
>>>>> +			if (dev->notify_ops->vring_state_changed)
>>>>> +				dev->notify_ops->vring_state_changed(dev-
>>>>> vid,
>>>>> +							i, (int)cur_ready);
>>>>> +		}
>>>>> +	}
>>>>
>>>> I think we should move this into a dedicated function, which we would
>>>> call in every message handler that can modify the ready state.
>>>>
>>>> Doing so, we would not have to assume the master sent us disable
>>>> request for the queue before, ans also would have proper
>>>> synchronization if the request uses reply-ack feature as it could
>>>> assume the backend is no more processing the ring once reply-ack is
>> received.
>>>
>>> Makes sense to do it before reply-ack and to create dedicated function to
>> it.
>>>
>>> Doen't the vDPA conf should be called before reply-ack too to be sure
>> queues are ready before reply?
>>
>> I don't think so, because the backend can start processing the ring after.
>> What we don't want is that the backend continues to process the rings when
>> the guest asked to stop doing it.
> 
> But "doing configuration after reply" may cause that the a guest kicks a queue while app \ vDPA driver is being configured.
> It may lead to some order dependencies in configuration....
I get your point, we can try to move the configuration before the reply.

But looking at the qemu source code, neither SET_VRING_KICK nor
SET_VRING_CALL nor SET_VRING_ENABLE requests reply-ack, so it won't
have any effect.

> In addition, now, the device ready state becomes on only in the same time that a queue becomes on,
> so we can do the device ready check (for new_device \ dev_conf calls) only when a queue becomes ready in the same function.

If you want, we can try that too.

>>> If so, we should move also the device ready code below (maybe also vdpa
>> conf) to this function too.
>>
>> So I don't think it is needed.
>>> But maybe call it directly from this function and not from the specific
>> massage handlers is better, something like the
>> vhost_user_check_and_alloc_queue_pair function style.
>>>
>>> What do you think?
> 
> Any answer here?

To move the .new_device and .dev_conf callbacks into the same function
that sends the vring change notifications? Yes, we can do that I think.

>>>
>>>>>  	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
>>>>>  		dev->flags |= VIRTIO_DEV_READY;
>>>>>
>>>>> @@ -2816,8 +2833,6 @@ typedef int
>> (*vhost_message_handler_t)(struct
>>>> virtio_net **pdev,
>>>>>  		}
>>>>>  	}
>>>>>
>>>>> -	did = dev->vdpa_dev_id;
>>>>> -	vdpa_dev = rte_vdpa_get_device(did);
>>>>>  	if (vdpa_dev && virtio_is_ready(dev) &&
>>>>>  			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)
>>>> &&
>>>>>  			msg.request.master ==
>>>> VHOST_USER_SET_VRING_CALL) {
>>>>
>>>> Shouldn't check on SET_VRING_CALL above be removed?
>>>
>>> Isn't it is a workaround for something?
>>>
>>
>> Normally, we should no more need it, as state change notification will be
>> sent if callfd came to change.
> 
> Ok, will remove it.
>
  
Matan Azrad June 22, 2020, 8:41 a.m. UTC | #7
From: Maxime Coquelin
> Hi,
> 
> On 6/21/20 8:20 AM, Matan Azrad wrote:
> > Hi Maxime
> >
> > From: Maxime Coquelin:
> >> Hi Matan,
> >>
> >> On 6/19/20 3:11 PM, Matan Azrad wrote:
> >>> Hi Maxime
> >>>
> >>> Thanks for the fast review.
> >>> This is first version, let's review it carefully to be sure it is correct.
> >>> @Xiao Wang, it will be good to hear your idea too.
> >>> We also need to understand the effect on IFC driver/device...
> >>> Just to update that I checked this code with the mlx5 adjustments
> >>> and I
> >> sent in this series.
> >>> It works well with the vDPA example application.
> >>
> >> OK.
> >>
> >>> From: Maxime Coquelin:
> >>>> On 6/18/20 6:28 PM, Matan Azrad wrote:
> >>>>> Some guest drivers may not configure disabled virtio queues.
> >>>>>
> >>>>> In this case, the vhost management never triggers the vDPA device
> >>>>> configuration because it waits to the device to be ready.
> >>>>
> >>>> This is not vDPA-only, even with SW datapath the application's
> >>>> new_device callback never gets called.
> >>>>
> >>> Yes, I wrote it below, I can be more specific here too in the next version.
> >>>
> >>>>> The current ready state means that all the virtio queues should be
> >>>>> configured regardless the enablement status.
> >>>>>
> >>>>> In order to support this case, this patch changes the ready state:
> >>>>> The device is ready when at least 1 queue pair is configured and
> >>>>> enabled.
> >>>>>
> >>>>> So, now, the vDPA driver will be configured when the first queue
> >>>>> pair is configured and enabled.
> >>>>>
> >>>>> Also the queue state operation is change to the next rules:
> >>>>> 	1. queue becomes ready (enabled and fully configured) -
> >>>>> 		set_vring_state(enabled).
> >>>>> 	2. queue becomes not ready - set_vring_state(disabled).
> >>>>> 	3. queue stay ready and VHOST_USER_SET_VRING_ENABLE massage
> >>>> was
> >>>>> 		handled - set_vring_state(enabled).
> >>>>>
> >>>>> The parallel operations for the application are adjusted too.
> >>>>>
> >>>>> Signed-off-by: Matan Azrad <matan@mellanox.com>
> >>>>> ---
> >>>>>  lib/librte_vhost/vhost_user.c | 51
> >>>>> ++++++++++++++++++++++++++++---------------
> >>>>>  1 file changed, 33 insertions(+), 18 deletions(-)
> >>>>>
> >>>>> diff --git a/lib/librte_vhost/vhost_user.c
> >>>>> b/lib/librte_vhost/vhost_user.c index b0849b9..cfd5f27 100644
> >>>>> --- a/lib/librte_vhost/vhost_user.c
> >>>>> +++ b/lib/librte_vhost/vhost_user.c
> >>>>> @@ -1295,7 +1295,7 @@
> >>>>>  {
> >>>>>  	bool rings_ok;
> >>>>>
> >>>>> -	if (!vq)
> >>>>> +	if (!vq || !vq->enabled)
> >>>>>  		return false;
> >>>>>
> >>>>>  	if (vq_is_packed(dev))
> >>>>> @@ -1309,24 +1309,27 @@
> >>>>>  	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;  }
> >>>>>
> >>>>> +#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
> >>>>> +
> >>>>>  static int
> >>>>>  virtio_is_ready(struct virtio_net *dev)  {
> >>>>>  	struct vhost_virtqueue *vq;
> >>>>>  	uint32_t i;
> >>>>>
> >>>>> -	if (dev->nr_vring == 0)
> >>>>> +	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
> >>>>>  		return 0;
> >>>>>
> >>>>> -	for (i = 0; i < dev->nr_vring; i++) {
> >>>>> +	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
> >>>>>  		vq = dev->virtqueue[i];
> >>>>>
> >>>>>  		if (!vq_is_ready(dev, vq))
> >>>>>  			return 0;
> >>>>>  	}
> >>>>>
> >>>>> -	VHOST_LOG_CONFIG(INFO,
> >>>>> -		"virtio is now ready for processing.\n");
> >>>>> +	if (!(dev->flags & VIRTIO_DEV_READY))
> >>>>> +		VHOST_LOG_CONFIG(INFO,
> >>>>> +			"virtio is now ready for processing.\n");
> >>>>>  	return 1;
> >>>>>  }
> >>>>>
> >>>>> @@ -1970,8 +1973,6 @@ static int vhost_user_set_vring_err(struct
> >>>> virtio_net **pdev __rte_unused,
> >>>>>  	struct virtio_net *dev = *pdev;
> >>>>>  	int enable = (int)msg->payload.state.num;
> >>>>>  	int index = (int)msg->payload.state.index;
> >>>>> -	struct rte_vdpa_device *vdpa_dev;
> >>>>> -	int did = -1;
> >>>>>
> >>>>>  	if (validate_msg_fds(msg, 0) != 0)
> >>>>>  		return RTE_VHOST_MSG_RESULT_ERR; @@ -1980,15 +1981,6
> @@ static
> >>>>> int vhost_user_set_vring_err(struct
> >>>> virtio_net **pdev __rte_unused,
> >>>>>  		"set queue enable: %d to qp idx: %d\n",
> >>>>>  		enable, index);
> >>>>>
> >>>>> -	did = dev->vdpa_dev_id;
> >>>>> -	vdpa_dev = rte_vdpa_get_device(did);
> >>>>> -	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
> >>>>> -		vdpa_dev->ops->set_vring_state(dev->vid, index,
> enable);
> >>>>> -
> >>>>> -	if (dev->notify_ops->vring_state_changed)
> >>>>> -		dev->notify_ops->vring_state_changed(dev->vid,
> >>>>> -				index, enable);
> >>>>> -
> >>>>>  	/* On disable, rings have to be stopped being processed. */
> >>>>>  	if (!enable && dev->dequeue_zero_copy)
> >>>>>  		drain_zmbuf_list(dev->virtqueue[index]);
> >>>>> @@ -2622,11 +2614,13 @@ typedef int
> >>>> (*vhost_message_handler_t)(struct virtio_net **pdev,
> >>>>>  	struct virtio_net *dev;
> >>>>>  	struct VhostUserMsg msg;
> >>>>>  	struct rte_vdpa_device *vdpa_dev;
> >>>>> +	bool ready[VHOST_MAX_VRING];
> >>>>>  	int did = -1;
> >>>>>  	int ret;
> >>>>>  	int unlock_required = 0;
> >>>>>  	bool handled;
> >>>>>  	int request;
> >>>>> +	uint32_t i;
> >>>>>
> >>>>>  	dev = get_device(vid);
> >>>>>  	if (dev == NULL)
> >>>>> @@ -2668,6 +2662,10 @@ typedef int
> >> (*vhost_message_handler_t)(struct
> >>>> virtio_net **pdev,
> >>>>>  		VHOST_LOG_CONFIG(DEBUG, "External request %d\n",
> >>>> request);
> >>>>>  	}
> >>>>>
> >>>>> +	/* Save ready status for all the VQs before message handle.
> */
> >>>>> +	for (i = 0; i < VHOST_MAX_VRING; i++)
> >>>>> +		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);
> >>>>> +
> >>>>
> >>>> This big array can be avoided if you save the ready status in the
> >>>> virtqueue once message have been handled.
> >>>
> >>> You mean you prefer to save it in virtqueue structure? Desn't it
> >>> same
> >> memory ?
> >>> In any case I don't think 0x100 is so big 😊
> >>
> >> I mean in the stack.
> >
> > Do you think that 256B is too much for stack?
> >
> >> And one advantage of saving it in the vq structure is for example you
> >> have memory hotplug. The vq is in ready state in the beginning and in
> >> the end, but during the handling the ring host virtual addresses get
> >> changed because of the munmap/mmap and we need to notify the driver
> otherwise it will miss it.
> >
> > Do you mean VHOST_USER_SET_MEM_TABLE call after first configuration?
> >
> > I don't understand what is the issue of saving it in stack here....
> 
> The issue is if you only check ready state only before and after the message
> affecting the ring is handled, it can be ready at both stages, while the rings
> have changed and state change callback should have been called.

But in this version I checked twice, before the message handler and after the message handler, so it should catch any update.

In any case, as I said, I will move the ready state to the virtqueue structure in order to save the check before the message handler.
 
> Please check the example patch I sent on Friday, it takes care of invalidating
> the ring state and call the state change callback.
> 
> > But one advantage of saving it in virtqueue structure is that the message
> handler should not check the ready state before each message.
> >
> > I will change it in next version.
> >
> >>>
> >>>>>  	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
> >>>>>  	if (ret < 0) {
> >>>>>  		VHOST_LOG_CONFIG(ERR,
> >>>>> @@ -2802,6 +2800,25 @@ typedef int
> >> (*vhost_message_handler_t)(struct
> >>>> virtio_net **pdev,
> >>>>>  		return -1;
> >>>>>  	}
> >>>>>
> >>>>> +	did = dev->vdpa_dev_id;
> >>>>> +	vdpa_dev = rte_vdpa_get_device(did);
> >>>>> +	/* Update ready status. */
> >>>>> +	for (i = 0; i < VHOST_MAX_VRING; i++) {
> >>>>> +		bool cur_ready = vq_is_ready(dev, dev-
> >virtqueue[i]);
> >>>>> +
> >>>>> +		if ((cur_ready && request ==
> >>>> VHOST_USER_SET_VRING_ENABLE &&
> >>>>> +				i == msg.payload.state.index) ||
> >>>>
> >>>> Couldn't we remove above condition? Aren't the callbacks already
> >>>> called in the set_vring_enable handler?
> >>>
> >>> As we agreed in the design discussion:
> >>>
> >>> " 3. Same handling of the requests, except that we won't notify the
> >>> vdpa driver and the application of vring state changes in the
> >>> VHOST_USER_SET_VRING_ENABLE handler."
> >>>
> >>> So, I removed it from the set_vring_enable handler.
> >>
> >> My bad, the patch context where it is removed made to think it was in
> >> vhost_user_set_vring_err(), so I missed it.
> >>
> >> Thinking at it again since last time we discussed it, we have to send
> >> the notification from the handler in the case
> >>
> >>> Now, the ready state doesn't depend only in
> >> VHOST_USER_SET_VRING_ENABLE massage.
> >>>
> >>>>> +				cur_ready != ready[i]) {
> >>>>> +			if (vdpa_dev && vdpa_dev->ops-
> >set_vring_state)
> >>>>> +				vdpa_dev->ops-
> >set_vring_state(dev->vid, i,
> >>>>> +
> >>>> 	(int)cur_ready);
> >>>>> +
> >>>>> +			if (dev->notify_ops->vring_state_changed)
> >>>>> +				dev->notify_ops-
> >vring_state_changed(dev-
> >>>>> vid,
> >>>>> +							i,
> (int)cur_ready);
> >>>>> +		}
> >>>>> +	}
> >>>>
> >>>> I think we should move this into a dedicated function, which we
> >>>> would call in every message handler that can modify the ready state.
> >>>>
> >>>> Doing so, we would not have to assume the master sent us disable
> >>>> request for the queue before, ans also would have proper
> >>>> synchronization if the request uses reply-ack feature as it could
> >>>> assume the backend is no more processing the ring once reply-ack is
> >> received.
> >>>
> >>> Makes sense to do it before reply-ack and to create dedicated
> >>> function to
> >> it.
> >>>
> >>> Doen't the vDPA conf should be called before reply-ack too to be
> >>> sure
> >> queues are ready before reply?
> >>
> >> I don't think so, because the backend can start processing the ring after.
> >> What we don't want is that the backend continues to process the rings
> >> when the guest asked to stop doing it.
> >
> > But "doing configuration after reply" may cause that the a guest kicks a
> queue while app \ vDPA driver is being configured.
> > It may lead to some order dependencies in configuration....
> I get your point, we can try to move the configuration before the reply.
> 
> But looking at qemu source code, neither SET_VRING_KICK nor
> SET_VRING_CALL nor SET_VRING_ENABLE request for reply-ack, so it won't
> have any effect.
> 
> > In addition, now, the device ready state becomes on only in the same
> > time that a queue becomes on, so we can do the device ready check (for
> new_device \ dev_conf calls) only when a queue becomes ready in the same
> function.
> 
> If you want, we can do try that too.
> 
> >>> If so, we should move also the device ready code below (maybe also
> >>> vdpa
> >> conf) to this function too.
> >>
> >> So I don't think it is needed.
> >>> But maybe call it directly from this function and not from the
> >>> specific
> >> massage handlers is better, something like the
> >> vhost_user_check_and_alloc_queue_pair function style.
> >>>
> >>> What do you think?
> >
> > Any answer here?
> 
> To move the .new_device and .dev_conf callbacks in the same fonction that
> sends the vring change notifications? Yes, we can do that I think.
> 
> >>>
> >>>>>  	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
> >>>>>  		dev->flags |= VIRTIO_DEV_READY;
> >>>>>
> >>>>> @@ -2816,8 +2833,6 @@ typedef int
> >> (*vhost_message_handler_t)(struct
> >>>> virtio_net **pdev,
> >>>>>  		}
> >>>>>  	}
> >>>>>
> >>>>> -	did = dev->vdpa_dev_id;
> >>>>> -	vdpa_dev = rte_vdpa_get_device(did);
> >>>>>  	if (vdpa_dev && virtio_is_ready(dev) &&
> >>>>>  			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)
> >>>> &&
> >>>>>  			msg.request.master ==
> >>>> VHOST_USER_SET_VRING_CALL) {
> >>>>
> >>>> Shouldn't check on SET_VRING_CALL above be removed?
> >>>
> >>> Isn't it is a workaround for something?
> >>>
> >>
> >> Normally, we should no more need it, as state change notification
> >> will be sent if callfd came to change.
> >
> > Ok, will remove it.
> >
  
Maxime Coquelin June 22, 2020, 8:56 a.m. UTC | #8
On 6/22/20 10:41 AM, Matan Azrad wrote:
>> The issue is if you only check ready state only before and after the message
>> affecting the ring is handled, it can be ready at both stages, while the rings
>> have changed and state change callback should have been called.
> But in this version I checked twice, before message handler and after message handler, so it should catch any update.

No, this is not enough; we also have to check during some handlers, so
that the ready state is invalidated, because sometimes it will be ready
both before and after the message handler but with different values.

That's what I did in my example patch:
@@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct virtio_net
**pdev, struct VhostUserMsg *msg,

...

        if (vq->kickfd >= 0)
                close(vq->kickfd);
+
+       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+
+       vhost_user_update_vring_state(dev, file.index);
+
        vq->kickfd = file.fd;


Without that, the ready check will return ready both before and after the
kickfd change, and the driver won't be notified.
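
The vhost_user_update_vring_state() helper itself is not shown in the
thread; a minimal sketch of what such a helper could look like, assuming a
per-virtqueue 'ready' flag that caches the last state notified to the vDPA
driver and the application (the field, the signature and the body are
assumptions, not necessarily what the example patch actually does):

/* Sketch only: 'vq->ready' is an assumed field caching the last notified
 * state; it is not part of the current struct vhost_virtqueue. */
static void
vhost_user_update_vring_state(struct virtio_net *dev, uint32_t index)
{
	struct vhost_virtqueue *vq = dev->virtqueue[index];
	struct rte_vdpa_device *vdpa_dev;
	bool ready = vq_is_ready(dev, vq);

	if (vq == NULL || vq->ready == ready)
		return;

	vq->ready = ready;

	vdpa_dev = rte_vdpa_get_device(dev->vdpa_dev_id);
	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
		vdpa_dev->ops->set_vring_state(dev->vid, index, (int)ready);

	if (dev->notify_ops->vring_state_changed)
		dev->notify_ops->vring_state_changed(dev->vid, index,
				(int)ready);
}

Called right after the old kickfd is invalidated, such a helper would
notify a transition to not-ready; once the new fd is in place and the ring
is fully configured again, the next call would notify ready.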

> In any case, as I said, I will move the ready memory to the virtiqueue structure in order to save the check before the message handler.
>  
>> Please check the example patch I sent on Friday, it takes care of invalidating
>> the ring state and call the state change callback.
  
Matan Azrad June 22, 2020, 10:06 a.m. UTC | #9
Hi Maxime

From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Monday, June 22, 2020 11:56 AM
> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> <xiao.w.wang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> 
> 
> 
> On 6/22/20 10:41 AM, Matan Azrad wrote:
> >> The issue is if you only check ready state only before and after the
> >> message affecting the ring is handled, it can be ready at both
> >> stages, while the rings have changed and state change callback should
> have been called.
> > But in this version I checked twice, before message handler and after
> message handler, so it should catch any update.
> 
> No, this is not enough, we have to check also during some handlers, so that
> the ready state is invalidated because sometimes it will be ready before and
> after the message handler but with different values.
> 
> That's what I did in my example patch:
> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct virtio_net
> **pdev, struct VhostUserMsg *msg,
> 
> ...
> 
>         if (vq->kickfd >= 0)
>                 close(vq->kickfd);
> +
> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> +
> +       vhost_user_update_vring_state(dev, file.index);
> +
>         vq->kickfd = file.fd;
> 
> 
> Without that, the ready check will return ready before and after the kickfd
> changed and the driver won't be notified.

The driver will be notified in the next VHOST_USER_SET_VRING_ENABLE message according to v1.

One of the assumptions we agreed on in the design mail is that it doesn't make sense for QEMU to change the queue configuration without enabling the queue again.
Because of that we decided to force calling the state callback again when QEMU sends a VHOST_USER_SET_VRING_ENABLE(1) message, even if the queue is already ready.
So when the driver/app sees an enable->enable state transition, it should take into account that the queue configuration was probably changed.

I think that this assumption is correct according to the QEMU code.

That's why I prefer to collect all the ready checks and callbacks (queue state and device new\conf) into one function that will be called after the message handler:
Pseudo:
 vhost_user_update_ready_statuses() {
	switch (msg):
		case enable:
			if(enable is 1)
				force queue state =1.
		case callfd
		case kickfd
				.....
		Check queue and device ready + call callbacks if needed..
		Default
			Return;
}
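
For illustration only, a rough C sketch of the pseudo-code above, reusing
the helpers already present in vhost_user.c; the exact set of requests
handled, the per-virtqueue 'ready' flag and the
vhost_user_update_vring_state() helper (sketched earlier in the thread) are
assumptions, not part of the posted series:

static void
vhost_user_update_ready_statuses(struct virtio_net *dev,
		const struct VhostUserMsg *msg)
{
	struct rte_vdpa_device *vdpa_dev;
	uint32_t i;

	switch (msg->request.master) {
	case VHOST_USER_SET_VRING_ENABLE:
		/* Force a notification on enable(1), even if the queue was
		 * already ready ('ready' is an assumed field). */
		if (msg->payload.state.num == 1 &&
				dev->virtqueue[msg->payload.state.index])
			dev->virtqueue[msg->payload.state.index]->ready = false;
		break;
	case VHOST_USER_SET_VRING_KICK:
	case VHOST_USER_SET_VRING_CALL:
	case VHOST_USER_SET_VRING_NUM:
	case VHOST_USER_SET_VRING_ADDR:
	case VHOST_USER_SET_MEM_TABLE:
		break;
	default:
		return; /* request cannot change the ready state */
	}

	/* Per-ring state change notifications. */
	for (i = 0; i < VHOST_MAX_VRING; i++)
		vhost_user_update_vring_state(dev, i);

	/* Device-level readiness: new_device / vDPA dev_conf once the first
	 * queue pair is configured and enabled. */
	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
		dev->flags |= VIRTIO_DEV_READY;
		if (dev->notify_ops->new_device(dev->vid) == 0)
			dev->flags |= VIRTIO_DEV_RUNNING;
	}

	vdpa_dev = rte_vdpa_get_device(dev->vdpa_dev_id);
	if (vdpa_dev && virtio_is_ready(dev) &&
			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
		if (vdpa_dev->ops->dev_conf)
			vdpa_dev->ops->dev_conf(dev->vid);
		dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
	}
}

Calling this once after the message handler (and before any reply-ack is
sent) would centralize both the per-ring and the device-level ready
handling in a single place.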
  
Maxime Coquelin June 22, 2020, 12:32 p.m. UTC | #10
On 6/22/20 12:06 PM, Matan Azrad wrote:
> 
> Hi Maxime
> 
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Monday, June 22, 2020 11:56 AM
>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>> <xiao.w.wang@intel.com>
>> Cc: dev@dpdk.org
>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>
>>
>>
>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>> The issue is if you only check ready state only before and after the
>>>> message affecting the ring is handled, it can be ready at both
>>>> stages, while the rings have changed and state change callback should
>> have been called.
>>> But in this version I checked twice, before message handler and after
>> message handler, so it should catch any update.
>>
>> No, this is not enough, we have to check also during some handlers, so that
>> the ready state is invalidated because sometimes it will be ready before and
>> after the message handler but with different values.
>>
>> That's what I did in my example patch:
>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct virtio_net
>> **pdev, struct VhostUserMsg *msg,
>>
>> ...
>>
>>         if (vq->kickfd >= 0)
>>                 close(vq->kickfd);
>> +
>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>> +
>> +       vhost_user_update_vring_state(dev, file.index);
>> +
>>         vq->kickfd = file.fd;
>>
>>
>> Without that, the ready check will return ready before and after the kickfd
>> changed and the driver won't be notified.
> 
> The driver will be notified in the next VHOST_USER_SET_VRING_ENABLE message according to v1.
> 
> One of our assumption we agreed on in the design mail is that it doesn't make sense that QEMU will change queue configuration without enabling the queue again.
> Because of that we decided to force calling state callback again when QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if the queue is already ready.
> So when driver/app see state enable->enable, it should take into account that the queue configuration was probably changed.
> 
> I think that this assumption is correct according to the QEMU code.

Yes, this was our initial assumption.
But now, looking into the details of the implementation, I find it is
even cleaner and clearer not to make this assumption.

> That's why I prefer to collect all the ready checks callbacks (queue state and device new\conf) to one function that will be called after the message handler:
> Pseudo:
>  vhost_user_update_ready_statuses() {
> 	switch (msg):
> 		case enable:
> 			if(enable is 1)
> 				force queue state =1.
> 		case callfd
> 		case kickfd
> 				.....
> 		Check queue and device ready + call callbacks if needed..
> 		Default
> 			Return;
> }

I find it more natural to "invalidate" the ready state where it is handled
(after vring_invalidate(), before setting the new FD for call & kick, ...)
  
Matan Azrad June 22, 2020, 1:43 p.m. UTC | #11
From: Maxime Coquelin:
> Sent: Monday, June 22, 2020 3:33 PM
> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> <xiao.w.wang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> 
> 
> 
> On 6/22/20 12:06 PM, Matan Azrad wrote:
> >
> > Hi Maxime
> >
> > From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >> Sent: Monday, June 22, 2020 11:56 AM
> >> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >> <xiao.w.wang@intel.com>
> >> Cc: dev@dpdk.org
> >> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> >>
> >>
> >>
> >> On 6/22/20 10:41 AM, Matan Azrad wrote:
> >>>> The issue is if you only check ready state only before and after
> >>>> the message affecting the ring is handled, it can be ready at both
> >>>> stages, while the rings have changed and state change callback
> >>>> should
> >> have been called.
> >>> But in this version I checked twice, before message handler and
> >>> after
> >> message handler, so it should catch any update.
> >>
> >> No, this is not enough, we have to check also during some handlers,
> >> so that the ready state is invalidated because sometimes it will be
> >> ready before and after the message handler but with different values.
> >>
> >> That's what I did in my example patch:
> >> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct virtio_net
> >> **pdev, struct VhostUserMsg *msg,
> >>
> >> ...
> >>
> >>         if (vq->kickfd >= 0)
> >>                 close(vq->kickfd);
> >> +
> >> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> >> +
> >> +       vhost_user_update_vring_state(dev, file.index);
> >> +
> >>         vq->kickfd = file.fd;
> >>
> >>
> >> Without that, the ready check will return ready before and after the
> >> kickfd changed and the driver won't be notified.
> >
> > The driver will be notified in the next VHOST_USER_SET_VRING_ENABLE
> message according to v1.
> >
> > One of our assumption we agreed on in the design mail is that it doesn't
> make sense that QEMU will change queue configuration without enabling
> the queue again.
> > Because of that we decided to force calling state callback again when
> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if the
> queue is already ready.
> > So when driver/app see state enable->enable, it should take into account
> that the queue configuration was probably changed.
> >
> > I think that this assumption is correct according to the QEMU code.
> 
> Yes, this was our initial assumption.
> But now looking into the details of the implementation, I find it is even
> cleaner & clearer not to do this assumption.
> 
> > That's why I prefer to collect all the ready checks callbacks (queue state and
> device new\conf) to one function that will be called after the message
> handler:
> > Pseudo:
> >  vhost_user_update_ready_statuses() {
> > 	switch (msg):
> > 		case enable:
> > 			if(enable is 1)
> > 				force queue state =1.
> > 		case callfd
> > 		case kickfd
> > 				.....
> > 		Check queue and device ready + call callbacks if needed..
> > 		Default
> > 			Return;
> > }
> 
> I find it more natural to "invalidate" ready state where it is handled (after
> vring_invalidate(), before setting new FD for call & kick, ...)

I think that if you go in this direction, then when the first queue pair is invalidated you also need to notify the app\driver about the device ready change.
Also, it will cause 2 notifications to the driver instead of one in case of an FD change.

Why not take this correct assumption and update the ready state at only one point in the code instead of doing it in all the configuration handlers around?
IMO, it is correct, less intrusive, simpler, clearer and cleaner.
In addition, it preserves the style already used in this function in:
- vhost_user_check_and_alloc_queue_pair
- 	switch (request) {
	case VHOST_USER_SET_FEATURES:
	case VHOST_USER_SET_PROTOCOL_FEATURES:
	case VHOST_USER_SET_OWNER:
	case VHOST_USER_SET_MEM_TABLE:
	case VHOST_USER_SET_LOG_BASE:
	case VHOST_USER_SET_LOG_FD:
	case VHOST_USER_SET_VRING_NUM:
	case VHOST_USER_SET_VRING_ADDR:
	case VHOST_USER_SET_VRING_BASE:
	case VHOST_USER_SET_VRING_KICK:
	case VHOST_USER_SET_VRING_CALL:
	case VHOST_USER_SET_VRING_ERR:
	case VHOST_USER_SET_VRING_ENABLE:
	case VHOST_USER_SEND_RARP:
	case VHOST_USER_NET_SET_MTU:
	case VHOST_USER_SET_SLAVE_REQ_FD:
			vhost_user_lock_all_queue_pairs(dev);

Matan
  
Maxime Coquelin June 22, 2020, 2:55 p.m. UTC | #12
On 6/22/20 3:43 PM, Matan Azrad wrote:
> 
> 
> From: Maxime Coquelin:
>> Sent: Monday, June 22, 2020 3:33 PM
>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>> <xiao.w.wang@intel.com>
>> Cc: dev@dpdk.org
>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>
>>
>>
>> On 6/22/20 12:06 PM, Matan Azrad wrote:
>>>
>>> Hi Maxime
>>>
>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>> Sent: Monday, June 22, 2020 11:56 AM
>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>> <xiao.w.wang@intel.com>
>>>> Cc: dev@dpdk.org
>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>>>
>>>>
>>>>
>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>>>> The issue is if you only check ready state only before and after
>>>>>> the message affecting the ring is handled, it can be ready at both
>>>>>> stages, while the rings have changed and state change callback
>>>>>> should
>>>> have been called.
>>>>> But in this version I checked twice, before message handler and
>>>>> after
>>>> message handler, so it should catch any update.
>>>>
>>>> No, this is not enough, we have to check also during some handlers,
>>>> so that the ready state is invalidated because sometimes it will be
>>>> ready before and after the message handler but with different values.
>>>>
>>>> That's what I did in my example patch:
>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct virtio_net
>>>> **pdev, struct VhostUserMsg *msg,
>>>>
>>>> ...
>>>>
>>>>         if (vq->kickfd >= 0)
>>>>                 close(vq->kickfd);
>>>> +
>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>>>> +
>>>> +       vhost_user_update_vring_state(dev, file.index);
>>>> +
>>>>         vq->kickfd = file.fd;
>>>>
>>>>
>>>> Without that, the ready check will return ready before and after the
>>>> kickfd changed and the driver won't be notified.
>>>
>>> The driver will be notified in the next VHOST_USER_SET_VRING_ENABLE
>> message according to v1.
>>>
>>> One of our assumption we agreed on in the design mail is that it doesn't
>> make sense that QEMU will change queue configuration without enabling
>> the queue again.
>>> Because of that we decided to force calling state callback again when
>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if the
>> queue is already ready.
>>> So when driver/app see state enable->enable, it should take into account
>> that the queue configuration was probably changed.
>>>
>>> I think that this assumption is correct according to the QEMU code.
>>
>> Yes, this was our initial assumption.
>> But now looking into the details of the implementation, I find it is even
>> cleaner & clearer not to do this assumption.
>>
>>> That's why I prefer to collect all the ready checks callbacks (queue state and
>> device new\conf) to one function that will be called after the message
>> handler:
>>> Pseudo:
>>>  vhost_user_update_ready_statuses() {
>>> 	switch (msg):
>>> 		case enable:
>>> 			if(enable is 1)
>>> 				force queue state =1.
>>> 		case callfd
>>> 		case kickfd
>>> 				.....
>>> 		Check queue and device ready + call callbacks if needed..
>>> 		Default
>>> 			Return;
>>> }
>>
>> I find it more natural to "invalidate" ready state where it is handled (after
>> vring_invalidate(), before setting new FD for call & kick, ...)
> 
> I think that if you go with this direction, if the first queue pair is invalidated, you need to notify app\driver also about device ready change.
> Also it will cause 2 notifications to the driver instead of one in case of FD change.

You'll always end up with two notifications: either Qemu has sent the
disable, and so you'll have one notification for the disable and one for
the enable, or it didn't send the disable, and it will happen at
old value invalidation time and after the new value is taken into account.

> Why not to take this correct assumption and update ready state only in one point in the code instead of doing it in all the configuration handlers around?
> IMO, It is correct, less intrusive, simpler, clearer and cleaner.

I just looked closer at the Vhost-user spec, and I'm no longer so sure
this is a correct assumption:

"While processing the rings (whether they are enabled or not), client
must support changing some configuration aspects on the fly."

> In addition it saves the style that already used in this function in:
> - vhost_user_check_and_alloc_queue_pair
> - 	switch (request) {
> 	case VHOST_USER_SET_FEATURES:
> 	case VHOST_USER_SET_PROTOCOL_FEATURES:
> 	case VHOST_USER_SET_OWNER:
> 	case VHOST_USER_SET_MEM_TABLE:
> 	case VHOST_USER_SET_LOG_BASE:
> 	case VHOST_USER_SET_LOG_FD:
> 	case VHOST_USER_SET_VRING_NUM:
> 	case VHOST_USER_SET_VRING_ADDR:
> 	case VHOST_USER_SET_VRING_BASE:
> 	case VHOST_USER_SET_VRING_KICK:
> 	case VHOST_USER_SET_VRING_CALL:
> 	case VHOST_USER_SET_VRING_ERR:
> 	case VHOST_USER_SET_VRING_ENABLE:
> 	case VHOST_USER_SEND_RARP:
> 	case VHOST_USER_NET_SET_MTU:
> 	case VHOST_USER_SET_SLAVE_REQ_FD:
> 			vhost_user_lock_all_queue_pairs(dev);
> 
> Matan
> 
> 
> 
>
  
Matan Azrad June 22, 2020, 3:51 p.m. UTC | #13
From: Maxime Coquelin:
> On 6/22/20 3:43 PM, Matan Azrad wrote:
> >
> >
> > From: Maxime Coquelin:
> >> Sent: Monday, June 22, 2020 3:33 PM
> >> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >> <xiao.w.wang@intel.com>
> >> Cc: dev@dpdk.org
> >> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> >>
> >>
> >>
> >> On 6/22/20 12:06 PM, Matan Azrad wrote:
> >>>
> >>> Hi Maxime
> >>>
> >>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >>>> Sent: Monday, June 22, 2020 11:56 AM
> >>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>> <xiao.w.wang@intel.com>
> >>>> Cc: dev@dpdk.org
> >>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> >>>>
> >>>>
> >>>>
> >>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
> >>>>>> The issue is if you only check ready state only before and after
> >>>>>> the message affecting the ring is handled, it can be ready at
> >>>>>> both stages, while the rings have changed and state change
> >>>>>> callback should
> >>>> have been called.
> >>>>> But in this version I checked twice, before message handler and
> >>>>> after
> >>>> message handler, so it should catch any update.
> >>>>
> >>>> No, this is not enough, we have to check also during some handlers,
> >>>> so that the ready state is invalidated because sometimes it will be
> >>>> ready before and after the message handler but with different values.
> >>>>
> >>>> That's what I did in my example patch:
> >>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
> virtio_net
> >>>> **pdev, struct VhostUserMsg *msg,
> >>>>
> >>>> ...
> >>>>
> >>>>         if (vq->kickfd >= 0)
> >>>>                 close(vq->kickfd);
> >>>> +
> >>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> >>>> +
> >>>> +       vhost_user_update_vring_state(dev, file.index);
> >>>> +
> >>>>         vq->kickfd = file.fd;
> >>>>
> >>>>
> >>>> Without that, the ready check will return ready before and after
> >>>> the kickfd changed and the driver won't be notified.
> >>>
> >>> The driver will be notified in the next VHOST_USER_SET_VRING_ENABLE
> >> message according to v1.
> >>>
> >>> One of our assumption we agreed on in the design mail is that it
> >>> doesn't
> >> make sense that QEMU will change queue configuration without enabling
> >> the queue again.
> >>> Because of that we decided to force calling state callback again
> >>> when
> >> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if the
> queue is
> >> already ready.
> >>> So when driver/app see state enable->enable, it should take into
> >>> account
> >> that the queue configuration was probably changed.
> >>>
> >>> I think that this assumption is correct according to the QEMU code.
> >>
> >> Yes, this was our initial assumption.
> >> But now looking into the details of the implementation, I find it is
> >> even cleaner & clearer not to do this assumption.
> >>
> >>> That's why I prefer to collect all the ready checks callbacks (queue
> >>> state and
> >> device new\conf) to one function that will be called after the
> >> message
> >> handler:
> >>> Pseudo:
> >>>  vhost_user_update_ready_statuses() {
> >>> 	switch (msg):
> >>> 		case enable:
> >>> 			if(enable is 1)
> >>> 				force queue state =1.
> >>> 		case callfd
> >>> 		case kickfd
> >>> 				.....
> >>> 		Check queue and device ready + call callbacks if needed..
> >>> 		Default
> >>> 			Return;
> >>> }
> >>
> >> I find it more natural to "invalidate" ready state where it is
> >> handled (after vring_invalidate(), before setting new FD for call &
> >> kick, ...)
> >
> > I think that if you go with this direction, if the first queue pair is invalidated,
> you need to notify app\driver also about device ready change.
> > Also it will cause 2 notifications to the driver instead of one in case of FD
> change.
> 
> You'll always end-up with two notifications, either Qemu has sent the disable
> and so you'll have one notification for the disable and one for the enable, or
> it didn't sent the disable and it will happen at old value invalidation time and
> after new value is taken into account.
>

I don't see it in the current QEMU behavior.
When working with MQ, I see that some virtqs get configuration messages while they are in the enabled state.
Then, the enable message is sent again later.

 
> > Why not to take this correct assumption and update ready state only in one
> point in the code instead of doing it in all the configuration handlers around?
> > IMO, It is correct, less intrusive, simpler, clearer and cleaner.
> 
> I just looked closer at the Vhost-user spec, and I'm no more so sure this is a
> correct assumption:
> 
> "While processing the rings (whether they are enabled or not), client must
> support changing some configuration aspects on the fly."

Ok, this doesn't explain how the configuration is changed on the fly.
As I mentioned, QEMU always sends the enable message after a configuration message.


> > In addition it saves the style that already used in this function in:
> > - vhost_user_check_and_alloc_queue_pair
> > - 	switch (request) {
> > 	case VHOST_USER_SET_FEATURES:
> > 	case VHOST_USER_SET_PROTOCOL_FEATURES:
> > 	case VHOST_USER_SET_OWNER:
> > 	case VHOST_USER_SET_MEM_TABLE:
> > 	case VHOST_USER_SET_LOG_BASE:
> > 	case VHOST_USER_SET_LOG_FD:
> > 	case VHOST_USER_SET_VRING_NUM:
> > 	case VHOST_USER_SET_VRING_ADDR:
> > 	case VHOST_USER_SET_VRING_BASE:
> > 	case VHOST_USER_SET_VRING_KICK:
> > 	case VHOST_USER_SET_VRING_CALL:
> > 	case VHOST_USER_SET_VRING_ERR:
> > 	case VHOST_USER_SET_VRING_ENABLE:
> > 	case VHOST_USER_SEND_RARP:
> > 	case VHOST_USER_NET_SET_MTU:
> > 	case VHOST_USER_SET_SLAVE_REQ_FD:
> > 			vhost_user_lock_all_queue_pairs(dev);
> >
> > Matan
> >
> >
> >
> >
  
Maxime Coquelin June 22, 2020, 4:47 p.m. UTC | #14
On 6/22/20 5:51 PM, Matan Azrad wrote:
> 
> 
> From: Maxime Coquelin:
>> On 6/22/20 3:43 PM, Matan Azrad wrote:
>>>
>>>
>>> From: Maxime Coquelin:
>>>> Sent: Monday, June 22, 2020 3:33 PM
>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>> <xiao.w.wang@intel.com>
>>>> Cc: dev@dpdk.org
>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>>>
>>>>
>>>>
>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
>>>>>
>>>>> Hi Maxime
>>>>>
>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>>>> Sent: Monday, June 22, 2020 11:56 AM
>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>> <xiao.w.wang@intel.com>
>>>>>> Cc: dev@dpdk.org
>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>>>>>
>>>>>>
>>>>>>
>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>>>>>> The issue is if you only check ready state only before and after
>>>>>>>> the message affecting the ring is handled, it can be ready at
>>>>>>>> both stages, while the rings have changed and state change
>>>>>>>> callback should
>>>>>> have been called.
>>>>>>> But in this version I checked twice, before message handler and
>>>>>>> after
>>>>>> message handler, so it should catch any update.
>>>>>>
>>>>>> No, this is not enough, we have to check also during some handlers,
>>>>>> so that the ready state is invalidated because sometimes it will be
>>>>>> ready before and after the message handler but with different values.
>>>>>>
>>>>>> That's what I did in my example patch:
>>>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
>> virtio_net
>>>>>> **pdev, struct VhostUserMsg *msg,
>>>>>>
>>>>>> ...
>>>>>>
>>>>>>         if (vq->kickfd >= 0)
>>>>>>                 close(vq->kickfd);
>>>>>> +
>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>>>>>> +
>>>>>> +       vhost_user_update_vring_state(dev, file.index);
>>>>>> +
>>>>>>         vq->kickfd = file.fd;
>>>>>>
>>>>>>
>>>>>> Without that, the ready check will return ready before and after
>>>>>> the kickfd changed and the driver won't be notified.
>>>>>
>>>>> The driver will be notified in the next VHOST_USER_SET_VRING_ENABLE
>>>> message according to v1.
>>>>>
>>>>> One of our assumption we agreed on in the design mail is that it
>>>>> doesn't
>>>> make sense that QEMU will change queue configuration without enabling
>>>> the queue again.
>>>>> Because of that we decided to force calling state callback again
>>>>> when
>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if the
>> queue is
>>>> already ready.
>>>>> So when driver/app see state enable->enable, it should take into
>>>>> account
>>>> that the queue configuration was probably changed.
>>>>>
>>>>> I think that this assumption is correct according to the QEMU code.
>>>>
>>>> Yes, this was our initial assumption.
>>>> But now looking into the details of the implementation, I find it is
>>>> even cleaner & clearer not to do this assumption.
>>>>
>>>>> That's why I prefer to collect all the ready checks callbacks (queue
>>>>> state and
>>>> device new\conf) to one function that will be called after the
>>>> message
>>>> handler:
>>>>> Pseudo:
>>>>>  vhost_user_update_ready_statuses() {
>>>>> 	switch (msg):
>>>>> 		case enable:
>>>>> 			if(enable is 1)
>>>>> 				force queue state =1.
>>>>> 		case callfd
>>>>> 		case kickfd
>>>>> 				.....
>>>>> 		Check queue and device ready + call callbacks if needed..
>>>>> 		Default
>>>>> 			Return;
>>>>> }
>>>>
>>>> I find it more natural to "invalidate" ready state where it is
>>>> handled (after vring_invalidate(), before setting new FD for call &
>>>> kick, ...)
>>>
>>> I think that if you go with this direction, if the first queue pair is invalidated,
>> you need to notify app\driver also about device ready change.
>>> Also it will cause 2 notifications to the driver instead of one in case of FD
>> change.
>>
>> You'll always end-up with two notifications, either Qemu has sent the disable
>> and so you'll have one notification for the disable and one for the enable, or
>> it didn't sent the disable and it will happen at old value invalidation time and
>> after new value is taken into account.
>>
> 
> I don't see it in current QEMU behavior.
> When working MQ I see that some virtqs get configuration message while they are in enabled state.
> Then, enable message is sent again later.

I guess you mean the first queue pair? And it would not be in the ready
state, as it would be the initial configuration of the queue?

>  
>>> Why not to take this correct assumption and update ready state only in one
>> point in the code instead of doing it in all the configuration handlers around?
>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
>>
>> I just looked closer at the Vhost-user spec, and I'm no more so sure this is a
>> correct assumption:
>>
>> "While processing the rings (whether they are enabled or not), client must
>> support changing some configuration aspects on the fly."
> 
> Ok, this doesn't explain how configuration is changed on the fly.

I agree it lacks a bit of clarity.

> As I mentioned, QEMU sends enable message always after configuration message.

Yes, but we should not make assumptions about the current Qemu version when
possible. It is better to be safe and follow the specification; it will be
more robust. There is also the Virtio-user PMD to take into account, for
example.

Thanks,
Maxime

> 
>>> In addition it saves the style that already used in this function in:
>>> - vhost_user_check_and_alloc_queue_pair
>>> - 	switch (request) {
>>> 	case VHOST_USER_SET_FEATURES:
>>> 	case VHOST_USER_SET_PROTOCOL_FEATURES:
>>> 	case VHOST_USER_SET_OWNER:
>>> 	case VHOST_USER_SET_MEM_TABLE:
>>> 	case VHOST_USER_SET_LOG_BASE:
>>> 	case VHOST_USER_SET_LOG_FD:
>>> 	case VHOST_USER_SET_VRING_NUM:
>>> 	case VHOST_USER_SET_VRING_ADDR:
>>> 	case VHOST_USER_SET_VRING_BASE:
>>> 	case VHOST_USER_SET_VRING_KICK:
>>> 	case VHOST_USER_SET_VRING_CALL:
>>> 	case VHOST_USER_SET_VRING_ERR:
>>> 	case VHOST_USER_SET_VRING_ENABLE:
>>> 	case VHOST_USER_SEND_RARP:
>>> 	case VHOST_USER_NET_SET_MTU:
>>> 	case VHOST_USER_SET_SLAVE_REQ_FD:
>>> 			vhost_user_lock_all_queue_pairs(dev);
>>>
>>> Matan
>>>
>>>
>>>
>>>
>
  
Matan Azrad June 23, 2020, 9:02 a.m. UTC | #15
From: Maxime Coquelin:
> On 6/22/20 5:51 PM, Matan Azrad wrote:
> >
> >
> > From: Maxime Coquelin:
> >> On 6/22/20 3:43 PM, Matan Azrad wrote:
> >>>
> >>>
> >>> From: Maxime Coquelin:
> >>>> Sent: Monday, June 22, 2020 3:33 PM
> >>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>> <xiao.w.wang@intel.com>
> >>>> Cc: dev@dpdk.org
> >>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> >>>>
> >>>>
> >>>>
> >>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
> >>>>>
> >>>>> Hi Maxime
> >>>>>
> >>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >>>>>> Sent: Monday, June 22, 2020 11:56 AM
> >>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>> <xiao.w.wang@intel.com>
> >>>>>> Cc: dev@dpdk.org
> >>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>> definition
> >>>>>>
> >>>>>>
> >>>>>>
> >>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
> >>>>>>>> The issue is if you only check ready state only before and
> >>>>>>>> after the message affecting the ring is handled, it can be
> >>>>>>>> ready at both stages, while the rings have changed and state
> >>>>>>>> change callback should
> >>>>>> have been called.
> >>>>>>> But in this version I checked twice, before message handler and
> >>>>>>> after
> >>>>>> message handler, so it should catch any update.
> >>>>>>
> >>>>>> No, this is not enough, we have to check also during some
> >>>>>> handlers, so that the ready state is invalidated because
> >>>>>> sometimes it will be ready before and after the message handler but
> with different values.
> >>>>>>
> >>>>>> That's what I did in my example patch:
> >>>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
> >> virtio_net
> >>>>>> **pdev, struct VhostUserMsg *msg,
> >>>>>>
> >>>>>> ...
> >>>>>>
> >>>>>>         if (vq->kickfd >= 0)
> >>>>>>                 close(vq->kickfd);
> >>>>>> +
> >>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> >>>>>> +
> >>>>>> +       vhost_user_update_vring_state(dev, file.index);
> >>>>>> +
> >>>>>>         vq->kickfd = file.fd;
> >>>>>>
> >>>>>>
> >>>>>> Without that, the ready check will return ready before and after
> >>>>>> the kickfd changed and the driver won't be notified.
> >>>>>
> >>>>> The driver will be notified in the next
> >>>>> VHOST_USER_SET_VRING_ENABLE
> >>>> message according to v1.
> >>>>>
> >>>>> One of our assumption we agreed on in the design mail is that it
> >>>>> doesn't
> >>>> make sense that QEMU will change queue configuration without
> >>>> enabling the queue again.
> >>>>> Because of that we decided to force calling state callback again
> >>>>> when
> >>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if the
> >> queue is
> >>>> already ready.
> >>>>> So when driver/app see state enable->enable, it should take into
> >>>>> account
> >>>> that the queue configuration was probably changed.
> >>>>>
> >>>>> I think that this assumption is correct according to the QEMU code.
> >>>>
> >>>> Yes, this was our initial assumption.
> >>>> But now looking into the details of the implementation, I find it
> >>>> is even cleaner & clearer not to do this assumption.
> >>>>
> >>>>> That's why I prefer to collect all the ready checks callbacks
> >>>>> (queue state and
> >>>> device new\conf) to one function that will be called after the
> >>>> message
> >>>> handler:
> >>>>> Pseudo:
> >>>>>  vhost_user_update_ready_statuses() {
> >>>>> 	switch (msg):
> >>>>> 		case enable:
> >>>>> 			if(enable is 1)
> >>>>> 				force queue state =1.
> >>>>> 		case callfd
> >>>>> 		case kickfd
> >>>>> 				.....
> >>>>> 		Check queue and device ready + call callbacks if needed..
> >>>>> 		Default
> >>>>> 			Return;
> >>>>> }
> >>>>
> >>>> I find it more natural to "invalidate" ready state where it is
> >>>> handled (after vring_invalidate(), before setting new FD for call &
> >>>> kick, ...)
> >>>
> >>> I think that if you go with this direction, if the first queue pair
> >>> is invalidated,
> >> you need to notify app\driver also about device ready change.
> >>> Also it will cause 2 notifications to the driver instead of one in
> >>> case of FD
> >> change.
> >>
> >> You'll always end-up with two notifications, either Qemu has sent the
> >> disable and so you'll have one notification for the disable and one
> >> for the enable, or it didn't sent the disable and it will happen at
> >> old value invalidation time and after new value is taken into account.
> >>
> >
> > I don't see it in current QEMU behavior.
> > When working MQ I see that some virtqs get configuration message while
> they are in enabled state.
> > Then, enable message is sent again later.
> 
> I guess you mean the first queue pair? And it would not be in ready state as it
> would be the initial configuration of the queue?

Even after initialization, when the queue is ready.

> >
> >>> Why not to take this correct assumption and update ready state only
> >>> in one
> >> point in the code instead of doing it in all the configuration handlers
> around?
> >>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
> >>
> >> I just looked closer at the Vhost-user spec, and I'm no more so sure
> >> this is a correct assumption:
> >>
> >> "While processing the rings (whether they are enabled or not), client
> >> must support changing some configuration aspects on the fly."
> >
> > Ok, this doesn't explain how configuration is changed on the fly.
> 
> I agree it lacks a bit of clarity.
> 
> > As I mentioned, QEMU sends enable message always after configuration
> message.
> 
> Yes, but we should not do assumptions on current Qemu version when
> possible. Better to be safe and follow the specification, it will be more robust.
> There is also the Virtio-user PMD to take into account for example.

I understand your point here, but do you really want to be ready for any configuration update at runtime?
What does it mean? How should the datapath handle configuration from the control thread at runtime while traffic is on?
For example, changing the queue size \ addresses requires stopping traffic first...
Also, changing FDs is very sensitive.

It doesn't make sense to me.

Also, according to "on the fly" direction we should not disable the queue unless enable message is coming to disable it.

In addition:
Do you really want to toggle the vDPA driver\app for any configuration message? It may cause a queue recreation for each one (at least for mlx5).


> Thanks,
> Maxime
> 
> >
> >>> In addition it saves the style that already used in this function in:
> >>> - vhost_user_check_and_alloc_queue_pair
> >>> - 	switch (request) {
> >>> 	case VHOST_USER_SET_FEATURES:
> >>> 	case VHOST_USER_SET_PROTOCOL_FEATURES:
> >>> 	case VHOST_USER_SET_OWNER:
> >>> 	case VHOST_USER_SET_MEM_TABLE:
> >>> 	case VHOST_USER_SET_LOG_BASE:
> >>> 	case VHOST_USER_SET_LOG_FD:
> >>> 	case VHOST_USER_SET_VRING_NUM:
> >>> 	case VHOST_USER_SET_VRING_ADDR:
> >>> 	case VHOST_USER_SET_VRING_BASE:
> >>> 	case VHOST_USER_SET_VRING_KICK:
> >>> 	case VHOST_USER_SET_VRING_CALL:
> >>> 	case VHOST_USER_SET_VRING_ERR:
> >>> 	case VHOST_USER_SET_VRING_ENABLE:
> >>> 	case VHOST_USER_SEND_RARP:
> >>> 	case VHOST_USER_NET_SET_MTU:
> >>> 	case VHOST_USER_SET_SLAVE_REQ_FD:
> >>> 			vhost_user_lock_all_queue_pairs(dev);
> >>>
> >>> Matan
> >>>
> >>>
> >>>
> >>>
> >
  
Maxime Coquelin June 23, 2020, 9:19 a.m. UTC | #16
On 6/23/20 11:02 AM, Matan Azrad wrote:
> 
> 
> From: Maxime Coquelin:
>> On 6/22/20 5:51 PM, Matan Azrad wrote:
>>>
>>>
>>> From: Maxime Coquelin:
>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
>>>>>
>>>>>
>>>>> From: Maxime Coquelin:
>>>>>> Sent: Monday, June 22, 2020 3:33 PM
>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>> <xiao.w.wang@intel.com>
>>>>>> Cc: dev@dpdk.org
>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>>>>>
>>>>>>
>>>>>>
>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
>>>>>>>
>>>>>>> Hi Maxime
>>>>>>>
>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>> Cc: dev@dpdk.org
>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>> definition
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>>>>>>>> The issue is if you only check ready state only before and
>>>>>>>>>> after the message affecting the ring is handled, it can be
>>>>>>>>>> ready at both stages, while the rings have changed and state
>>>>>>>>>> change callback should
>>>>>>>> have been called.
>>>>>>>>> But in this version I checked twice, before message handler and
>>>>>>>>> after
>>>>>>>> message handler, so it should catch any update.
>>>>>>>>
>>>>>>>> No, this is not enough, we have to check also during some
>>>>>>>> handlers, so that the ready state is invalidated because
>>>>>>>> sometimes it will be ready before and after the message handler but
>> with different values.
>>>>>>>>
>>>>>>>> That's what I did in my example patch:
>>>>>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
>>>> virtio_net
>>>>>>>> **pdev, struct VhostUserMsg *msg,
>>>>>>>>
>>>>>>>> ...
>>>>>>>>
>>>>>>>>         if (vq->kickfd >= 0)
>>>>>>>>                 close(vq->kickfd);
>>>>>>>> +
>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>>>>>>>> +
>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
>>>>>>>> +
>>>>>>>>         vq->kickfd = file.fd;
>>>>>>>>
>>>>>>>>
>>>>>>>> Without that, the ready check will return ready before and after
>>>>>>>> the kickfd changed and the driver won't be notified.
>>>>>>>
>>>>>>> The driver will be notified in the next
>>>>>>> VHOST_USER_SET_VRING_ENABLE
>>>>>> message according to v1.
>>>>>>>
>>>>>>> One of our assumption we agreed on in the design mail is that it
>>>>>>> doesn't
>>>>>> make sense that QEMU will change queue configuration without
>>>>>> enabling the queue again.
>>>>>>> Because of that we decided to force calling state callback again
>>>>>>> when
>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if the
>>>> queue is
>>>>>> already ready.
>>>>>>> So when driver/app see state enable->enable, it should take into
>>>>>>> account
>>>>>> that the queue configuration was probably changed.
>>>>>>>
>>>>>>> I think that this assumption is correct according to the QEMU code.
>>>>>>
>>>>>> Yes, this was our initial assumption.
>>>>>> But now looking into the details of the implementation, I find it
>>>>>> is even cleaner & clearer not to do this assumption.
>>>>>>
>>>>>>> That's why I prefer to collect all the ready checks callbacks
>>>>>>> (queue state and
>>>>>> device new\conf) to one function that will be called after the
>>>>>> message
>>>>>> handler:
>>>>>>> Pseudo:
>>>>>>>  vhost_user_update_ready_statuses() {
>>>>>>> 	switch (msg):
>>>>>>> 		case enable:
>>>>>>> 			if(enable is 1)
>>>>>>> 				force queue state =1.
>>>>>>> 		case callfd
>>>>>>> 		case kickfd
>>>>>>> 				.....
>>>>>>> 		Check queue and device ready + call callbacks if needed..
>>>>>>> 		Default
>>>>>>> 			Return;
>>>>>>> }
>>>>>>
>>>>>> I find it more natural to "invalidate" ready state where it is
>>>>>> handled (after vring_invalidate(), before setting new FD for call &
>>>>>> kick, ...)
>>>>>
>>>>> I think that if you go with this direction, if the first queue pair
>>>>> is invalidated,
>>>> you need to notify app\driver also about device ready change.
>>>>> Also it will cause 2 notifications to the driver instead of one in
>>>>> case of FD
>>>> change.
>>>>
>>>> You'll always end-up with two notifications, either Qemu has sent the
>>>> disable and so you'll have one notification for the disable and one
>>>> for the enable, or it didn't sent the disable and it will happen at
>>>> old value invalidation time and after new value is taken into account.
>>>>
>>>
>>> I don't see it in current QEMU behavior.
>>> When working MQ I see that some virtqs get configuration message while
>> they are in enabled state.
>>> Then, enable message is sent again later.
>>
>> I guess you mean the first queue pair? And it would not be in ready state as it
>> would be the initial configuration of the queue?
> 
> Even after initialization when queue is ready.
> 
>>>
>>>>> Why not to take this correct assumption and update ready state only
>>>>> in one
>>>> point in the code instead of doing it in all the configuration handlers
>> around?
>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
>>>>
>>>> I just looked closer at the Vhost-user spec, and I'm no more so sure
>>>> this is a correct assumption:
>>>>
>>>> "While processing the rings (whether they are enabled or not), client
>>>> must support changing some configuration aspects on the fly."
>>>
>>> Ok, this doesn't explain how configuration is changed on the fly.
>>
>> I agree it lacks a bit of clarity.
>>
>>> As I mentioned, QEMU sends enable message always after configuration
>> message.
>>
>> Yes, but we should not do assumptions on current Qemu version when
>> possible. Better to be safe and follow the specification, it will be more robust.
>> There is also the Virtio-user PMD to take into account for example.
> 
> I understand your point here but do you really want to be ready for any configuration update in run time?
> What does it mean? How datatpath should handle configuration from control thread in run time while traffic is on?
> For example, changing queue size \ addresses must stop traffic before...
> Also changing FDs is very sensitive.
> 
> It doesn't make sense to me.
> 
> Also, according to "on the fly" direction we should not disable the queue unless enable message is coming to disable it.
> 
> In addition:
> Do you really want to toggle vDPA drivers\app for any configuration message? It may cause queue recreation for each one (at least for mlx5).

I want to have something robust and maintainable.

These messages arriving after a queue has been configured once are rare
events, but this is usually the kind of thing that causes maintenance
burden.

If you look at my example patch, you will understand that with my
proposal, there won't be any more state change notifications than with
your proposal when Qemu or any other Vhost-user master sends a disable
request before sending the request that impacts the queue state.

It just adds more robustness if this unlikely event happens, by
invalidating the ring state to not ready before doing the actual ring
configuration change, so that this config change is not missed by the
vDPA driver or the application.
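
For reference, a minimal sketch of that invalidate-before-change pattern
applied to the kick FD handler (illustration only, not merged code:
vhost_user_update_vring_state() is the helper from the unmerged example
patch quoted in this thread, and the types come from
lib/librte_vhost/vhost.h):

static void
set_vring_kick_sketch(struct virtio_net *dev, struct vhost_virtqueue *vq,
		      int new_kickfd, uint16_t vring_idx)
{
	if (vq->kickfd >= 0)
		close(vq->kickfd);

	/* Invalidate the ready state before the change takes effect,
	 * so the driver/application gets a "not ready" notification. */
	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vhost_user_update_vring_state(dev, vring_idx);

	/* Apply the new value, then re-evaluate the ready state so a
	 * "ready" notification is sent once the ring is usable again. */
	vq->kickfd = new_kickfd;
	vhost_user_update_vring_state(dev, vring_idx);
}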

Maxime
  
Matan Azrad June 23, 2020, 11:53 a.m. UTC | #17
From: Maxime Coquelin:
> On 6/23/20 11:02 AM, Matan Azrad wrote:
> >
> >
> > From: Maxime Coquelin:
> >> On 6/22/20 5:51 PM, Matan Azrad wrote:
> >>>
> >>>
> >>> From: Maxime Coquelin:
> >>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
> >>>>>
> >>>>>
> >>>>> From: Maxime Coquelin:
> >>>>>> Sent: Monday, June 22, 2020 3:33 PM
> >>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>> <xiao.w.wang@intel.com>
> >>>>>> Cc: dev@dpdk.org
> >>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>> definition
> >>>>>>
> >>>>>>
> >>>>>>
> >>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
> >>>>>>>
> >>>>>>> Hi Maxime
> >>>>>>>
> >>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
> >>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>>>> <xiao.w.wang@intel.com>
> >>>>>>>> Cc: dev@dpdk.org
> >>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>>>> definition
> >>>>>>>>
> >>>>>>>>
> >>>>>>>>
> >>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
> >>>>>>>>>> The issue is if you only check ready state only before and
> >>>>>>>>>> after the message affecting the ring is handled, it can be
> >>>>>>>>>> ready at both stages, while the rings have changed and state
> >>>>>>>>>> change callback should
> >>>>>>>> have been called.
> >>>>>>>>> But in this version I checked twice, before message handler
> >>>>>>>>> and after
> >>>>>>>> message handler, so it should catch any update.
> >>>>>>>>
> >>>>>>>> No, this is not enough, we have to check also during some
> >>>>>>>> handlers, so that the ready state is invalidated because
> >>>>>>>> sometimes it will be ready before and after the message handler
> >>>>>>>> but
> >> with different values.
> >>>>>>>>
> >>>>>>>> That's what I did in my example patch:
> >>>>>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
> >>>> virtio_net
> >>>>>>>> **pdev, struct VhostUserMsg *msg,
> >>>>>>>>
> >>>>>>>> ...
> >>>>>>>>
> >>>>>>>>         if (vq->kickfd >= 0)
> >>>>>>>>                 close(vq->kickfd);
> >>>>>>>> +
> >>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> >>>>>>>> +
> >>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
> >>>>>>>> +
> >>>>>>>>         vq->kickfd = file.fd;
> >>>>>>>>
> >>>>>>>>
> >>>>>>>> Without that, the ready check will return ready before and
> >>>>>>>> after the kickfd changed and the driver won't be notified.
> >>>>>>>
> >>>>>>> The driver will be notified in the next
> >>>>>>> VHOST_USER_SET_VRING_ENABLE
> >>>>>> message according to v1.
> >>>>>>>
> >>>>>>> One of our assumption we agreed on in the design mail is that it
> >>>>>>> doesn't
> >>>>>> make sense that QEMU will change queue configuration without
> >>>>>> enabling the queue again.
> >>>>>>> Because of that we decided to force calling state callback again
> >>>>>>> when
> >>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if
> the
> >>>> queue is
> >>>>>> already ready.
> >>>>>>> So when driver/app see state enable->enable, it should take into
> >>>>>>> account
> >>>>>> that the queue configuration was probably changed.
> >>>>>>>
> >>>>>>> I think that this assumption is correct according to the QEMU code.
> >>>>>>
> >>>>>> Yes, this was our initial assumption.
> >>>>>> But now looking into the details of the implementation, I find it
> >>>>>> is even cleaner & clearer not to do this assumption.
> >>>>>>
> >>>>>>> That's why I prefer to collect all the ready checks callbacks
> >>>>>>> (queue state and
> >>>>>> device new\conf) to one function that will be called after the
> >>>>>> message
> >>>>>> handler:
> >>>>>>> Pseudo:
> >>>>>>>  vhost_user_update_ready_statuses() {
> >>>>>>> 	switch (msg):
> >>>>>>> 		case enable:
> >>>>>>> 			if(enable is 1)
> >>>>>>> 				force queue state =1.
> >>>>>>> 		case callfd
> >>>>>>> 		case kickfd
> >>>>>>> 				.....
> >>>>>>> 		Check queue and device ready + call callbacks if
> needed..
> >>>>>>> 		Default
> >>>>>>> 			Return;
> >>>>>>> }
> >>>>>>
> >>>>>> I find it more natural to "invalidate" ready state where it is
> >>>>>> handled (after vring_invalidate(), before setting new FD for call
> >>>>>> & kick, ...)
> >>>>>
> >>>>> I think that if you go with this direction, if the first queue
> >>>>> pair is invalidated,
> >>>> you need to notify app\driver also about device ready change.
> >>>>> Also it will cause 2 notifications to the driver instead of one in
> >>>>> case of FD
> >>>> change.
> >>>>
> >>>> You'll always end-up with two notifications, either Qemu has sent
> >>>> the disable and so you'll have one notification for the disable and
> >>>> one for the enable, or it didn't sent the disable and it will
> >>>> happen at old value invalidation time and after new value is taken into
> account.
> >>>>
> >>>
> >>> I don't see it in current QEMU behavior.
> >>> When working MQ I see that some virtqs get configuration message
> >>> while
> >> they are in enabled state.
> >>> Then, enable message is sent again later.
> >>
> >> I guess you mean the first queue pair? And it would not be in ready
> >> state as it would be the initial configuration of the queue?
> >
> > Even after initialization when queue is ready.
> >
> >>>
> >>>>> Why not to take this correct assumption and update ready state
> >>>>> only in one
> >>>> point in the code instead of doing it in all the configuration
> >>>> handlers
> >> around?
> >>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
> >>>>
> >>>> I just looked closer at the Vhost-user spec, and I'm no more so
> >>>> sure this is a correct assumption:
> >>>>
> >>>> "While processing the rings (whether they are enabled or not),
> >>>> client must support changing some configuration aspects on the fly."
> >>>
> >>> Ok, this doesn't explain how configuration is changed on the fly.
> >>
> >> I agree it lacks a bit of clarity.
> >>
> >>> As I mentioned, QEMU sends enable message always after configuration
> >> message.
> >>
> >> Yes, but we should not do assumptions on current Qemu version when
> >> possible. Better to be safe and follow the specification, it will be more
> robust.
> >> There is also the Virtio-user PMD to take into account for example.
> >
> > I understand your point here but do you really want to be ready for any
> configuration update in run time?
> > What does it mean? How datatpath should handle configuration from
> control thread in run time while traffic is on?
> > For example, changing queue size \ addresses must stop traffic before...
> > Also changing FDs is very sensitive.
> >
> > It doesn't make sense to me.
> >
> > Also, according to "on the fly" direction we should not disable the queue
> unless enable message is coming to disable it.

No response, so it looks like you agree that it doesn't make sense.

> > In addition:
> > Do you really want to toggle vDPA drivers\app for any configuration
> message? It may cause queue recreation for each one (at least for mlx5).
> 
> I want to have something robust and maintainable.

Me too.

> These messages arriving after a queue have been configured once are rare
> events, but this is usually the kind of things that cause maintenance burden.

In case of guest poll mode (testpmd virtio) we get callfd twice all the time.

> If you look at my example patch, you will understand that with my proposal,
> there won't be any more state change notification than with your proposal
> when Qemu or any other Vhost-user master send a disable request before
> sending the request that impact the queue state.

We didn't talk about disable time - this one is very simple.

Yes, in case the queue is disabled your proposal doesn't send an extra notification any more than mine does.
But in case the queue is ready, your proposal sends an extra not-ready notification for the kickfd, callfd and set_vring_base configurations.

> It just adds more robustness if this unlikely event happens, by invalidating
> the ring state to not ready before doing the actual ring configuration change.
> So that this config change is not missed by the vDPA driver or the application.

One more issue here is that there is a window where the device is ready (already configured) while the first virtq-pair is not ready (with your invalidate proposal for set_vring_base).
It breaks the concept that the device is ready only when the first virtq-pair is ready.


I will not insist anymore on waiting for enable before notifying, although I am not a fan of it.

So, I suggest creating a single notification function to be called after the message handler and before the reply (see the sketch after this list).
This function is the only one which notifies ready states, in the following cases:

1. virtq ready state is changed in the queue.
2. virtq ready state stays on after configuration message handler.
3. device state will be enabled when the first queue pair is ready.
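
A rough sketch of what that single notification point could look like
(illustrative only: the function name is made up, and it assumes a
per-virtqueue "ready" flag to remember the last notified state; the other
symbols are the existing librte_vhost ones):

static void
vhost_user_notify_ready_sketch(struct virtio_net *dev,
			       struct vhost_virtqueue *vq, uint16_t idx,
			       bool force_enable)
{
	bool ready = vq_is_ready(dev, vq);

	/* Cases 1 and 2: the ring ready state changed, or it stays on
	 * after a configuration message (force_enable would be set when
	 * handling VHOST_USER_SET_VRING_ENABLE(1)). */
	if (ready != vq->ready || (ready && force_enable)) {
		vq->ready = ready;
		if (dev->notify_ops->vring_state_changed)
			dev->notify_ops->vring_state_changed(dev->vid, idx,
							     ready);
	}

	/* Case 3: the device becomes ready when the first queue pair is. */
	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
		dev->flags |= VIRTIO_DEV_RUNNING;
		dev->notify_ops->new_device(dev->vid);
	}
}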


Matan



> Maxime
  
Maxime Coquelin June 23, 2020, 1:55 p.m. UTC | #18
Hi Matan,

On 6/23/20 1:53 PM, Matan Azrad wrote:
> 
> 
> From: Maxime Coquelin:
>> On 6/23/20 11:02 AM, Matan Azrad wrote:
>>>
>>>
>>> From: Maxime Coquelin:
>>>> On 6/22/20 5:51 PM, Matan Azrad wrote:
>>>>>
>>>>>
>>>>> From: Maxime Coquelin:
>>>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
>>>>>>>
>>>>>>>
>>>>>>> From: Maxime Coquelin:
>>>>>>>> Sent: Monday, June 22, 2020 3:33 PM
>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>> Cc: dev@dpdk.org
>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>> definition
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
>>>>>>>>>
>>>>>>>>> Hi Maxime
>>>>>>>>>
>>>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>>> definition
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>>>>>>>>>> The issue is if you only check ready state only before and
>>>>>>>>>>>> after the message affecting the ring is handled, it can be
>>>>>>>>>>>> ready at both stages, while the rings have changed and state
>>>>>>>>>>>> change callback should
>>>>>>>>>> have been called.
>>>>>>>>>>> But in this version I checked twice, before message handler
>>>>>>>>>>> and after
>>>>>>>>>> message handler, so it should catch any update.
>>>>>>>>>>
>>>>>>>>>> No, this is not enough, we have to check also during some
>>>>>>>>>> handlers, so that the ready state is invalidated because
>>>>>>>>>> sometimes it will be ready before and after the message handler
>>>>>>>>>> but
>>>> with different values.
>>>>>>>>>>
>>>>>>>>>> That's what I did in my example patch:
>>>>>>>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
>>>>>> virtio_net
>>>>>>>>>> **pdev, struct VhostUserMsg *msg,
>>>>>>>>>>
>>>>>>>>>> ...
>>>>>>>>>>
>>>>>>>>>>         if (vq->kickfd >= 0)
>>>>>>>>>>                 close(vq->kickfd);
>>>>>>>>>> +
>>>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>>>>>>>>>> +
>>>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
>>>>>>>>>> +
>>>>>>>>>>         vq->kickfd = file.fd;
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> Without that, the ready check will return ready before and
>>>>>>>>>> after the kickfd changed and the driver won't be notified.
>>>>>>>>>
>>>>>>>>> The driver will be notified in the next
>>>>>>>>> VHOST_USER_SET_VRING_ENABLE
>>>>>>>> message according to v1.
>>>>>>>>>
>>>>>>>>> One of our assumption we agreed on in the design mail is that it
>>>>>>>>> doesn't
>>>>>>>> make sense that QEMU will change queue configuration without
>>>>>>>> enabling the queue again.
>>>>>>>>> Because of that we decided to force calling state callback again
>>>>>>>>> when
>>>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if
>> the
>>>>>> queue is
>>>>>>>> already ready.
>>>>>>>>> So when driver/app see state enable->enable, it should take into
>>>>>>>>> account
>>>>>>>> that the queue configuration was probably changed.
>>>>>>>>>
>>>>>>>>> I think that this assumption is correct according to the QEMU code.
>>>>>>>>
>>>>>>>> Yes, this was our initial assumption.
>>>>>>>> But now looking into the details of the implementation, I find it
>>>>>>>> is even cleaner & clearer not to do this assumption.
>>>>>>>>
>>>>>>>>> That's why I prefer to collect all the ready checks callbacks
>>>>>>>>> (queue state and
>>>>>>>> device new\conf) to one function that will be called after the
>>>>>>>> message
>>>>>>>> handler:
>>>>>>>>> Pseudo:
>>>>>>>>>  vhost_user_update_ready_statuses() {
>>>>>>>>> 	switch (msg):
>>>>>>>>> 		case enable:
>>>>>>>>> 			if(enable is 1)
>>>>>>>>> 				force queue state =1.
>>>>>>>>> 		case callfd
>>>>>>>>> 		case kickfd
>>>>>>>>> 				.....
>>>>>>>>> 		Check queue and device ready + call callbacks if
>> needed..
>>>>>>>>> 		Default
>>>>>>>>> 			Return;
>>>>>>>>> }
>>>>>>>>
>>>>>>>> I find it more natural to "invalidate" ready state where it is
>>>>>>>> handled (after vring_invalidate(), before setting new FD for call
>>>>>>>> & kick, ...)
>>>>>>>
>>>>>>> I think that if you go with this direction, if the first queue
>>>>>>> pair is invalidated,
>>>>>> you need to notify app\driver also about device ready change.
>>>>>>> Also it will cause 2 notifications to the driver instead of one in
>>>>>>> case of FD
>>>>>> change.
>>>>>>
>>>>>> You'll always end-up with two notifications, either Qemu has sent
>>>>>> the disable and so you'll have one notification for the disable and
>>>>>> one for the enable, or it didn't sent the disable and it will
>>>>>> happen at old value invalidation time and after new value is taken into
>> account.
>>>>>>
>>>>>
>>>>> I don't see it in current QEMU behavior.
>>>>> When working MQ I see that some virtqs get configuration message
>>>>> while
>>>> they are in enabled state.
>>>>> Then, enable message is sent again later.
>>>>
>>>> I guess you mean the first queue pair? And it would not be in ready
>>>> state as it would be the initial configuration of the queue?
>>>
>>> Even after initialization when queue is ready.
>>>
>>>>>
>>>>>>> Why not to take this correct assumption and update ready state
>>>>>>> only in one
>>>>>> point in the code instead of doing it in all the configuration
>>>>>> handlers
>>>> around?
>>>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
>>>>>>
>>>>>> I just looked closer at the Vhost-user spec, and I'm no more so
>>>>>> sure this is a correct assumption:
>>>>>>
>>>>>> "While processing the rings (whether they are enabled or not),
>>>>>> client must support changing some configuration aspects on the fly."
>>>>>
>>>>> Ok, this doesn't explain how configuration is changed on the fly.
>>>>
>>>> I agree it lacks a bit of clarity.
>>>>
>>>>> As I mentioned, QEMU sends enable message always after configuration
>>>> message.
>>>>
>>>> Yes, but we should not do assumptions on current Qemu version when
>>>> possible. Better to be safe and follow the specification, it will be more
>> robust.
>>>> There is also the Virtio-user PMD to take into account for example.
>>>
>>> I understand your point here but do you really want to be ready for any
>> configuration update in run time?
>>> What does it mean? How datatpath should handle configuration from
>> control thread in run time while traffic is on?
>>> For example, changing queue size \ addresses must stop traffic before...
>>> Also changing FDs is very sensitive.
>>>
>>> It doesn't make sense to me.
>>>
>>> Also, according to "on the fly" direction we should not disable the queue
>> unless enable message is coming to disable it.
> 
> No response, so looks like you agree that it doesn't make sense.

No, my reply was general to all your comments.

With SW backend, I agree we don't need to disable the rings in case of
asynchronous changes to the ring because we protect it with a lock, so
we are sure the ring won't be accessed by another thread while doing the
change.
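
(Simplified view of that SW backend protection, as a sketch: the ring is
guarded by the per-virtqueue access_lock, taken both by the datapath and
by the message handler around ring-affecting changes.)

	rte_spinlock_lock(&vq->access_lock);
	/* ... apply the configuration change ... */
	rte_spinlock_unlock(&vq->access_lock);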

For vDPA case that's more problematic because we have no such locking
mechanism.

For example, with memory hotplug, Qemu does not seem to disable the queues,
so we need to stop the vDPA device one way or another so that it does not
process the rings while the Vhost lib remaps the memory areas.

>>> In addition:
>>> Do you really want to toggle vDPA drivers\app for any configuration
>> message? It may cause queue recreation for each one (at least for mlx5).
>>
>> I want to have something robust and maintainable.
> 
> Me too.
> 
>> These messages arriving after a queue have been configured once are rare
>> events, but this is usually the kind of things that cause maintenance burden.
> 
> In case of guest poll mode (testpmd virtio) we all the time get callfd twice.

Right.

>> If you look at my example patch, you will understand that with my proposal,
>> there won't be any more state change notification than with your proposal
>> when Qemu or any other Vhost-user master send a disable request before
>> sending the request that impact the queue state.
> 
> we didn't talk about disable time - this one is very simple.
> 
> Yes, In case the queue is disabled your proposal doesn't send extra notification as my.
> But in case the queue is ready, your proposal send extra not ready notification for kikfd,callfd,set_vring_base configurations.

I think this is necessary for synchronization with the Vhost-user
master (in case the master asks for this synchronization, like
set_mem_table for instance when reply-ack is enabled).

>> It just adds more robustness if this unlikely event happens, by invalidating
>> the ring state to not ready before doing the actual ring configuration change.
>> So that this config change is not missed by the vDPA driver or the application.
> 
> One more issue here is that there is some time that device is ready (already configured) and the first vittq-pair is not ready (your invalidate proposal for set_vring_base).



> It doesn’t save the concept that device is ready only in case the first virtq-pair is ready.

I understand the spec as "the device is ready as soon as the first queue
pair is ready", but I might be wrong.

Do you suggest calling the dev_close() vDPA callback and the
destroy_device() application callback as soon as one of the rings of the
first queue pair receives a disable request or, with my patch, when one
of the rings receives a request that changes the ring state?

> 
> I will not insist anymore on waiting for enable for notifying although I not fan with it.
> 
> So, I suggest to create 1 notification function to be called after message handler and before reply.
> This function is the only one which notify ready states in the next options:
> 
> 1. virtq ready state is changed in the queue.
> 2. virtq ready state stays on after configuration message handler.
> 3. device state will be enabled when the first queue pair is ready.

IIUC, it will not disable the queues when there is a state change, is
that correct? If so, I think it does not work with the memory hotplug
case I mentioned earlier.

Even for the callfd double change it can be problematic as Vhost-lib
will close the first one while it will still be used by the driver (Btw,
I see my example patch is also buggy in this regard, it should reset
the call_fd value in the virtqueue, then call
vhost_user_update_vring_state() and finally close the FD).
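
As a sketch, that corrected call FD ordering would look roughly like this
(again using vhost_user_update_vring_state() from the unmerged example
patch; "file" is the vhost_vring_file parsed from the message):

	int old_callfd = vq->callfd;

	/* Notify "not ready" before closing the FD the driver may
	 * still be using; only then close it and install the new one. */
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vhost_user_update_vring_state(dev, file.index);

	if (old_callfd >= 0)
		close(old_callfd);

	vq->callfd = file.fd;
	vhost_user_update_vring_state(dev, file.index);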

Thanks,
Maxime
> 
> Matan
> 
> 
> 
>> Maxime
>
  
Maxime Coquelin June 23, 2020, 2:33 p.m. UTC | #19
On 6/23/20 3:55 PM, Maxime Coquelin wrote:
> Hi Matan,
> 
> On 6/23/20 1:53 PM, Matan Azrad wrote:
>>
>>
>> From: Maxime Coquelin:
>>> On 6/23/20 11:02 AM, Matan Azrad wrote:
>>>>
>>>>
>>>> From: Maxime Coquelin:
>>>>> On 6/22/20 5:51 PM, Matan Azrad wrote:
>>>>>>
>>>>>>
>>>>>> From: Maxime Coquelin:
>>>>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
>>>>>>>>
>>>>>>>>
>>>>>>>> From: Maxime Coquelin:
>>>>>>>>> Sent: Monday, June 22, 2020 3:33 PM
>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>> definition
>>>>>>>>>
>>>>>>>>>
>>>>>>>>>
>>>>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
>>>>>>>>>>
>>>>>>>>>> Hi Maxime
>>>>>>>>>>
>>>>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>>>> definition
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>>>>>>>>>>> The issue is if you only check ready state only before and
>>>>>>>>>>>>> after the message affecting the ring is handled, it can be
>>>>>>>>>>>>> ready at both stages, while the rings have changed and state
>>>>>>>>>>>>> change callback should
>>>>>>>>>>> have been called.
>>>>>>>>>>>> But in this version I checked twice, before message handler
>>>>>>>>>>>> and after
>>>>>>>>>>> message handler, so it should catch any update.
>>>>>>>>>>>
>>>>>>>>>>> No, this is not enough, we have to check also during some
>>>>>>>>>>> handlers, so that the ready state is invalidated because
>>>>>>>>>>> sometimes it will be ready before and after the message handler
>>>>>>>>>>> but
>>>>> with different values.
>>>>>>>>>>>
>>>>>>>>>>> That's what I did in my example patch:
>>>>>>>>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
>>>>>>> virtio_net
>>>>>>>>>>> **pdev, struct VhostUserMsg *msg,
>>>>>>>>>>>
>>>>>>>>>>> ...
>>>>>>>>>>>
>>>>>>>>>>>         if (vq->kickfd >= 0)
>>>>>>>>>>>                 close(vq->kickfd);
>>>>>>>>>>> +
>>>>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>>>>>>>>>>> +
>>>>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
>>>>>>>>>>> +
>>>>>>>>>>>         vq->kickfd = file.fd;
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> Without that, the ready check will return ready before and
>>>>>>>>>>> after the kickfd changed and the driver won't be notified.
>>>>>>>>>>
>>>>>>>>>> The driver will be notified in the next
>>>>>>>>>> VHOST_USER_SET_VRING_ENABLE
>>>>>>>>> message according to v1.
>>>>>>>>>>
>>>>>>>>>> One of our assumption we agreed on in the design mail is that it
>>>>>>>>>> doesn't
>>>>>>>>> make sense that QEMU will change queue configuration without
>>>>>>>>> enabling the queue again.
>>>>>>>>>> Because of that we decided to force calling state callback again
>>>>>>>>>> when
>>>>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even if
>>> the
>>>>>>> queue is
>>>>>>>>> already ready.
>>>>>>>>>> So when driver/app see state enable->enable, it should take into
>>>>>>>>>> account
>>>>>>>>> that the queue configuration was probably changed.
>>>>>>>>>>
>>>>>>>>>> I think that this assumption is correct according to the QEMU code.
>>>>>>>>>
>>>>>>>>> Yes, this was our initial assumption.
>>>>>>>>> But now looking into the details of the implementation, I find it
>>>>>>>>> is even cleaner & clearer not to do this assumption.
>>>>>>>>>
>>>>>>>>>> That's why I prefer to collect all the ready checks callbacks
>>>>>>>>>> (queue state and
>>>>>>>>> device new\conf) to one function that will be called after the
>>>>>>>>> message
>>>>>>>>> handler:
>>>>>>>>>> Pseudo:
>>>>>>>>>>  vhost_user_update_ready_statuses() {
>>>>>>>>>> 	switch (msg):
>>>>>>>>>> 		case enable:
>>>>>>>>>> 			if(enable is 1)
>>>>>>>>>> 				force queue state =1.
>>>>>>>>>> 		case callfd
>>>>>>>>>> 		case kickfd
>>>>>>>>>> 				.....
>>>>>>>>>> 		Check queue and device ready + call callbacks if
>>> needed..
>>>>>>>>>> 		Default
>>>>>>>>>> 			Return;
>>>>>>>>>> }
>>>>>>>>>
>>>>>>>>> I find it more natural to "invalidate" ready state where it is
>>>>>>>>> handled (after vring_invalidate(), before setting new FD for call
>>>>>>>>> & kick, ...)
>>>>>>>>
>>>>>>>> I think that if you go with this direction, if the first queue
>>>>>>>> pair is invalidated,
>>>>>>> you need to notify app\driver also about device ready change.
>>>>>>>> Also it will cause 2 notifications to the driver instead of one in
>>>>>>>> case of FD
>>>>>>> change.
>>>>>>>
>>>>>>> You'll always end-up with two notifications, either Qemu has sent
>>>>>>> the disable and so you'll have one notification for the disable and
>>>>>>> one for the enable, or it didn't sent the disable and it will
>>>>>>> happen at old value invalidation time and after new value is taken into
>>> account.
>>>>>>>
>>>>>>
>>>>>> I don't see it in current QEMU behavior.
>>>>>> When working MQ I see that some virtqs get configuration message
>>>>>> while
>>>>> they are in enabled state.
>>>>>> Then, enable message is sent again later.
>>>>>
>>>>> I guess you mean the first queue pair? And it would not be in ready
>>>>> state as it would be the initial configuration of the queue?
>>>>
>>>> Even after initialization when queue is ready.
>>>>
>>>>>>
>>>>>>>> Why not to take this correct assumption and update ready state
>>>>>>>> only in one
>>>>>>> point in the code instead of doing it in all the configuration
>>>>>>> handlers
>>>>> around?
>>>>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
>>>>>>>
>>>>>>> I just looked closer at the Vhost-user spec, and I'm no more so
>>>>>>> sure this is a correct assumption:
>>>>>>>
>>>>>>> "While processing the rings (whether they are enabled or not),
>>>>>>> client must support changing some configuration aspects on the fly."
>>>>>>
>>>>>> Ok, this doesn't explain how configuration is changed on the fly.
>>>>>
>>>>> I agree it lacks a bit of clarity.
>>>>>
>>>>>> As I mentioned, QEMU sends enable message always after configuration
>>>>> message.
>>>>>
>>>>> Yes, but we should not do assumptions on current Qemu version when
>>>>> possible. Better to be safe and follow the specification, it will be more
>>> robust.
>>>>> There is also the Virtio-user PMD to take into account for example.
>>>>
>>>> I understand your point here but do you really want to be ready for any
>>> configuration update in run time?
>>>> What does it mean? How datatpath should handle configuration from
>>> control thread in run time while traffic is on?
>>>> For example, changing queue size \ addresses must stop traffic before...
>>>> Also changing FDs is very sensitive.
>>>>
>>>> It doesn't make sense to me.
>>>>
>>>> Also, according to "on the fly" direction we should not disable the queue
>>> unless enable message is coming to disable it.
>>
>> No response, so looks like you agree that it doesn't make sense.
> 
> No, my reply was general to all your comments.
> 
> With SW backend, I agree we don't need to disable the rings in case of
> asynchronous changes to the ring because we protect it with a lock, so
> we are sure the ring won't be accessed by another thread while doing the
> change.
> 
> For vDPA case that's more problematic because we have no such locking
> mechanism.
> 
> For example memory hotplug, Qemu does not seem to disable the queues so
> we need to stop the vDPA device one way or another so that it does not
> process the rings while the Vhost lib remaps the memory areas.
> 
>>>> In addition:
>>>> Do you really want to toggle vDPA drivers\app for any configuration
>>> message? It may cause queue recreation for each one (at least for mlx5).
>>>
>>> I want to have something robust and maintainable.
>>
>> Me too.
>>
>>> These messages arriving after a queue have been configured once are rare
>>> events, but this is usually the kind of things that cause maintenance burden.
>>
>> In case of guest poll mode (testpmd virtio) we all the time get callfd twice.
> 
> Right.
> 
>>> If you look at my example patch, you will understand that with my proposal,
>>> there won't be any more state change notification than with your proposal
>>> when Qemu or any other Vhost-user master send a disable request before
>>> sending the request that impact the queue state.
>>
>> we didn't talk about disable time - this one is very simple.
>>
>> Yes, In case the queue is disabled your proposal doesn't send extra notification as my.
>> But in case the queue is ready, your proposal send extra not ready notification for kikfd,callfd,set_vring_base configurations.
> 
> I think this is necessary for synchronization with the Vhost-user
> master (in case the master asks for this synchronization, like
> set_mem_table for instance when reply-ack is enabled).
> 
>>> It just adds more robustness if this unlikely event happens, by invalidating
>>> the ring state to not ready before doing the actual ring configuration change.
>>> So that this config change is not missed by the vDPA driver or the application.
>>
>> One more issue here is that there is some time that device is ready (already configured) and the first vittq-pair is not ready (your invalidate proposal for set_vring_base).
> 

Sorry, I forgot to reply here.
I am not sure what you mean about my invalidate proposal for
set_vring_base?

> 
>> It doesn’t save the concept that device is ready only in case the first virtq-pair is ready.
> 
> I understand the spec as "the device is ready as soon as the first queue
> pair is ready", but I might be wrong.
> 
> Do you suggest to call the dev_close() vDPA callback and the
> destroy_device() application callback as soon as one of the ring of the
> first queue pair receive a disable request or, with my patch, when one
> of the rings receives a request that changes the ring state?
> 
>>
>> I will not insist anymore on waiting for enable for notifying although I not fan with it.
>>
>> So, I suggest to create 1 notification function to be called after message handler and before reply.
>> This function is the only one which notify ready states in the next options:
>>
>> 1. virtq ready state is changed in the queue.
>> 2. virtq ready state stays on after configuration message handler.
>> 3. device state will be enabled when the first queue pair is ready.
> 
> IIUC, it will not disable the queues when there is a state change, is
> that correct? If so, I think it does not work with memory hotplug case I
> mentioned earlier.
> 
> Even for the callfd double change it can be problematic as Vhost-lib
> will close the first one while it will still be used by the driver (Btw,
> I see my example patch is also buggy in this regards, it should reset
> the call_fd value in the virtqueue, then call
> vhost_user_update_vring_state() and finally close the FD).
> 
> Thanks,
> Maxime
>>
>> Matan
>>
>>
>>
>>> Maxime
>>
>
  
Matan Azrad June 23, 2020, 2:52 p.m. UTC | #20
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, June 23, 2020 4:56 PM
> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> <xiao.w.wang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> 
> Hi Matan,
> 
> On 6/23/20 1:53 PM, Matan Azrad wrote:
> >
> >
> > From: Maxime Coquelin:
> >> On 6/23/20 11:02 AM, Matan Azrad wrote:
> >>>
> >>>
> >>> From: Maxime Coquelin:
> >>>> On 6/22/20 5:51 PM, Matan Azrad wrote:
> >>>>>
> >>>>>
> >>>>> From: Maxime Coquelin:
> >>>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
> >>>>>>>
> >>>>>>>
> >>>>>>> From: Maxime Coquelin:
> >>>>>>>> Sent: Monday, June 22, 2020 3:33 PM
> >>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>>>> <xiao.w.wang@intel.com>
> >>>>>>>> Cc: dev@dpdk.org
> >>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>>>> definition
> >>>>>>>>
> >>>>>>>>
> >>>>>>>>
> >>>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
> >>>>>>>>>
> >>>>>>>>> Hi Maxime
> >>>>>>>>>
> >>>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >>>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
> >>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>>>>>> <xiao.w.wang@intel.com>
> >>>>>>>>>> Cc: dev@dpdk.org
> >>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>>>>>> definition
> >>>>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
> >>>>>>>>>>>> The issue is if you only check ready state only before and
> >>>>>>>>>>>> after the message affecting the ring is handled, it can be
> >>>>>>>>>>>> ready at both stages, while the rings have changed and
> >>>>>>>>>>>> state change callback should
> >>>>>>>>>> have been called.
> >>>>>>>>>>> But in this version I checked twice, before message handler
> >>>>>>>>>>> and after
> >>>>>>>>>> message handler, so it should catch any update.
> >>>>>>>>>>
> >>>>>>>>>> No, this is not enough, we have to check also during some
> >>>>>>>>>> handlers, so that the ready state is invalidated because
> >>>>>>>>>> sometimes it will be ready before and after the message
> >>>>>>>>>> handler but
> >>>> with different values.
> >>>>>>>>>>
> >>>>>>>>>> That's what I did in my example patch:
> >>>>>>>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
> >>>>>> virtio_net
> >>>>>>>>>> **pdev, struct VhostUserMsg *msg,
> >>>>>>>>>>
> >>>>>>>>>> ...
> >>>>>>>>>>
> >>>>>>>>>>         if (vq->kickfd >= 0)
> >>>>>>>>>>                 close(vq->kickfd);
> >>>>>>>>>> +
> >>>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> >>>>>>>>>> +
> >>>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
> >>>>>>>>>> +
> >>>>>>>>>>         vq->kickfd = file.fd;
> >>>>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>> Without that, the ready check will return ready before and
> >>>>>>>>>> after the kickfd changed and the driver won't be notified.
> >>>>>>>>>
> >>>>>>>>> The driver will be notified in the next
> >>>>>>>>> VHOST_USER_SET_VRING_ENABLE
> >>>>>>>> message according to v1.
> >>>>>>>>>
> >>>>>>>>> One of our assumption we agreed on in the design mail is that
> >>>>>>>>> it doesn't
> >>>>>>>> make sense that QEMU will change queue configuration without
> >>>>>>>> enabling the queue again.
> >>>>>>>>> Because of that we decided to force calling state callback
> >>>>>>>>> again when
> >>>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even
> if
> >> the
> >>>>>> queue is
> >>>>>>>> already ready.
> >>>>>>>>> So when driver/app see state enable->enable, it should take
> >>>>>>>>> into account
> >>>>>>>> that the queue configuration was probably changed.
> >>>>>>>>>
> >>>>>>>>> I think that this assumption is correct according to the QEMU
> code.
> >>>>>>>>
> >>>>>>>> Yes, this was our initial assumption.
> >>>>>>>> But now looking into the details of the implementation, I find
> >>>>>>>> it is even cleaner & clearer not to do this assumption.
> >>>>>>>>
> >>>>>>>>> That's why I prefer to collect all the ready checks callbacks
> >>>>>>>>> (queue state and
> >>>>>>>> device new\conf) to one function that will be called after the
> >>>>>>>> message
> >>>>>>>> handler:
> >>>>>>>>> Pseudo:
> >>>>>>>>>  vhost_user_update_ready_statuses() {
> >>>>>>>>> 	switch (msg):
> >>>>>>>>> 		case enable:
> >>>>>>>>> 			if(enable is 1)
> >>>>>>>>> 				force queue state =1.
> >>>>>>>>> 		case callfd
> >>>>>>>>> 		case kickfd
> >>>>>>>>> 				.....
> >>>>>>>>> 		Check queue and device ready + call callbacks if
> >> needed..
> >>>>>>>>> 		Default
> >>>>>>>>> 			Return;
> >>>>>>>>> }
> >>>>>>>>
> >>>>>>>> I find it more natural to "invalidate" ready state where it is
> >>>>>>>> handled (after vring_invalidate(), before setting new FD for
> >>>>>>>> call & kick, ...)
> >>>>>>>
> >>>>>>> I think that if you go with this direction, if the first queue
> >>>>>>> pair is invalidated,
> >>>>>> you need to notify app\driver also about device ready change.
> >>>>>>> Also it will cause 2 notifications to the driver instead of one
> >>>>>>> in case of FD
> >>>>>> change.
> >>>>>>
> >>>>>> You'll always end-up with two notifications, either Qemu has sent
> >>>>>> the disable and so you'll have one notification for the disable
> >>>>>> and one for the enable, or it didn't sent the disable and it will
> >>>>>> happen at old value invalidation time and after new value is
> >>>>>> taken into
> >> account.
> >>>>>>
> >>>>>
> >>>>> I don't see it in current QEMU behavior.
> >>>>> When working MQ I see that some virtqs get configuration message
> >>>>> while
> >>>> they are in enabled state.
> >>>>> Then, enable message is sent again later.
> >>>>
> >>>> I guess you mean the first queue pair? And it would not be in ready
> >>>> state as it would be the initial configuration of the queue?
> >>>
> >>> Even after initialization when queue is ready.
> >>>
> >>>>>
> >>>>>>> Why not to take this correct assumption and update ready state
> >>>>>>> only in one
> >>>>>> point in the code instead of doing it in all the configuration
> >>>>>> handlers
> >>>> around?
> >>>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
> >>>>>>
> >>>>>> I just looked closer at the Vhost-user spec, and I'm no more so
> >>>>>> sure this is a correct assumption:
> >>>>>>
> >>>>>> "While processing the rings (whether they are enabled or not),
> >>>>>> client must support changing some configuration aspects on the fly."
> >>>>>
> >>>>> Ok, this doesn't explain how configuration is changed on the fly.
> >>>>
> >>>> I agree it lacks a bit of clarity.
> >>>>
> >>>>> As I mentioned, QEMU sends enable message always after
> >>>>> configuration
> >>>> message.
> >>>>
> >>>> Yes, but we should not do assumptions on current Qemu version when
> >>>> possible. Better to be safe and follow the specification, it will
> >>>> be more
> >> robust.
> >>>> There is also the Virtio-user PMD to take into account for example.
> >>>
> >>> I understand your point here but do you really want to be ready for
> >>> any
> >> configuration update in run time?
> >>> What does it mean? How datatpath should handle configuration from
> >> control thread in run time while traffic is on?
> >>> For example, changing queue size \ addresses must stop traffic before...
> >>> Also changing FDs is very sensitive.
> >>>
> >>> It doesn't make sense to me.
> >>>
> >>> Also, according to "on the fly" direction we should not disable the
> >>> queue
> >> unless enable message is coming to disable it.
> >
> > No response, so looks like you agree that it doesn't make sense.
> 
> No, my reply was general to all your comments.
> 
> With SW backend, I agree we don't need to disable the rings in case of
> asynchronous changes to the ring because we protect it with a lock, so we
> are sure the ring won't be accessed by another thread while doing the
> change.
> 
> For vDPA case that's more problematic because we have no such locking
> mechanism.
> 
> For example memory hotplug, Qemu does not seem to disable the queues
> so we need to stop the vDPA device one way or another so that it does not
> process the rings while the Vhost lib remaps the memory areas.
> 
> >>> In addition:
> >>> Do you really want to toggle vDPA drivers\app for any configuration
> >> message? It may cause queue recreation for each one (at least for mlx5).
> >>
> >> I want to have something robust and maintainable.
> >
> > Me too.
> >
> >> These messages arriving after a queue have been configured once are
> >> rare events, but this is usually the kind of things that cause maintenance
> burden.
> >
> > In case of guest poll mode (testpmd virtio) we all the time get callfd twice.
> 
> Right.
> 
> >> If you look at my example patch, you will understand that with my
> >> proposal, there won't be any more state change notification than with
> >> your proposal when Qemu or any other Vhost-user master send a disable
> >> request before sending the request that impact the queue state.
> >
> > we didn't talk about disable time - this one is very simple.
> >
> > Yes, In case the queue is disabled your proposal doesn't send extra
> notification as my.
> > But in case the queue is ready, your proposal send extra not ready
> notification for kikfd,callfd,set_vring_base configurations.
> 
> I think this is necessary for synchronization with the Vhost-user master (in
> case the master asks for this synchronization, like set_mem_table for
> instance when reply-ack is enabled).
> 
> >> It just adds more robustness if this unlikely event happens, by
> >> invalidating the ring state to not ready before doing the actual ring
> configuration change.
> >> So that this config change is not missed by the vDPA driver or the
> application.
> >
> > One more issue here is that there is some time that device is ready (already
> configured) and the first vittq-pair is not ready (your invalidate proposal for
> set_vring_base).
> 
> 
> 
> > It doesn’t save the concept that device is ready only in case the first virtq-
> pair is ready.
> 
> I understand the spec as "the device is ready as soon as the first queue pair is
> ready", but I might be wrong.
> 
> Do you suggest to call the dev_close() vDPA callback and the
> destroy_device() application callback as soon as one of the ring of the first
> queue pair receive a disable request or, with my patch, when one of the
> rings receives a request that changes the ring state?

I mean, your proposal may actually make the first virtq-pair ready state disabled while the device is ready.
So, yes, it leads to calling device close\destroy.

> > I will not insist anymore on waiting for enable for notifying although I not
> fan with it.
> >
> > So, I suggest to create 1 notification function to be called after message
> handler and before reply.
> > This function is the only one which notify ready states in the next options:
> >
> > 1. virtq ready state is changed in the queue.
> > 2. virtq ready state stays on after configuration message handler.
> > 3. device state will be enabled when the first queue pair is ready.
> 
> IIUC, it will not disable the queues when there is a state change, is that
> correct? If so, I think it does not work with memory hotplug case I mentioned
> earlier.

It will do enable again, which means something was modified.

> Even for the callfd double change it can be problematic as Vhost-lib will close
> the first one while it will still be used by the driver (Btw, I see my example
> patch is also buggy in this regards, it should reset the call_fd value in the
> virtqueue, then call
> vhost_user_update_vring_state() and finally close the FD).

Yes, this one leads to a different handling for each message.

Maybe it leads to a new queue modify operation.
So, the queue doesn't send the state - it just does the configuration change on the fly.
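
Such a queue modify operation does not exist in rte_vdpa_dev_ops today;
purely as a hypothetical illustration of the idea, it could take a shape
like this (all names below are made up for the discussion):

/* Hypothetical vDPA callback: apply a single ring configuration change
 * in place, without going through a full disable/enable cycle. */
struct rte_vdpa_queue_cfg {
	int kickfd;
	int callfd;
	/* ring addresses, last_avail_idx, queue size, ... */
};

typedef int (*vdpa_queue_modify_t)(int vid, uint16_t qid,
				   const struct rte_vdpa_queue_cfg *cfg);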

What do you think?

 
> Thanks,
> Maxime
> >
> > Matan
> >
> >
> >
> >> Maxime
> >
  
Maxime Coquelin June 23, 2020, 3:18 p.m. UTC | #21
On 6/23/20 4:52 PM, Matan Azrad wrote:
> 
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Tuesday, June 23, 2020 4:56 PM
>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>> <xiao.w.wang@intel.com>
>> Cc: dev@dpdk.org
>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>
>> Hi Matan,
>>
>> On 6/23/20 1:53 PM, Matan Azrad wrote:
>>>
>>>
>>> From: Maxime Coquelin:
>>>> On 6/23/20 11:02 AM, Matan Azrad wrote:
>>>>>
>>>>>
>>>>> From: Maxime Coquelin:
>>>>>> On 6/22/20 5:51 PM, Matan Azrad wrote:
>>>>>>>
>>>>>>>
>>>>>>> From: Maxime Coquelin:
>>>>>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
>>>>>>>>>
>>>>>>>>>
>>>>>>>>> From: Maxime Coquelin:
>>>>>>>>>> Sent: Monday, June 22, 2020 3:33 PM
>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>>> definition
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
>>>>>>>>>>>
>>>>>>>>>>> Hi Maxime
>>>>>>>>>>>
>>>>>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>>>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
>>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>>>>> definition
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>>>>>>>>>>>> The issue is if you only check ready state only before and
>>>>>>>>>>>>>> after the message affecting the ring is handled, it can be
>>>>>>>>>>>>>> ready at both stages, while the rings have changed and
>>>>>>>>>>>>>> state change callback should
>>>>>>>>>>>> have been called.
>>>>>>>>>>>>> But in this version I checked twice, before message handler
>>>>>>>>>>>>> and after
>>>>>>>>>>>> message handler, so it should catch any update.
>>>>>>>>>>>>
>>>>>>>>>>>> No, this is not enough, we have to check also during some
>>>>>>>>>>>> handlers, so that the ready state is invalidated because
>>>>>>>>>>>> sometimes it will be ready before and after the message
>>>>>>>>>>>> handler but
>>>>>> with different values.
>>>>>>>>>>>>
>>>>>>>>>>>> That's what I did in my example patch:
>>>>>>>>>>>> @@ -1847,15 +1892,16 @@ vhost_user_set_vring_kick(struct
>>>>>>>> virtio_net
>>>>>>>>>>>> **pdev, struct VhostUserMsg *msg,
>>>>>>>>>>>>
>>>>>>>>>>>> ...
>>>>>>>>>>>>
>>>>>>>>>>>>         if (vq->kickfd >= 0)
>>>>>>>>>>>>                 close(vq->kickfd);
>>>>>>>>>>>> +
>>>>>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>>>>>>>>>>>> +
>>>>>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
>>>>>>>>>>>> +
>>>>>>>>>>>>         vq->kickfd = file.fd;
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>> Without that, the ready check will return ready before and
>>>>>>>>>>>> after the kickfd changed and the driver won't be notified.
>>>>>>>>>>>
>>>>>>>>>>> The driver will be notified in the next
>>>>>>>>>>> VHOST_USER_SET_VRING_ENABLE
>>>>>>>>>> message according to v1.
>>>>>>>>>>>
>>>>>>>>>>> One of our assumption we agreed on in the design mail is that
>>>>>>>>>>> it doesn't
>>>>>>>>>> make sense that QEMU will change queue configuration without
>>>>>>>>>> enabling the queue again.
>>>>>>>>>>> Because of that we decided to force calling state callback
>>>>>>>>>>> again when
>>>>>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message even
>> if
>>>> the
>>>>>>>> queue is
>>>>>>>>>> already ready.
>>>>>>>>>>> So when driver/app see state enable->enable, it should take
>>>>>>>>>>> into account
>>>>>>>>>> that the queue configuration was probably changed.
>>>>>>>>>>>
>>>>>>>>>>> I think that this assumption is correct according to the QEMU
>> code.
>>>>>>>>>>
>>>>>>>>>> Yes, this was our initial assumption.
>>>>>>>>>> But now looking into the details of the implementation, I find
>>>>>>>>>> it is even cleaner & clearer not to do this assumption.
>>>>>>>>>>
>>>>>>>>>>> That's why I prefer to collect all the ready checks callbacks
>>>>>>>>>>> (queue state and
>>>>>>>>>> device new\conf) to one function that will be called after the
>>>>>>>>>> message
>>>>>>>>>> handler:
>>>>>>>>>>> Pseudo:
>>>>>>>>>>>  vhost_user_update_ready_statuses() {
>>>>>>>>>>> 	switch (msg):
>>>>>>>>>>> 		case enable:
>>>>>>>>>>> 			if(enable is 1)
>>>>>>>>>>> 				force queue state =1.
>>>>>>>>>>> 		case callfd
>>>>>>>>>>> 		case kickfd
>>>>>>>>>>> 				.....
>>>>>>>>>>> 		Check queue and device ready + call callbacks if
>>>> needed..
>>>>>>>>>>> 		Default
>>>>>>>>>>> 			Return;
>>>>>>>>>>> }
>>>>>>>>>>
>>>>>>>>>> I find it more natural to "invalidate" ready state where it is
>>>>>>>>>> handled (after vring_invalidate(), before setting new FD for
>>>>>>>>>> call & kick, ...)
>>>>>>>>>
>>>>>>>>> I think that if you go with this direction, if the first queue
>>>>>>>>> pair is invalidated,
>>>>>>>> you need to notify app\driver also about device ready change.
>>>>>>>>> Also it will cause 2 notifications to the driver instead of one
>>>>>>>>> in case of FD
>>>>>>>> change.
>>>>>>>>
>>>>>>>> You'll always end-up with two notifications, either Qemu has sent
>>>>>>>> the disable and so you'll have one notification for the disable
>>>>>>>> and one for the enable, or it didn't sent the disable and it will
>>>>>>>> happen at old value invalidation time and after new value is
>>>>>>>> taken into
>>>> account.
>>>>>>>>
>>>>>>>
>>>>>>> I don't see it in current QEMU behavior.
>>>>>>> When working MQ I see that some virtqs get configuration message
>>>>>>> while
>>>>>> they are in enabled state.
>>>>>>> Then, enable message is sent again later.
>>>>>>
>>>>>> I guess you mean the first queue pair? And it would not be in ready
>>>>>> state as it would be the initial configuration of the queue?
>>>>>
>>>>> Even after initialization when queue is ready.
>>>>>
>>>>>>>
>>>>>>>>> Why not to take this correct assumption and update ready state
>>>>>>>>> only in one
>>>>>>>> point in the code instead of doing it in all the configuration
>>>>>>>> handlers
>>>>>> around?
>>>>>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
>>>>>>>>
>>>>>>>> I just looked closer at the Vhost-user spec, and I'm no more so
>>>>>>>> sure this is a correct assumption:
>>>>>>>>
>>>>>>>> "While processing the rings (whether they are enabled or not),
>>>>>>>> client must support changing some configuration aspects on the fly."
>>>>>>>
>>>>>>> Ok, this doesn't explain how configuration is changed on the fly.
>>>>>>
>>>>>> I agree it lacks a bit of clarity.
>>>>>>
>>>>>>> As I mentioned, QEMU sends enable message always after
>>>>>>> configuration
>>>>>> message.
>>>>>>
>>>>>> Yes, but we should not do assumptions on current Qemu version when
>>>>>> possible. Better to be safe and follow the specification, it will
>>>>>> be more
>>>> robust.
>>>>>> There is also the Virtio-user PMD to take into account for example.
>>>>>
>>>>> I understand your point here but do you really want to be ready for
>>>>> any
>>>> configuration update in run time?
>>>>> What does it mean? How datatpath should handle configuration from
>>>> control thread in run time while traffic is on?
>>>>> For example, changing queue size \ addresses must stop traffic before...
>>>>> Also changing FDs is very sensitive.
>>>>>
>>>>> It doesn't make sense to me.
>>>>>
>>>>> Also, according to "on the fly" direction we should not disable the
>>>>> queue
>>>> unless enable message is coming to disable it.
>>>
>>> No response, so looks like you agree that it doesn't make sense.
>>
>> No, my reply was general to all your comments.
>>
>> With SW backend, I agree we don't need to disable the rings in case of
>> asynchronous changes to the ring because we protect it with a lock, so we
>> are sure the ring won't be accessed by another thread while doing the
>> change.
>>
>> For vDPA case that's more problematic because we have no such locking
>> mechanism.
>>
>> For example memory hotplug, Qemu does not seem to disable the queues
>> so we need to stop the vDPA device one way or another so that it does not
>> process the rings while the Vhost lib remaps the memory areas.
>>
>>>>> In addition:
>>>>> Do you really want to toggle vDPA drivers\app for any configuration
>>>> message? It may cause queue recreation for each one (at least for mlx5).
>>>>
>>>> I want to have something robust and maintainable.
>>>
>>> Me too.
>>>
>>>> These messages arriving after a queue have been configured once are
>>>> rare events, but this is usually the kind of things that cause maintenance
>> burden.
>>>
>>> In case of guest poll mode (testpmd virtio) we all the time get callfd twice.
>>
>> Right.
>>
>>>> If you look at my example patch, you will understand that with my
>>>> proposal, there won't be any more state change notification than with
>>>> your proposal when Qemu or any other Vhost-user master send a disable
>>>> request before sending the request that impact the queue state.
>>>
>>> we didn't talk about disable time - this one is very simple.
>>>
>>> Yes, In case the queue is disabled your proposal doesn't send extra
>> notification as my.
>>> But in case the queue is ready, your proposal send extra not ready
>> notification for kikfd,callfd,set_vring_base configurations.
>>
>> I think this is necessary for synchronization with the Vhost-user master (in
>> case the master asks for this synchronization, like set_mem_table for
>> instance when reply-ack is enabled).
>>
>>>> It just adds more robustness if this unlikely event happens, by
>>>> invalidating the ring state to not ready before doing the actual ring
>> configuration change.
>>>> So that this config change is not missed by the vDPA driver or the
>> application.
>>>
>>> One more issue here is that there is some time that device is ready (already
>> configured) and the first vittq-pair is not ready (your invalidate proposal for
>> set_vring_base).
>>
>>
>>
>>> It doesn’t save the concept that device is ready only in case the first virtq-
>> pair is ready.
>>
>> I understand the spec as "the device is ready as soon as the first queue pair is
>> ready", but I might be wrong.
>>
>> Do you suggest to call the dev_close() vDPA callback and the
>> destroy_device() application callback as soon as one of the ring of the first
>> queue pair receive a disable request or, with my patch, when one of the
>> rings receives a request that changes the ring state?
> 
> I means, your proposal actually may make first virtq-pair ready state disabled when device ready.
> So, yes, it leads to call device close\destroy.

No, it doesn't: there is no call to .dev_close()/.destroy_device() with
my patch if the first queue pair gets disabled.

>>> I will not insist anymore on waiting for enable for notifying although I not
>> fan with it.
>>>
>>> So, I suggest to create 1 notification function to be called after message
>> handler and before reply.
>>> This function is the only one which notify ready states in the next options:
>>>
>>> 1. virtq ready state is changed in the queue.
>>> 2. virtq ready state stays on after configuration message handler.
>>> 3. device state will be enabled when the first queue pair is ready.
>>
>> IIUC, it will not disable the queues when there is a state change, is that
>> correct? If so, I think it does not work with memory hotplug case I mentioned
>> earlier.
> 
> It will do enable again which mean - something was modified.

Ok, thanks for the clarification.

I think it is not enough for the examples I gave below. For
set_mem_table, we need to stop the device from processing the vrings
before the set_mem_table handler calls munmap(), and re-enable it
after the mmap() (I got that wrong in my example patch: I only did it
after the munmap/mmap had happened, which is too late).
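Something along these lines, as a sketch only (vhost_user_notify_vring_state()
and remap_guest_memory() are placeholder names for illustration, not existing
vhost functions):

/* Sketch of the ordering discussed above for set_mem_table. */
static int
vhost_user_set_mem_table_sketch(struct virtio_net *dev,
		struct VhostUserMsg *msg)
{
	uint32_t i;

	/* 1. Tell the driver/app to stop processing the rings. */
	for (i = 0; i < dev->nr_vring; i++)
		vhost_user_notify_vring_state(dev, i, 0);

	/* 2. Only now is it safe to munmap() the old regions and
	 *    mmap() the new ones. */
	if (remap_guest_memory(dev, msg) < 0)
		return -1;

	/* 3. Re-enable the rings that were ready before the update. */
	for (i = 0; i < dev->nr_vring; i++)
		vhost_user_notify_vring_state(dev, i, 1);

	return 0;
}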

>> Even for the callfd double change it can be problematic as Vhost-lib will close
>> the first one while it will still be used by the driver (Btw, I see my example
>> patch is also buggy in this regards, it should reset the call_fd value in the
>> virtqueue, then call
>> vhost_user_update_vring_state() and finally close the FD).
> 
> Yes, this one leads for different handle for each message.
> 
> Maybe it leads for new queue modify operation.
> So, queue doesn't send the state - just does configuration change on the fly.
> 
> What do you think?

I think that configuration on the fly doesn't fly.
We would at least need to stop the device from processing the rings in
the memory hotplug case, so why not just send a disable notification?

And for the double callfd change, it does not look right to me not to
ask the driver to stop using the old FD before it is closed, does it?
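For the call FD, the ordering I described above would look roughly like this
(sketch only: vhost_user_replace_callfd() is an illustrative helper, and
vhost_user_update_vring_state() comes from my example patch, it is not an
upstream API):

static void
vhost_user_replace_callfd(struct virtio_net *dev, struct vhost_virtqueue *vq,
		int vring_idx, int new_fd)
{
	int old_fd = vq->callfd;

	/* Invalidate the old value and let the driver know the ring is
	 * not ready *before* the FD is closed. */
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vhost_user_update_vring_state(dev, vring_idx);

	if (old_fd >= 0)
		close(old_fd);

	/* Set the new FD; the ring can now be reported ready again. */
	vq->callfd = new_fd;
	vhost_user_update_vring_state(dev, vring_idx);
}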

Thanks,
Maxime

>  
>> Thanks,
>> Maxime
>>>
>>> Matan
>>>
>>>
>>>
>>>> Maxime
>>>
>
  
Matan Azrad June 24, 2020, 5:54 a.m. UTC | #22
Hi Maxime

Good morning

From: Maxime Coquelin:
> On 6/23/20 4:52 PM, Matan Azrad wrote:
> >
> >
> >> -----Original Message-----
> >> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >> Sent: Tuesday, June 23, 2020 4:56 PM
> >> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >> <xiao.w.wang@intel.com>
> >> Cc: dev@dpdk.org
> >> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> >>
> >> Hi Matan,
> >>
> >> On 6/23/20 1:53 PM, Matan Azrad wrote:
> >>>
> >>>
> >>> From: Maxime Coquelin:
> >>>> On 6/23/20 11:02 AM, Matan Azrad wrote:
> >>>>>
> >>>>>
> >>>>> From: Maxime Coquelin:
> >>>>>> On 6/22/20 5:51 PM, Matan Azrad wrote:
> >>>>>>>
> >>>>>>>
> >>>>>>> From: Maxime Coquelin:
> >>>>>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
> >>>>>>>>>
> >>>>>>>>>
> >>>>>>>>> From: Maxime Coquelin:
> >>>>>>>>>> Sent: Monday, June 22, 2020 3:33 PM
> >>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>>>>>> <xiao.w.wang@intel.com>
> >>>>>>>>>> Cc: dev@dpdk.org
> >>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>>>>>> definition
> >>>>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>>
> >>>>>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
> >>>>>>>>>>>
> >>>>>>>>>>> Hi Maxime
> >>>>>>>>>>>
> >>>>>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >>>>>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
> >>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>>>>>>>> <xiao.w.wang@intel.com>
> >>>>>>>>>>>> Cc: dev@dpdk.org
> >>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>>>>>>>> definition
> >>>>>>>>>>>>
> >>>>>>>>>>>>
> >>>>>>>>>>>>
> >>>>>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
> >>>>>>>>>>>>>> The issue is if you only check ready state only before
> >>>>>>>>>>>>>> and after the message affecting the ring is handled, it
> >>>>>>>>>>>>>> can be ready at both stages, while the rings have changed
> >>>>>>>>>>>>>> and state change callback should
> >>>>>>>>>>>> have been called.
> >>>>>>>>>>>>> But in this version I checked twice, before message
> >>>>>>>>>>>>> handler and after
> >>>>>>>>>>>> message handler, so it should catch any update.
> >>>>>>>>>>>>
> >>>>>>>>>>>> No, this is not enough, we have to check also during some
> >>>>>>>>>>>> handlers, so that the ready state is invalidated because
> >>>>>>>>>>>> sometimes it will be ready before and after the message
> >>>>>>>>>>>> handler but
> >>>>>> with different values.
> >>>>>>>>>>>>
> >>>>>>>>>>>> That's what I did in my example patch:
> >>>>>>>>>>>> @@ -1847,15 +1892,16 @@
> vhost_user_set_vring_kick(struct
> >>>>>>>> virtio_net
> >>>>>>>>>>>> **pdev, struct VhostUserMsg *msg,
> >>>>>>>>>>>>
> >>>>>>>>>>>> ...
> >>>>>>>>>>>>
> >>>>>>>>>>>>         if (vq->kickfd >= 0)
> >>>>>>>>>>>>                 close(vq->kickfd);
> >>>>>>>>>>>> +
> >>>>>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> >>>>>>>>>>>> +
> >>>>>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
> >>>>>>>>>>>> +
> >>>>>>>>>>>>         vq->kickfd = file.fd;
> >>>>>>>>>>>>
> >>>>>>>>>>>>
> >>>>>>>>>>>> Without that, the ready check will return ready before and
> >>>>>>>>>>>> after the kickfd changed and the driver won't be notified.
> >>>>>>>>>>>
> >>>>>>>>>>> The driver will be notified in the next
> >>>>>>>>>>> VHOST_USER_SET_VRING_ENABLE
> >>>>>>>>>> message according to v1.
> >>>>>>>>>>>
> >>>>>>>>>>> One of our assumption we agreed on in the design mail is
> >>>>>>>>>>> that it doesn't
> >>>>>>>>>> make sense that QEMU will change queue configuration
> without
> >>>>>>>>>> enabling the queue again.
> >>>>>>>>>>> Because of that we decided to force calling state callback
> >>>>>>>>>>> again when
> >>>>>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message
> even
> >> if
> >>>> the
> >>>>>>>> queue is
> >>>>>>>>>> already ready.
> >>>>>>>>>>> So when driver/app see state enable->enable, it should take
> >>>>>>>>>>> into account
> >>>>>>>>>> that the queue configuration was probably changed.
> >>>>>>>>>>>
> >>>>>>>>>>> I think that this assumption is correct according to the
> >>>>>>>>>>> QEMU
> >> code.
> >>>>>>>>>>
> >>>>>>>>>> Yes, this was our initial assumption.
> >>>>>>>>>> But now looking into the details of the implementation, I
> >>>>>>>>>> find it is even cleaner & clearer not to do this assumption.
> >>>>>>>>>>
> >>>>>>>>>>> That's why I prefer to collect all the ready checks
> >>>>>>>>>>> callbacks (queue state and
> >>>>>>>>>> device new\conf) to one function that will be called after
> >>>>>>>>>> the message
> >>>>>>>>>> handler:
> >>>>>>>>>>> Pseudo:
> >>>>>>>>>>>  vhost_user_update_ready_statuses() {
> >>>>>>>>>>> 	switch (msg):
> >>>>>>>>>>> 		case enable:
> >>>>>>>>>>> 			if(enable is 1)
> >>>>>>>>>>> 				force queue state =1.
> >>>>>>>>>>> 		case callfd
> >>>>>>>>>>> 		case kickfd
> >>>>>>>>>>> 				.....
> >>>>>>>>>>> 		Check queue and device ready + call callbacks if
> >>>> needed..
> >>>>>>>>>>> 		Default
> >>>>>>>>>>> 			Return;
> >>>>>>>>>>> }
> >>>>>>>>>>
> >>>>>>>>>> I find it more natural to "invalidate" ready state where it
> >>>>>>>>>> is handled (after vring_invalidate(), before setting new FD
> >>>>>>>>>> for call & kick, ...)
> >>>>>>>>>
> >>>>>>>>> I think that if you go with this direction, if the first queue
> >>>>>>>>> pair is invalidated,
> >>>>>>>> you need to notify app\driver also about device ready change.
> >>>>>>>>> Also it will cause 2 notifications to the driver instead of
> >>>>>>>>> one in case of FD
> >>>>>>>> change.
> >>>>>>>>
> >>>>>>>> You'll always end-up with two notifications, either Qemu has
> >>>>>>>> sent the disable and so you'll have one notification for the
> >>>>>>>> disable and one for the enable, or it didn't sent the disable
> >>>>>>>> and it will happen at old value invalidation time and after new
> >>>>>>>> value is taken into
> >>>> account.
> >>>>>>>>
> >>>>>>>
> >>>>>>> I don't see it in current QEMU behavior.
> >>>>>>> When working MQ I see that some virtqs get configuration
> message
> >>>>>>> while
> >>>>>> they are in enabled state.
> >>>>>>> Then, enable message is sent again later.
> >>>>>>
> >>>>>> I guess you mean the first queue pair? And it would not be in
> >>>>>> ready state as it would be the initial configuration of the queue?
> >>>>>
> >>>>> Even after initialization when queue is ready.
> >>>>>
> >>>>>>>
> >>>>>>>>> Why not to take this correct assumption and update ready state
> >>>>>>>>> only in one
> >>>>>>>> point in the code instead of doing it in all the configuration
> >>>>>>>> handlers
> >>>>>> around?
> >>>>>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
> >>>>>>>>
> >>>>>>>> I just looked closer at the Vhost-user spec, and I'm no more so
> >>>>>>>> sure this is a correct assumption:
> >>>>>>>>
> >>>>>>>> "While processing the rings (whether they are enabled or not),
> >>>>>>>> client must support changing some configuration aspects on the
> fly."
> >>>>>>>
> >>>>>>> Ok, this doesn't explain how configuration is changed on the fly.
> >>>>>>
> >>>>>> I agree it lacks a bit of clarity.
> >>>>>>
> >>>>>>> As I mentioned, QEMU sends enable message always after
> >>>>>>> configuration
> >>>>>> message.
> >>>>>>
> >>>>>> Yes, but we should not do assumptions on current Qemu version
> >>>>>> when possible. Better to be safe and follow the specification, it
> >>>>>> will be more
> >>>> robust.
> >>>>>> There is also the Virtio-user PMD to take into account for example.
> >>>>>
> >>>>> I understand your point here but do you really want to be ready
> >>>>> for any
> >>>> configuration update in run time?
> >>>>> What does it mean? How datatpath should handle configuration from
> >>>> control thread in run time while traffic is on?
> >>>>> For example, changing queue size \ addresses must stop traffic
> before...
> >>>>> Also changing FDs is very sensitive.
> >>>>>
> >>>>> It doesn't make sense to me.
> >>>>>
> >>>>> Also, according to "on the fly" direction we should not disable
> >>>>> the queue
> >>>> unless enable message is coming to disable it.
> >>>
> >>> No response, so looks like you agree that it doesn't make sense.
> >>
> >> No, my reply was general to all your comments.
> >>
> >> With SW backend, I agree we don't need to disable the rings in case
> >> of asynchronous changes to the ring because we protect it with a
> >> lock, so we are sure the ring won't be accessed by another thread
> >> while doing the change.
> >>
> >> For vDPA case that's more problematic because we have no such locking
> >> mechanism.
> >>
> >> For example memory hotplug, Qemu does not seem to disable the
> queues
> >> so we need to stop the vDPA device one way or another so that it does
> >> not process the rings while the Vhost lib remaps the memory areas.
> >>
> >>>>> In addition:
> >>>>> Do you really want to toggle vDPA drivers\app for any
> >>>>> configuration
> >>>> message? It may cause queue recreation for each one (at least for
> mlx5).
> >>>>
> >>>> I want to have something robust and maintainable.
> >>>
> >>> Me too.
> >>>
> >>>> These messages arriving after a queue have been configured once are
> >>>> rare events, but this is usually the kind of things that cause
> >>>> maintenance
> >> burden.
> >>>
> >>> In case of guest poll mode (testpmd virtio) we all the time get callfd
> twice.
> >>
> >> Right.
> >>
> >>>> If you look at my example patch, you will understand that with my
> >>>> proposal, there won't be any more state change notification than
> >>>> with your proposal when Qemu or any other Vhost-user master send a
> >>>> disable request before sending the request that impact the queue
> state.
> >>>
> >>> we didn't talk about disable time - this one is very simple.
> >>>
> >>> Yes, In case the queue is disabled your proposal doesn't send extra
> >> notification as my.
> >>> But in case the queue is ready, your proposal send extra not ready
> >> notification for kikfd,callfd,set_vring_base configurations.
> >>
> >> I think this is necessary for synchronization with the Vhost-user
> >> master (in case the master asks for this synchronization, like
> >> set_mem_table for instance when reply-ack is enabled).
> >>
> >>>> It just adds more robustness if this unlikely event happens, by
> >>>> invalidating the ring state to not ready before doing the actual
> >>>> ring
> >> configuration change.
> >>>> So that this config change is not missed by the vDPA driver or the
> >> application.
> >>>
> >>> One more issue here is that there is some time that device is ready
> >>> (already
> >> configured) and the first vittq-pair is not ready (your invalidate
> >> proposal for set_vring_base).
> >>
> >>
> >>
> >>> It doesn’t save the concept that device is ready only in case the
> >>> first virtq-
> >> pair is ready.
> >>
> >> I understand the spec as "the device is ready as soon as the first
> >> queue pair is ready", but I might be wrong.
> >>
> >> Do you suggest to call the dev_close() vDPA callback and the
> >> destroy_device() application callback as soon as one of the ring of
> >> the first queue pair receive a disable request or, with my patch,
> >> when one of the rings receives a request that changes the ring state?
> >
> > I means, your proposal actually may make first virtq-pair ready state
> disabled when device ready.
> > So, yes, it leads to call device close\destroy.
> 
> No it doesn't, there is no call to .dev_close()/.destroy_device() with my
> patch if first queue pair gets disabled.
> 
> >>> I will not insist anymore on waiting for enable for notifying
> >>> although I not
> >> fan with it.
> >>>
> >>> So, I suggest to create 1 notification function to be called after
> >>> message
> >> handler and before reply.
> >>> This function is the only one which notify ready states in the next
> options:
> >>>
> >>> 1. virtq ready state is changed in the queue.
> >>> 2. virtq ready state stays on after configuration message handler.
> >>> 3. device state will be enabled when the first queue pair is ready.
> >>
> >> IIUC, it will not disable the queues when there is a state change, is
> >> that correct? If so, I think it does not work with memory hotplug
> >> case I mentioned earlier.
> >
> > It will do enable again which mean - something was modified.
> 
> Ok, thanks for the clarification.
> 
> I think it is not enough for the examples I gave below. For set_mem_table,
> we need to stop the device from processing the vrings before the
> set_mem_table handler calls the munmap(), and re-enable it after the
> mmap() (I did that wrong in my example patch, I just did that after the
> munmap/mmap happened, which is too late).
> 
> >> Even for the callfd double change it can be problematic as Vhost-lib
> >> will close the first one while it will still be used by the driver
> >> (Btw, I see my example patch is also buggy in this regards, it should
> >> reset the call_fd value in the virtqueue, then call
> >> vhost_user_update_vring_state() and finally close the FD).
> >
> > Yes, this one leads for different handle for each message.
> >
> > Maybe it leads for new queue modify operation.
> > So, queue doesn't send the state - just does configuration change on the
> fly.
> >
> > What do you think?
> 
> I think that configuration on the fly doesn't fly.
> We would at least need to stop the device from processing the rings for
> memory hotplug case, so why not just send a disable notification?

Yes, the driver needs a notification here.

> And for the double callfd, that does not look right to me not to request the
> driver to stop using it before it is closed, isn't it?

Yes, and some drivers (including mlx5) may stop the traffic in this case too.

A modify\update operation would solve all of these cases:

For example:

In memory hotplug:
Do the new mmap.
Call modify.
Do munmap for the old one.

In a callfd\kickfd change:

Set the new FD.
Call modify.
Close the old FD.

Modify is clearer, saves calls and is faster (the datapath will be back faster); see the sketch below.
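As a rough sketch of the flows above (queue_modify is not an existing
rte_vdpa_dev_ops callback, and map_new_regions()/unmap_old_regions() are only
illustrative names):

/* Memory hotplug with a hypothetical 'modify' operation. */
new_map = map_new_regions(dev, msg);     /* 1. map the new regions   */
vdpa_dev->ops->queue_modify(dev->vid);   /* 2. driver switches over  */
unmap_old_regions(dev, old_map);         /* 3. only now drop old map */

/* callfd\kickfd change with the same operation. */
vq->callfd = new_fd;                     /* 1. set the new FD        */
vdpa_dev->ops->queue_modify(dev->vid);   /* 2. driver picks it up    */
close(old_fd);                           /* 3. close the old FD      */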


>  Thanks,
> Maxime
> 
> >
> >> Thanks,
> >> Maxime
> >>>
> >>> Matan
> >>>
> >>>
> >>>
> >>>> Maxime
> >>>
> >
  
Maxime Coquelin June 24, 2020, 7:22 a.m. UTC | #23
Good morning Matan,

On 6/24/20 7:54 AM, Matan Azrad wrote:
> Ho Maxime
> 
> Good morning
> 
> From: Maxime Coquelin:
>> On 6/23/20 4:52 PM, Matan Azrad wrote:
>>>
>>>
>>>> -----Original Message-----
>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>> Sent: Tuesday, June 23, 2020 4:56 PM
>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>> <xiao.w.wang@intel.com>
>>>> Cc: dev@dpdk.org
>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>>>
>>>> Hi Matan,
>>>>
>>>> On 6/23/20 1:53 PM, Matan Azrad wrote:
>>>>>
>>>>>
>>>>> From: Maxime Coquelin:
>>>>>> On 6/23/20 11:02 AM, Matan Azrad wrote:
>>>>>>>
>>>>>>>
>>>>>>> From: Maxime Coquelin:
>>>>>>>> On 6/22/20 5:51 PM, Matan Azrad wrote:
>>>>>>>>>
>>>>>>>>>
>>>>>>>>> From: Maxime Coquelin:
>>>>>>>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> From: Maxime Coquelin:
>>>>>>>>>>>> Sent: Monday, June 22, 2020 3:33 PM
>>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>>>>> definition
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
>>>>>>>>>>>>>
>>>>>>>>>>>>> Hi Maxime
>>>>>>>>>>>>>
>>>>>>>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>>>>>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
>>>>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>>>>>>> definition
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>>>>>>>>>>>>>> The issue is if you only check ready state only before
>>>>>>>>>>>>>>>> and after the message affecting the ring is handled, it
>>>>>>>>>>>>>>>> can be ready at both stages, while the rings have changed
>>>>>>>>>>>>>>>> and state change callback should
>>>>>>>>>>>>>> have been called.
>>>>>>>>>>>>>>> But in this version I checked twice, before message
>>>>>>>>>>>>>>> handler and after
>>>>>>>>>>>>>> message handler, so it should catch any update.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> No, this is not enough, we have to check also during some
>>>>>>>>>>>>>> handlers, so that the ready state is invalidated because
>>>>>>>>>>>>>> sometimes it will be ready before and after the message
>>>>>>>>>>>>>> handler but
>>>>>>>> with different values.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> That's what I did in my example patch:
>>>>>>>>>>>>>> @@ -1847,15 +1892,16 @@
>> vhost_user_set_vring_kick(struct
>>>>>>>>>> virtio_net
>>>>>>>>>>>>>> **pdev, struct VhostUserMsg *msg,
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> ...
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>         if (vq->kickfd >= 0)
>>>>>>>>>>>>>>                 close(vq->kickfd);
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>         vq->kickfd = file.fd;
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Without that, the ready check will return ready before and
>>>>>>>>>>>>>> after the kickfd changed and the driver won't be notified.
>>>>>>>>>>>>>
>>>>>>>>>>>>> The driver will be notified in the next
>>>>>>>>>>>>> VHOST_USER_SET_VRING_ENABLE
>>>>>>>>>>>> message according to v1.
>>>>>>>>>>>>>
>>>>>>>>>>>>> One of our assumption we agreed on in the design mail is
>>>>>>>>>>>>> that it doesn't
>>>>>>>>>>>> make sense that QEMU will change queue configuration
>> without
>>>>>>>>>>>> enabling the queue again.
>>>>>>>>>>>>> Because of that we decided to force calling state callback
>>>>>>>>>>>>> again when
>>>>>>>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message
>> even
>>>> if
>>>>>> the
>>>>>>>>>> queue is
>>>>>>>>>>>> already ready.
>>>>>>>>>>>>> So when driver/app see state enable->enable, it should take
>>>>>>>>>>>>> into account
>>>>>>>>>>>> that the queue configuration was probably changed.
>>>>>>>>>>>>>
>>>>>>>>>>>>> I think that this assumption is correct according to the
>>>>>>>>>>>>> QEMU
>>>> code.
>>>>>>>>>>>>
>>>>>>>>>>>> Yes, this was our initial assumption.
>>>>>>>>>>>> But now looking into the details of the implementation, I
>>>>>>>>>>>> find it is even cleaner & clearer not to do this assumption.
>>>>>>>>>>>>
>>>>>>>>>>>>> That's why I prefer to collect all the ready checks
>>>>>>>>>>>>> callbacks (queue state and
>>>>>>>>>>>> device new\conf) to one function that will be called after
>>>>>>>>>>>> the message
>>>>>>>>>>>> handler:
>>>>>>>>>>>>> Pseudo:
>>>>>>>>>>>>>  vhost_user_update_ready_statuses() {
>>>>>>>>>>>>> 	switch (msg):
>>>>>>>>>>>>> 		case enable:
>>>>>>>>>>>>> 			if(enable is 1)
>>>>>>>>>>>>> 				force queue state =1.
>>>>>>>>>>>>> 		case callfd
>>>>>>>>>>>>> 		case kickfd
>>>>>>>>>>>>> 				.....
>>>>>>>>>>>>> 		Check queue and device ready + call callbacks if
>>>>>> needed..
>>>>>>>>>>>>> 		Default
>>>>>>>>>>>>> 			Return;
>>>>>>>>>>>>> }
>>>>>>>>>>>>
>>>>>>>>>>>> I find it more natural to "invalidate" ready state where it
>>>>>>>>>>>> is handled (after vring_invalidate(), before setting new FD
>>>>>>>>>>>> for call & kick, ...)
>>>>>>>>>>>
>>>>>>>>>>> I think that if you go with this direction, if the first queue
>>>>>>>>>>> pair is invalidated,
>>>>>>>>>> you need to notify app\driver also about device ready change.
>>>>>>>>>>> Also it will cause 2 notifications to the driver instead of
>>>>>>>>>>> one in case of FD
>>>>>>>>>> change.
>>>>>>>>>>
>>>>>>>>>> You'll always end-up with two notifications, either Qemu has
>>>>>>>>>> sent the disable and so you'll have one notification for the
>>>>>>>>>> disable and one for the enable, or it didn't sent the disable
>>>>>>>>>> and it will happen at old value invalidation time and after new
>>>>>>>>>> value is taken into
>>>>>> account.
>>>>>>>>>>
>>>>>>>>>
>>>>>>>>> I don't see it in current QEMU behavior.
>>>>>>>>> When working MQ I see that some virtqs get configuration
>> message
>>>>>>>>> while
>>>>>>>> they are in enabled state.
>>>>>>>>> Then, enable message is sent again later.
>>>>>>>>
>>>>>>>> I guess you mean the first queue pair? And it would not be in
>>>>>>>> ready state as it would be the initial configuration of the queue?
>>>>>>>
>>>>>>> Even after initialization when queue is ready.
>>>>>>>
>>>>>>>>>
>>>>>>>>>>> Why not to take this correct assumption and update ready state
>>>>>>>>>>> only in one
>>>>>>>>>> point in the code instead of doing it in all the configuration
>>>>>>>>>> handlers
>>>>>>>> around?
>>>>>>>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
>>>>>>>>>>
>>>>>>>>>> I just looked closer at the Vhost-user spec, and I'm no more so
>>>>>>>>>> sure this is a correct assumption:
>>>>>>>>>>
>>>>>>>>>> "While processing the rings (whether they are enabled or not),
>>>>>>>>>> client must support changing some configuration aspects on the
>> fly."
>>>>>>>>>
>>>>>>>>> Ok, this doesn't explain how configuration is changed on the fly.
>>>>>>>>
>>>>>>>> I agree it lacks a bit of clarity.
>>>>>>>>
>>>>>>>>> As I mentioned, QEMU sends enable message always after
>>>>>>>>> configuration
>>>>>>>> message.
>>>>>>>>
>>>>>>>> Yes, but we should not do assumptions on current Qemu version
>>>>>>>> when possible. Better to be safe and follow the specification, it
>>>>>>>> will be more
>>>>>> robust.
>>>>>>>> There is also the Virtio-user PMD to take into account for example.
>>>>>>>
>>>>>>> I understand your point here but do you really want to be ready
>>>>>>> for any
>>>>>> configuration update in run time?
>>>>>>> What does it mean? How datatpath should handle configuration from
>>>>>> control thread in run time while traffic is on?
>>>>>>> For example, changing queue size \ addresses must stop traffic
>> before...
>>>>>>> Also changing FDs is very sensitive.
>>>>>>>
>>>>>>> It doesn't make sense to me.
>>>>>>>
>>>>>>> Also, according to "on the fly" direction we should not disable
>>>>>>> the queue
>>>>>> unless enable message is coming to disable it.
>>>>>
>>>>> No response, so looks like you agree that it doesn't make sense.
>>>>
>>>> No, my reply was general to all your comments.
>>>>
>>>> With SW backend, I agree we don't need to disable the rings in case
>>>> of asynchronous changes to the ring because we protect it with a
>>>> lock, so we are sure the ring won't be accessed by another thread
>>>> while doing the change.
>>>>
>>>> For vDPA case that's more problematic because we have no such locking
>>>> mechanism.
>>>>
>>>> For example memory hotplug, Qemu does not seem to disable the
>> queues
>>>> so we need to stop the vDPA device one way or another so that it does
>>>> not process the rings while the Vhost lib remaps the memory areas.
>>>>
>>>>>>> In addition:
>>>>>>> Do you really want to toggle vDPA drivers\app for any
>>>>>>> configuration
>>>>>> message? It may cause queue recreation for each one (at least for
>> mlx5).
>>>>>>
>>>>>> I want to have something robust and maintainable.
>>>>>
>>>>> Me too.
>>>>>
>>>>>> These messages arriving after a queue have been configured once are
>>>>>> rare events, but this is usually the kind of things that cause
>>>>>> maintenance
>>>> burden.
>>>>>
>>>>> In case of guest poll mode (testpmd virtio) we all the time get callfd
>> twice.
>>>>
>>>> Right.
>>>>
>>>>>> If you look at my example patch, you will understand that with my
>>>>>> proposal, there won't be any more state change notification than
>>>>>> with your proposal when Qemu or any other Vhost-user master send a
>>>>>> disable request before sending the request that impact the queue
>> state.
>>>>>
>>>>> we didn't talk about disable time - this one is very simple.
>>>>>
>>>>> Yes, In case the queue is disabled your proposal doesn't send extra
>>>> notification as my.
>>>>> But in case the queue is ready, your proposal send extra not ready
>>>> notification for kikfd,callfd,set_vring_base configurations.
>>>>
>>>> I think this is necessary for synchronization with the Vhost-user
>>>> master (in case the master asks for this synchronization, like
>>>> set_mem_table for instance when reply-ack is enabled).
>>>>
>>>>>> It just adds more robustness if this unlikely event happens, by
>>>>>> invalidating the ring state to not ready before doing the actual
>>>>>> ring
>>>> configuration change.
>>>>>> So that this config change is not missed by the vDPA driver or the
>>>> application.
>>>>>
>>>>> One more issue here is that there is some time that device is ready
>>>>> (already
>>>> configured) and the first vittq-pair is not ready (your invalidate
>>>> proposal for set_vring_base).
>>>>
>>>>
>>>>
>>>>> It doesn’t save the concept that device is ready only in case the
>>>>> first virtq-
>>>> pair is ready.
>>>>
>>>> I understand the spec as "the device is ready as soon as the first
>>>> queue pair is ready", but I might be wrong.
>>>>
>>>> Do you suggest to call the dev_close() vDPA callback and the
>>>> destroy_device() application callback as soon as one of the ring of
>>>> the first queue pair receive a disable request or, with my patch,
>>>> when one of the rings receives a request that changes the ring state?
>>>
>>> I means, your proposal actually may make first virtq-pair ready state
>> disabled when device ready.
>>> So, yes, it leads to call device close\destroy.
>>
>> No it doesn't, there is no call to .dev_close()/.destroy_device() with my
>> patch if first queue pair gets disabled.
>>
>>>>> I will not insist anymore on waiting for enable for notifying
>>>>> although I not
>>>> fan with it.
>>>>>
>>>>> So, I suggest to create 1 notification function to be called after
>>>>> message
>>>> handler and before reply.
>>>>> This function is the only one which notify ready states in the next
>> options:
>>>>>
>>>>> 1. virtq ready state is changed in the queue.
>>>>> 2. virtq ready state stays on after configuration message handler.
>>>>> 3. device state will be enabled when the first queue pair is ready.
>>>>
>>>> IIUC, it will not disable the queues when there is a state change, is
>>>> that correct? If so, I think it does not work with memory hotplug
>>>> case I mentioned earlier.
>>>
>>> It will do enable again which mean - something was modified.
>>
>> Ok, thanks for the clarification.
>>
>> I think it is not enough for the examples I gave below. For set_mem_table,
>> we need to stop the device from processing the vrings before the
>> set_mem_table handler calls the munmap(), and re-enable it after the
>> mmap() (I did that wrong in my example patch, I just did that after the
>> munmap/mmap happened, which is too late).
>>
>>>> Even for the callfd double change it can be problematic as Vhost-lib
>>>> will close the first one while it will still be used by the driver
>>>> (Btw, I see my example patch is also buggy in this regards, it should
>>>> reset the call_fd value in the virtqueue, then call
>>>> vhost_user_update_vring_state() and finally close the FD).
>>>
>>> Yes, this one leads for different handle for each message.
>>>
>>> Maybe it leads for new queue modify operation.
>>> So, queue doesn't send the state - just does configuration change on the
>> fly.
>>>
>>> What do you think?
>>
>> I think that configuration on the fly doesn't fly.
>> We would at least need to stop the device from processing the rings for
>> memory hotplug case, so why not just send a disable notification?
> 
> Yes, driver need notification here.
> 
>> And for the double callfd, that does not look right to me not to request the
>> driver to stop using it before it is closed, isn't it?
> 
> Yes, and some drivers (include mlx5) may stop the traffic in this case too.
> 
> modify\update operation will solve all:
> 
> For example:
> 
> In memory hotplug:
> Do new mmap
> Call modify
> Do munmup for old.
> 
> In callfd\kickfd change:
> 
> Set new FD.
> Call modify.
> Close old FD.
> 
> Modify is clearer, save calls and faster (datapath will back faster).

It should work, but these are not light modifications to do in the
set_mem_table handler (the function is already quite complex with
postcopy live-migration support).

With a modify callback, won't the driver part be more complex? It
would have to check which part of the ring configuration has changed,
and based on that decide whether it should stop the ring or not.

As you say, in the memory hotplug and double-callfd cases the driver
may stop processing the rings anyway, so would it be that much faster
than disabling/enabling the vring?

These events occur very rarely, so does it really matter if it takes a
bit longer?
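To illustrate the difference on the driver side (set_vring_state is the
existing rte_vdpa_dev_ops callback; queue_modify and the drv_* helpers are
hypothetical):

/* Existing model: the driver only reacts to explicit state changes. */
static int
drv_set_vring_state(int vid, int vring, int state)
{
	if (state == 0)
		return drv_stop_ring(vid, vring);
	return drv_start_ring(vid, vring);	/* re-read config and restart */
}

/* 'Modify' model: the driver has to work out what changed. */
static int
drv_queue_modify(int vid, int vring)
{
	/* Compare the cached ring configuration (FDs, addresses, size)
	 * against the new one and decide whether a full stop/start is
	 * needed or a lighter update is enough; sketched here as a full
	 * restart. */
	drv_stop_ring(vid, vring);
	return drv_start_ring(vid, vring);
}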

Thanks,
Maxime

> 
>>  Thanks,
>> Maxime
>>
>>>
>>>> Thanks,
>>>> Maxime
>>>>>
>>>>> Matan
>>>>>
>>>>>
>>>>>
>>>>>> Maxime
>>>>>
>>>
>
  
Matan Azrad June 24, 2020, 8:38 a.m. UTC | #24
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, June 24, 2020 10:22 AM
> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> <xiao.w.wang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> 
> Good morning Matan,
> 
> On 6/24/20 7:54 AM, Matan Azrad wrote:
> > Ho Maxime
> >
> > Good morning
> >
> > From: Maxime Coquelin:
> >> On 6/23/20 4:52 PM, Matan Azrad wrote:
> >>>
> >>>
> >>>> -----Original Message-----
> >>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >>>> Sent: Tuesday, June 23, 2020 4:56 PM
> >>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>> <xiao.w.wang@intel.com>
> >>>> Cc: dev@dpdk.org
> >>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
> >>>>
> >>>> Hi Matan,
> >>>>
> >>>> On 6/23/20 1:53 PM, Matan Azrad wrote:
> >>>>>
> >>>>>
> >>>>> From: Maxime Coquelin:
> >>>>>> On 6/23/20 11:02 AM, Matan Azrad wrote:
> >>>>>>>
> >>>>>>>
> >>>>>>> From: Maxime Coquelin:
> >>>>>>>> On 6/22/20 5:51 PM, Matan Azrad wrote:
> >>>>>>>>>
> >>>>>>>>>
> >>>>>>>>> From: Maxime Coquelin:
> >>>>>>>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
> >>>>>>>>>>>
> >>>>>>>>>>>
> >>>>>>>>>>> From: Maxime Coquelin:
> >>>>>>>>>>>> Sent: Monday, June 22, 2020 3:33 PM
> >>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>>>>>>>> <xiao.w.wang@intel.com>
> >>>>>>>>>>>> Cc: dev@dpdk.org
> >>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>>>>>>>> definition
> >>>>>>>>>>>>
> >>>>>>>>>>>>
> >>>>>>>>>>>>
> >>>>>>>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> Hi Maxime
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> >>>>>>>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
> >>>>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
> >>>>>>>>>>>>>> <xiao.w.wang@intel.com>
> >>>>>>>>>>>>>> Cc: dev@dpdk.org
> >>>>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
> >>>>>>>>>>>>>> definition
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
> >>>>>>>>>>>>>>>> The issue is if you only check ready state only before
> >>>>>>>>>>>>>>>> and after the message affecting the ring is handled, it
> >>>>>>>>>>>>>>>> can be ready at both stages, while the rings have
> >>>>>>>>>>>>>>>> changed and state change callback should
> >>>>>>>>>>>>>> have been called.
> >>>>>>>>>>>>>>> But in this version I checked twice, before message
> >>>>>>>>>>>>>>> handler and after
> >>>>>>>>>>>>>> message handler, so it should catch any update.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> No, this is not enough, we have to check also during some
> >>>>>>>>>>>>>> handlers, so that the ready state is invalidated because
> >>>>>>>>>>>>>> sometimes it will be ready before and after the message
> >>>>>>>>>>>>>> handler but
> >>>>>>>> with different values.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> That's what I did in my example patch:
> >>>>>>>>>>>>>> @@ -1847,15 +1892,16 @@
> >> vhost_user_set_vring_kick(struct
> >>>>>>>>>> virtio_net
> >>>>>>>>>>>>>> **pdev, struct VhostUserMsg *msg,
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> ...
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>>         if (vq->kickfd >= 0)
> >>>>>>>>>>>>>>                 close(vq->kickfd);
> >>>>>>>>>>>>>> +
> >>>>>>>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> >>>>>>>>>>>>>> +
> >>>>>>>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
> >>>>>>>>>>>>>> +
> >>>>>>>>>>>>>>         vq->kickfd = file.fd;
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> Without that, the ready check will return ready before
> >>>>>>>>>>>>>> and after the kickfd changed and the driver won't be
> notified.
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> The driver will be notified in the next
> >>>>>>>>>>>>> VHOST_USER_SET_VRING_ENABLE
> >>>>>>>>>>>> message according to v1.
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> One of our assumption we agreed on in the design mail is
> >>>>>>>>>>>>> that it doesn't
> >>>>>>>>>>>> make sense that QEMU will change queue configuration
> >> without
> >>>>>>>>>>>> enabling the queue again.
> >>>>>>>>>>>>> Because of that we decided to force calling state callback
> >>>>>>>>>>>>> again when
> >>>>>>>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message
> >> even
> >>>> if
> >>>>>> the
> >>>>>>>>>> queue is
> >>>>>>>>>>>> already ready.
> >>>>>>>>>>>>> So when driver/app see state enable->enable, it should
> >>>>>>>>>>>>> take into account
> >>>>>>>>>>>> that the queue configuration was probably changed.
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> I think that this assumption is correct according to the
> >>>>>>>>>>>>> QEMU
> >>>> code.
> >>>>>>>>>>>>
> >>>>>>>>>>>> Yes, this was our initial assumption.
> >>>>>>>>>>>> But now looking into the details of the implementation, I
> >>>>>>>>>>>> find it is even cleaner & clearer not to do this assumption.
> >>>>>>>>>>>>
> >>>>>>>>>>>>> That's why I prefer to collect all the ready checks
> >>>>>>>>>>>>> callbacks (queue state and
> >>>>>>>>>>>> device new\conf) to one function that will be called after
> >>>>>>>>>>>> the message
> >>>>>>>>>>>> handler:
> >>>>>>>>>>>>> Pseudo:
> >>>>>>>>>>>>>  vhost_user_update_ready_statuses() {
> >>>>>>>>>>>>> 	switch (msg):
> >>>>>>>>>>>>> 		case enable:
> >>>>>>>>>>>>> 			if(enable is 1)
> >>>>>>>>>>>>> 				force queue state =1.
> >>>>>>>>>>>>> 		case callfd
> >>>>>>>>>>>>> 		case kickfd
> >>>>>>>>>>>>> 				.....
> >>>>>>>>>>>>> 		Check queue and device ready + call callbacks
> if
> >>>>>> needed..
> >>>>>>>>>>>>> 		Default
> >>>>>>>>>>>>> 			Return;
> >>>>>>>>>>>>> }
> >>>>>>>>>>>>
> >>>>>>>>>>>> I find it more natural to "invalidate" ready state where it
> >>>>>>>>>>>> is handled (after vring_invalidate(), before setting new FD
> >>>>>>>>>>>> for call & kick, ...)
> >>>>>>>>>>>
> >>>>>>>>>>> I think that if you go with this direction, if the first
> >>>>>>>>>>> queue pair is invalidated,
> >>>>>>>>>> you need to notify app\driver also about device ready change.
> >>>>>>>>>>> Also it will cause 2 notifications to the driver instead of
> >>>>>>>>>>> one in case of FD
> >>>>>>>>>> change.
> >>>>>>>>>>
> >>>>>>>>>> You'll always end-up with two notifications, either Qemu has
> >>>>>>>>>> sent the disable and so you'll have one notification for the
> >>>>>>>>>> disable and one for the enable, or it didn't sent the disable
> >>>>>>>>>> and it will happen at old value invalidation time and after
> >>>>>>>>>> new value is taken into
> >>>>>> account.
> >>>>>>>>>>
> >>>>>>>>>
> >>>>>>>>> I don't see it in current QEMU behavior.
> >>>>>>>>> When working MQ I see that some virtqs get configuration
> >> message
> >>>>>>>>> while
> >>>>>>>> they are in enabled state.
> >>>>>>>>> Then, enable message is sent again later.
> >>>>>>>>
> >>>>>>>> I guess you mean the first queue pair? And it would not be in
> >>>>>>>> ready state as it would be the initial configuration of the queue?
> >>>>>>>
> >>>>>>> Even after initialization when queue is ready.
> >>>>>>>
> >>>>>>>>>
> >>>>>>>>>>> Why not to take this correct assumption and update ready
> >>>>>>>>>>> state only in one
> >>>>>>>>>> point in the code instead of doing it in all the
> >>>>>>>>>> configuration handlers
> >>>>>>>> around?
> >>>>>>>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
> >>>>>>>>>>
> >>>>>>>>>> I just looked closer at the Vhost-user spec, and I'm no more
> >>>>>>>>>> so sure this is a correct assumption:
> >>>>>>>>>>
> >>>>>>>>>> "While processing the rings (whether they are enabled or
> >>>>>>>>>> not), client must support changing some configuration aspects
> >>>>>>>>>> on the
> >> fly."
> >>>>>>>>>
> >>>>>>>>> Ok, this doesn't explain how configuration is changed on the fly.
> >>>>>>>>
> >>>>>>>> I agree it lacks a bit of clarity.
> >>>>>>>>
> >>>>>>>>> As I mentioned, QEMU sends enable message always after
> >>>>>>>>> configuration
> >>>>>>>> message.
> >>>>>>>>
> >>>>>>>> Yes, but we should not do assumptions on current Qemu version
> >>>>>>>> when possible. Better to be safe and follow the specification,
> >>>>>>>> it will be more
> >>>>>> robust.
> >>>>>>>> There is also the Virtio-user PMD to take into account for
> example.
> >>>>>>>
> >>>>>>> I understand your point here but do you really want to be ready
> >>>>>>> for any
> >>>>>> configuration update in run time?
> >>>>>>> What does it mean? How datatpath should handle configuration
> >>>>>>> from
> >>>>>> control thread in run time while traffic is on?
> >>>>>>> For example, changing queue size \ addresses must stop traffic
> >> before...
> >>>>>>> Also changing FDs is very sensitive.
> >>>>>>>
> >>>>>>> It doesn't make sense to me.
> >>>>>>>
> >>>>>>> Also, according to "on the fly" direction we should not disable
> >>>>>>> the queue
> >>>>>> unless enable message is coming to disable it.
> >>>>>
> >>>>> No response, so looks like you agree that it doesn't make sense.
> >>>>
> >>>> No, my reply was general to all your comments.
> >>>>
> >>>> With SW backend, I agree we don't need to disable the rings in case
> >>>> of asynchronous changes to the ring because we protect it with a
> >>>> lock, so we are sure the ring won't be accessed by another thread
> >>>> while doing the change.
> >>>>
> >>>> For vDPA case that's more problematic because we have no such
> >>>> locking mechanism.
> >>>>
> >>>> For example memory hotplug, Qemu does not seem to disable the
> >> queues
> >>>> so we need to stop the vDPA device one way or another so that it
> >>>> does not process the rings while the Vhost lib remaps the memory
> areas.
> >>>>
> >>>>>>> In addition:
> >>>>>>> Do you really want to toggle vDPA drivers\app for any
> >>>>>>> configuration
> >>>>>> message? It may cause queue recreation for each one (at least for
> >> mlx5).
> >>>>>>
> >>>>>> I want to have something robust and maintainable.
> >>>>>
> >>>>> Me too.
> >>>>>
> >>>>>> These messages arriving after a queue have been configured once
> >>>>>> are rare events, but this is usually the kind of things that
> >>>>>> cause maintenance
> >>>> burden.
> >>>>>
> >>>>> In case of guest poll mode (testpmd virtio) we all the time get
> >>>>> callfd
> >> twice.
> >>>>
> >>>> Right.
> >>>>
> >>>>>> If you look at my example patch, you will understand that with my
> >>>>>> proposal, there won't be any more state change notification than
> >>>>>> with your proposal when Qemu or any other Vhost-user master
> send
> >>>>>> a disable request before sending the request that impact the
> >>>>>> queue
> >> state.
> >>>>>
> >>>>> we didn't talk about disable time - this one is very simple.
> >>>>>
> >>>>> Yes, In case the queue is disabled your proposal doesn't send
> >>>>> extra
> >>>> notification as my.
> >>>>> But in case the queue is ready, your proposal send extra not ready
> >>>> notification for kikfd,callfd,set_vring_base configurations.
> >>>>
> >>>> I think this is necessary for synchronization with the Vhost-user
> >>>> master (in case the master asks for this synchronization, like
> >>>> set_mem_table for instance when reply-ack is enabled).
> >>>>
> >>>>>> It just adds more robustness if this unlikely event happens, by
> >>>>>> invalidating the ring state to not ready before doing the actual
> >>>>>> ring
> >>>> configuration change.
> >>>>>> So that this config change is not missed by the vDPA driver or
> >>>>>> the
> >>>> application.
> >>>>>
> >>>>> One more issue here is that there is some time that device is
> >>>>> ready (already
> >>>> configured) and the first vittq-pair is not ready (your invalidate
> >>>> proposal for set_vring_base).
> >>>>
> >>>>
> >>>>
> >>>>> It doesn’t save the concept that device is ready only in case the
> >>>>> first virtq-
> >>>> pair is ready.
> >>>>
> >>>> I understand the spec as "the device is ready as soon as the first
> >>>> queue pair is ready", but I might be wrong.
> >>>>
> >>>> Do you suggest to call the dev_close() vDPA callback and the
> >>>> destroy_device() application callback as soon as one of the ring of
> >>>> the first queue pair receive a disable request or, with my patch,
> >>>> when one of the rings receives a request that changes the ring state?
> >>>
> >>> I means, your proposal actually may make first virtq-pair ready
> >>> state
> >> disabled when device ready.
> >>> So, yes, it leads to call device close\destroy.
> >>
> >> No it doesn't, there is no call to .dev_close()/.destroy_device()
> >> with my patch if first queue pair gets disabled.
> >>
> >>>>> I will not insist anymore on waiting for enable for notifying
> >>>>> although I not
> >>>> fan with it.
> >>>>>
> >>>>> So, I suggest to create 1 notification function to be called after
> >>>>> message
> >>>> handler and before reply.
> >>>>> This function is the only one which notify ready states in the
> >>>>> next
> >> options:
> >>>>>
> >>>>> 1. virtq ready state is changed in the queue.
> >>>>> 2. virtq ready state stays on after configuration message handler.
> >>>>> 3. device state will be enabled when the first queue pair is ready.
> >>>>
> >>>> IIUC, it will not disable the queues when there is a state change,
> >>>> is that correct? If so, I think it does not work with memory
> >>>> hotplug case I mentioned earlier.
> >>>
> >>> It will do enable again which mean - something was modified.
> >>
> >> Ok, thanks for the clarification.
> >>
> >> I think it is not enough for the examples I gave below. For
> >> set_mem_table, we need to stop the device from processing the vrings
> >> before the set_mem_table handler calls the munmap(), and re-enable it
> >> after the
> >> mmap() (I did that wrong in my example patch, I just did that after
> >> the munmap/mmap happened, which is too late).
> >>
> >>>> Even for the callfd double change it can be problematic as
> >>>> Vhost-lib will close the first one while it will still be used by
> >>>> the driver (Btw, I see my example patch is also buggy in this
> >>>> regards, it should reset the call_fd value in the virtqueue, then
> >>>> call
> >>>> vhost_user_update_vring_state() and finally close the FD).
> >>>
> >>> Yes, this one leads for different handle for each message.
> >>>
> >>> Maybe it leads for new queue modify operation.
> >>> So, queue doesn't send the state - just does configuration change on
> >>> the
> >> fly.
> >>>
> >>> What do you think?
> >>
> >> I think that configuration on the fly doesn't fly.
> >> We would at least need to stop the device from processing the rings
> >> for memory hotplug case, so why not just send a disable notification?
> >
> > Yes, driver need notification here.
> >
> >> And for the double callfd, that does not look right to me not to
> >> request the driver to stop using it before it is closed, isn't it?
> >
> > Yes, and some drivers (include mlx5) may stop the traffic in this case too.
> >
> > modify\update operation will solve all:
> >
> > For example:
> >
> > In memory hotplug:
> > Do new mmap
> > Call modify
> > Do munmup for old.
> >
> > In callfd\kickfd change:
> >
> > Set new FD.
> > Call modify.
> > Close old FD.
> >
> > Modify is clearer, save calls and faster (datapath will back faster).
> 
> It should work, but that is not light modifications to do in set_mem_table
> handler (the function is quite complex already with postcopy live-migration
> support).
> 
> With a modify callback, won't the driver part be more complex? Since it
> would have to check which state has changed in the ring, and based on that
> decide whether it should stop the ring or not.
> 
> As you says that in case of memory hotplug and double callfd, the driver may
> stop processing the rings anyway, so would it be that much faster than
> disabling/enabling the vring?
> 
> These events having a very rare occurrence, does it really matter if it is a bit
> longer?


Just thinking again about memory hotplug:

The mlx5 device needs to be reinitialized in this case, because the NIC holds a memory translation that must be updated before the virtqs are created.

So, maybe we need to close and re-configure the vDPA device in this case.
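Roughly, on the vhost side (a sketch only: update_guest_memory_map() stands in
for the real set_mem_table body; dev_close()/dev_conf() and
VIRTIO_DEV_VDPA_CONFIGURED already exist):

if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
	/* Stop the HW and drop the stale memory translation. */
	vdpa_dev->ops->dev_close(dev->vid);
	dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}

update_guest_memory_map(dev, msg);	/* munmap old + mmap new */

if (virtio_is_ready(dev)) {
	/* Re-create the virtqs on top of the new mapping. */
	vdpa_dev->ops->dev_conf(dev->vid);
	dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
}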

@Xiao Wang, can you comment on the IFC behavior here?

Matan


> Thanks,
> Maxime
> 
> >
> >>  Thanks,
> >> Maxime
> >>
> >>>
> >>>> Thanks,
> >>>> Maxime
> >>>>>
> >>>>> Matan
> >>>>>
> >>>>>
> >>>>>
> >>>>>> Maxime
> >>>>>
> >>>
> >
  
Maxime Coquelin June 24, 2020, 9:12 a.m. UTC | #25
On 6/24/20 10:38 AM, Matan Azrad wrote:
> 
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Wednesday, June 24, 2020 10:22 AM
>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>> <xiao.w.wang@intel.com>
>> Cc: dev@dpdk.org
>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>
>> Good morning Matan,
>>
>> On 6/24/20 7:54 AM, Matan Azrad wrote:
>>> Ho Maxime
>>>
>>> Good morning
>>>
>>> From: Maxime Coquelin:
>>>> On 6/23/20 4:52 PM, Matan Azrad wrote:
>>>>>
>>>>>
>>>>>> -----Original Message-----
>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>>>> Sent: Tuesday, June 23, 2020 4:56 PM
>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>> <xiao.w.wang@intel.com>
>>>>>> Cc: dev@dpdk.org
>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready definition
>>>>>>
>>>>>> Hi Matan,
>>>>>>
>>>>>> On 6/23/20 1:53 PM, Matan Azrad wrote:
>>>>>>>
>>>>>>>
>>>>>>> From: Maxime Coquelin:
>>>>>>>> On 6/23/20 11:02 AM, Matan Azrad wrote:
>>>>>>>>>
>>>>>>>>>
>>>>>>>>> From: Maxime Coquelin:
>>>>>>>>>> On 6/22/20 5:51 PM, Matan Azrad wrote:
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> From: Maxime Coquelin:
>>>>>>>>>>>> On 6/22/20 3:43 PM, Matan Azrad wrote:
>>>>>>>>>>>>>
>>>>>>>>>>>>>
>>>>>>>>>>>>> From: Maxime Coquelin:
>>>>>>>>>>>>>> Sent: Monday, June 22, 2020 3:33 PM
>>>>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>>>>>>> definition
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> On 6/22/20 12:06 PM, Matan Azrad wrote:
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> Hi Maxime
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>>>>>>>>>>>>>>>> Sent: Monday, June 22, 2020 11:56 AM
>>>>>>>>>>>>>>>> To: Matan Azrad <matan@mellanox.com>; Xiao Wang
>>>>>>>>>>>>>>>> <xiao.w.wang@intel.com>
>>>>>>>>>>>>>>>> Cc: dev@dpdk.org
>>>>>>>>>>>>>>>> Subject: Re: [PATCH v1 3/4] vhost: improve device ready
>>>>>>>>>>>>>>>> definition
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> On 6/22/20 10:41 AM, Matan Azrad wrote:
>>>>>>>>>>>>>>>>>> The issue is if you only check ready state only before
>>>>>>>>>>>>>>>>>> and after the message affecting the ring is handled, it
>>>>>>>>>>>>>>>>>> can be ready at both stages, while the rings have
>>>>>>>>>>>>>>>>>> changed and state change callback should
>>>>>>>>>>>>>>>> have been called.
>>>>>>>>>>>>>>>>> But in this version I checked twice, before message
>>>>>>>>>>>>>>>>> handler and after
>>>>>>>>>>>>>>>> message handler, so it should catch any update.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> No, this is not enough, we have to check also during some
>>>>>>>>>>>>>>>> handlers, so that the ready state is invalidated because
>>>>>>>>>>>>>>>> sometimes it will be ready before and after the message
>>>>>>>>>>>>>>>> handler but
>>>>>>>>>> with different values.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> That's what I did in my example patch:
>>>>>>>>>>>>>>>> @@ -1847,15 +1892,16 @@
>>>> vhost_user_set_vring_kick(struct
>>>>>>>>>>>> virtio_net
>>>>>>>>>>>>>>>> **pdev, struct VhostUserMsg *msg,
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> ...
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>         if (vq->kickfd >= 0)
>>>>>>>>>>>>>>>>                 close(vq->kickfd);
>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>> +       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>> +       vhost_user_update_vring_state(dev, file.index);
>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>>         vq->kickfd = file.fd;
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> Without that, the ready check will return ready before
>>>>>>>>>>>>>>>> and after the kickfd changed and the driver won't be
>> notified.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> The driver will be notified in the next
>>>>>>>>>>>>>>> VHOST_USER_SET_VRING_ENABLE
>>>>>>>>>>>>>> message according to v1.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> One of our assumption we agreed on in the design mail is
>>>>>>>>>>>>>>> that it doesn't
>>>>>>>>>>>>>> make sense that QEMU will change queue configuration
>>>> without
>>>>>>>>>>>>>> enabling the queue again.
>>>>>>>>>>>>>>> Because of that we decided to force calling state callback
>>>>>>>>>>>>>>> again when
>>>>>>>>>>>>>> QEMU send VHOST_USER_SET_VRING_ENABLE(1) message
>>>> even
>>>>>> if
>>>>>>>> the
>>>>>>>>>>>> queue is
>>>>>>>>>>>>>> already ready.
>>>>>>>>>>>>>>> So when driver/app see state enable->enable, it should
>>>>>>>>>>>>>>> take into account
>>>>>>>>>>>>>> that the queue configuration was probably changed.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> I think that this assumption is correct according to the
>>>>>>>>>>>>>>> QEMU
>>>>>> code.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Yes, this was our initial assumption.
>>>>>>>>>>>>>> But now looking into the details of the implementation, I
>>>>>>>>>>>>>> find it is even cleaner & clearer not to do this assumption.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> That's why I prefer to collect all the ready checks
>>>>>>>>>>>>>>> callbacks (queue state and
>>>>>>>>>>>>>> device new\conf) to one function that will be called after
>>>>>>>>>>>>>> the message
>>>>>>>>>>>>>> handler:
>>>>>>>>>>>>>>> Pseudo:
>>>>>>>>>>>>>>>  vhost_user_update_ready_statuses() {
>>>>>>>>>>>>>>> 	switch (msg):
>>>>>>>>>>>>>>> 		case enable:
>>>>>>>>>>>>>>> 			if(enable is 1)
>>>>>>>>>>>>>>> 				force queue state =1.
>>>>>>>>>>>>>>> 		case callfd
>>>>>>>>>>>>>>> 		case kickfd
>>>>>>>>>>>>>>> 				.....
>>>>>>>>>>>>>>> 		Check queue and device ready + call callbacks
>> if
>>>>>>>> needed..
>>>>>>>>>>>>>>> 		Default
>>>>>>>>>>>>>>> 			Return;
>>>>>>>>>>>>>>> }
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> I find it more natural to "invalidate" ready state where it
>>>>>>>>>>>>>> is handled (after vring_invalidate(), before setting new FD
>>>>>>>>>>>>>> for call & kick, ...)
>>>>>>>>>>>>>
>>>>>>>>>>>>> I think that if you go with this direction, if the first
>>>>>>>>>>>>> queue pair is invalidated,
>>>>>>>>>>>> you need to notify app\driver also about device ready change.
>>>>>>>>>>>>> Also it will cause 2 notifications to the driver instead of
>>>>>>>>>>>>> one in case of FD
>>>>>>>>>>>> change.
>>>>>>>>>>>>
>>>>>>>>>>>> You'll always end-up with two notifications, either Qemu has
>>>>>>>>>>>> sent the disable and so you'll have one notification for the
>>>>>>>>>>>> disable and one for the enable, or it didn't sent the disable
>>>>>>>>>>>> and it will happen at old value invalidation time and after
>>>>>>>>>>>> new value is taken into
>>>>>>>> account.
>>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> I don't see it in current QEMU behavior.
>>>>>>>>>>> When working MQ I see that some virtqs get configuration
>>>> message
>>>>>>>>>>> while
>>>>>>>>>> they are in enabled state.
>>>>>>>>>>> Then, enable message is sent again later.
>>>>>>>>>>
>>>>>>>>>> I guess you mean the first queue pair? And it would not be in
>>>>>>>>>> ready state as it would be the initial configuration of the queue?
>>>>>>>>>
>>>>>>>>> Even after initialization when queue is ready.
>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>>> Why not to take this correct assumption and update ready
>>>>>>>>>>>>> state only in one
>>>>>>>>>>>> point in the code instead of doing it in all the
>>>>>>>>>>>> configuration handlers
>>>>>>>>>> around?
>>>>>>>>>>>>> IMO, It is correct, less intrusive, simpler, clearer and cleaner.
>>>>>>>>>>>>
>>>>>>>>>>>> I just looked closer at the Vhost-user spec, and I'm no more
>>>>>>>>>>>> so sure this is a correct assumption:
>>>>>>>>>>>>
>>>>>>>>>>>> "While processing the rings (whether they are enabled or
>>>>>>>>>>>> not), client must support changing some configuration aspects
>>>>>>>>>>>> on the
>>>> fly."
>>>>>>>>>>>
>>>>>>>>>>> Ok, this doesn't explain how configuration is changed on the fly.
>>>>>>>>>>
>>>>>>>>>> I agree it lacks a bit of clarity.
>>>>>>>>>>
>>>>>>>>>>> As I mentioned, QEMU sends enable message always after
>>>>>>>>>>> configuration
>>>>>>>>>> message.
>>>>>>>>>>
>>>>>>>>>> Yes, but we should not do assumptions on current Qemu version
>>>>>>>>>> when possible. Better to be safe and follow the specification,
>>>>>>>>>> it will be more
>>>>>>>> robust.
>>>>>>>>>> There is also the Virtio-user PMD to take into account for
>> example.
>>>>>>>>>
>>>>>>>>> I understand your point here but do you really want to be ready
>>>>>>>>> for any
>>>>>>>> configuration update in run time?
>>>>>>>>> What does it mean? How datatpath should handle configuration
>>>>>>>>> from
>>>>>>>> control thread in run time while traffic is on?
>>>>>>>>> For example, changing queue size \ addresses must stop traffic
>>>> before...
>>>>>>>>> Also changing FDs is very sensitive.
>>>>>>>>>
>>>>>>>>> It doesn't make sense to me.
>>>>>>>>>
>>>>>>>>> Also, according to "on the fly" direction we should not disable
>>>>>>>>> the queue
>>>>>>>> unless enable message is coming to disable it.
>>>>>>>
>>>>>>> No response, so looks like you agree that it doesn't make sense.
>>>>>>
>>>>>> No, my reply was general to all your comments.
>>>>>>
>>>>>> With SW backend, I agree we don't need to disable the rings in case
>>>>>> of asynchronous changes to the ring because we protect it with a
>>>>>> lock, so we are sure the ring won't be accessed by another thread
>>>>>> while doing the change.
>>>>>>
>>>>>> For vDPA case that's more problematic because we have no such
>>>>>> locking mechanism.
>>>>>>
>>>>>> For example memory hotplug, Qemu does not seem to disable the
>>>> queues
>>>>>> so we need to stop the vDPA device one way or another so that it
>>>>>> does not process the rings while the Vhost lib remaps the memory
>> areas.
>>>>>>
>>>>>>>>> In addition:
>>>>>>>>> Do you really want to toggle vDPA drivers\app for any
>>>>>>>>> configuration
>>>>>>>> message? It may cause queue recreation for each one (at least for
>>>> mlx5).
>>>>>>>>
>>>>>>>> I want to have something robust and maintainable.
>>>>>>>
>>>>>>> Me too.
>>>>>>>
>>>>>>>> These messages arriving after a queue has been configured once
>>>>>>>> are rare events, but this is usually the kind of thing that
>>>>>>>> causes maintenance burden.
>>>>>>>
>>>>>>> In case of guest poll mode (testpmd virtio) we get the callfd twice
>>>>>>> all the time.
>>>>>>
>>>>>> Right.
>>>>>>
>>>>>>>> If you look at my example patch, you will understand that with my
>>>>>>>> proposal, there won't be any more state change notifications than
>>>>>>>> with your proposal when Qemu or any other Vhost-user master sends
>>>>>>>> a disable request before sending the request that impacts the
>>>>>>>> queue state.
>>>>>>>
>>>>>>> we didn't talk about disable time - this one is very simple.
>>>>>>>
>>>>>>> Yes, in case the queue is disabled your proposal doesn't send an
>>>>>>> extra notification, just like mine.
>>>>>>> But in case the queue is ready, your proposal sends an extra not-ready
>>>>>>> notification for kickfd, callfd and set_vring_base configurations.
>>>>>>
>>>>>> I think this is necessary for synchronization with the Vhost-user
>>>>>> master (in case the master asks for this synchronization, like
>>>>>> set_mem_table for instance when reply-ack is enabled).
>>>>>>
>>>>>>>> It just adds more robustness if this unlikely event happens, by
>>>>>>>> invalidating the ring state to not ready before doing the actual
>>>>>>>> ring configuration change.
>>>>>>>> So that this config change is not missed by the vDPA driver or
>>>>>>>> the application.
>>>>>>>
>>>>>>> One more issue here is that there is some time where the device is
>>>>>>> ready (already configured) and the first virtq-pair is not ready
>>>>>>> (your invalidate proposal for set_vring_base).
>>>>>>
>>>>>>
>>>>>>
>>>>>>> It doesn't preserve the concept that the device is ready only in case
>>>>>>> the first virtq-pair is ready.
>>>>>>
>>>>>> I understand the spec as "the device is ready as soon as the first
>>>>>> queue pair is ready", but I might be wrong.
>>>>>>
>>>>>> Do you suggest to call the dev_close() vDPA callback and the
>>>>>> destroy_device() application callback as soon as one of the ring of
>>>>>> the first queue pair receive a disable request or, with my patch,
>>>>>> when one of the rings receives a request that changes the ring state?
>>>>>
>>>>> I mean, your proposal may actually make the first virtq-pair ready
>>>>> state disabled while the device is ready.
>>>>> So, yes, it leads to calling device close\destroy.
>>>>
>>>> No it doesn't, there is no call to .dev_close()/.destroy_device()
>>>> with my patch if first queue pair gets disabled.
>>>>
>>>>>>> I will not insist anymore on waiting for enable before notifying,
>>>>>>> although I am not a fan of it.
>>>>>>>
>>>>>>> So, I suggest to create one notification function to be called after
>>>>>>> the message handler and before the reply.
>>>>>>> This function is the only one which notifies ready states, in the
>>>>>>> following cases:
>>>>>>>
>>>>>>> 1. virtq ready state is changed in the queue.
>>>>>>> 2. virtq ready state stays on after configuration message handler.
>>>>>>> 3. device state will be enabled when the first queue pair is ready.
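For illustration only, a condensed sketch of such a single notification pass; this is roughly what the patch shown at the bottom of this page does in the message handler. notify_vring_state() is a hypothetical helper wrapping the vDPA set_vring_state() op and the application's vring_state_changed() callback.

	bool ready[VHOST_MAX_VRING];
	uint32_t i;

	/* Before dispatching the VHOST_USER_* handler: remember which rings
	 * were ready. */
	for (i = 0; i < VHOST_MAX_VRING; i++)
		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);

	/* ... handle the message ... */

	/* After the handler: notify only the rings whose ready state changed,
	 * plus the ring targeted by a SET_VRING_ENABLE request. */
	for (i = 0; i < VHOST_MAX_VRING; i++) {
		bool cur = vq_is_ready(dev, dev->virtqueue[i]);

		if (cur != ready[i] ||
		    (cur && request == VHOST_USER_SET_VRING_ENABLE &&
		     i == msg.payload.state.index))
			notify_vring_state(dev, i, (int)cur);
	}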
>>>>>>
>>>>>> IIUC, it will not disable the queues when there is a state change,
>>>>>> is that correct? If so, I think it does not work with memory
>>>>>> hotplug case I mentioned earlier.
>>>>>
>>>>> It will do enable again, which means something was modified.
>>>>
>>>> Ok, thanks for the clarification.
>>>>
>>>> I think it is not enough for the examples I gave below. For
>>>> set_mem_table, we need to stop the device from processing the vrings
>>>> before the set_mem_table handler calls munmap(), and re-enable it
>>>> after the mmap() (I did that wrong in my example patch, I just did
>>>> that after the munmap/mmap happened, which is too late).
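For illustration, a minimal sketch of that ordering inside the VHOST_USER_SET_MEM_TABLE handler; vhost_user_notify_vring_state() is a hypothetical helper that calls the vDPA set_vring_state() op and the application's vring_state_changed() callback:

	/* 1. Tell the driver/application to stop processing every ring that
	 *    is currently ready, before the old guest memory is unmapped. */
	for (i = 0; i < dev->nr_vring; i++)
		if (vq_is_ready(dev, dev->virtqueue[i]))
			vhost_user_notify_vring_state(dev, i, 0);

	free_mem_region(dev);	/* munmap() the old regions */
	/* ... mmap() the regions described in the new memory table ... */

	/* 2. Only now let the driver/application use the rings again. */
	for (i = 0; i < dev->nr_vring; i++)
		if (vq_is_ready(dev, dev->virtqueue[i]))
			vhost_user_notify_vring_state(dev, i, 1);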
>>>>
>>>>>> Even for the callfd double change it can be problematic, as the
>>>>>> Vhost lib will close the first one while it is still being used by
>>>>>> the driver (Btw, I see my example patch is also buggy in this
>>>>>> regard, it should reset the call_fd value in the virtqueue, then
>>>>>> call vhost_user_update_vring_state() and finally close the FD).
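A minimal sketch of that callfd ordering for VHOST_USER_SET_VRING_CALL, assuming `file` was already filled from the message payload; vhost_user_update_vring_state() refers to the helper from the example patch discussed here, not an upstream function:

	struct vhost_virtqueue *vq = dev->virtqueue[file.index];
	int old_callfd = vq->callfd;

	/* 1. Install the new eventfd in the virtqueue. */
	vq->callfd = file.fd;
	/* 2. Notify the vDPA driver/application so it stops using the old one. */
	vhost_user_update_vring_state(dev, file.index);
	/* 3. Only now close the fd the driver may still have been using. */
	if (old_callfd >= 0)
		close(old_callfd);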
>>>>>
>>>>> Yes, this one leads to different handling for each message.
>>>>>
>>>>> Maybe it leads to a new queue modify operation.
>>>>> So, the queue doesn't send the state - it just does the configuration
>>>>> change on the fly.
>>>>>
>>>>> What do you think?
>>>>
>>>> I think that configuration on the fly doesn't fly.
>>>> We would at least need to stop the device from processing the rings
>>>> for memory hotplug case, so why not just send a disable notification?
>>>
>>> Yes, the driver needs a notification here.
>>>
>>>> And for the double callfd, that does not look right to me not to
>>>> request the driver to stop using it before it is closed, isn't it?
>>>
>>> Yes, and some drivers (including mlx5) may stop the traffic in this case too.
>>>
>>> A modify\update operation will solve it all:
>>>
>>> For example:
>>>
>>> In memory hotplug:
>>> Do new mmap
>>> Call modify
>>> Do munmap for old.
>>>
>>> In callfd\kickfd change:
>>>
>>> Set new FD.
>>> Call modify.
>>> Close old FD.
>>>
>>> Modify is clearer, saves calls and is faster (the datapath will be back faster).
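Sketch of the suggested operation; such a callback does not exist in struct rte_vdpa_dev_ops today, it is only an illustration of the idea (the ring stays logically enabled, the driver just refreshes whatever depends on the changed resource):

	/* Hypothetical addition to struct rte_vdpa_dev_ops: called after a
	 * configuration field of queue qid changed on the fly (callfd,
	 * kickfd, addresses, memory table, ...). */
	int (*queue_modify)(int vid, int qid);

	/*
	 * Memory hotplug:                callfd change:
	 *   mmap() the new regions        vq->callfd = new_fd
	 *   ops->queue_modify(vid, qid)   ops->queue_modify(vid, qid)
	 *   munmap() the old regions      close(old_fd)
	 */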
>>
>> It should work, but those are not light modifications to do in the
>> set_mem_table handler (the function is quite complex already with postcopy
>> live-migration support).
>>
>> With a modify callback, won't the driver part be more complex? Since it
>> would have to check which state has changed in the ring, and based on that
>> decide whether it should stop the ring or not.
>>
>> As you say, in case of memory hotplug and double callfd the driver may
>> stop processing the rings anyway, so would it be that much faster than
>> disabling/enabling the vring?
>>
>> These events having a very rare occurrence, does it really matter if it is a bit
>> longer?
> 
> 
> Just thinking again about memory hotplug:
> 
> The mlx5 device needs to be reinitialized in this case because the NIC has a memory translation which must be updated before the virtq creation.
> 
> So, maybe we need to close and configure the vDPA device in this case.

Right, disabling vrings is not enough for memory hotplug.

It would make sense to call dev_close and dev_conf here, that's the most
conservative approach.
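
A minimal sketch of that conservative approach in the set_mem_table path, assuming vdpa_dev was already looked up; dev_close()/dev_conf() are the existing rte_vdpa_dev_ops callbacks:

	if (vdpa_dev && (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
		vdpa_dev->ops->dev_close(dev->vid);
		dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
	}

	/* ... munmap() the old regions, mmap() the new memory table ... */

	if (vdpa_dev && vdpa_dev->ops->dev_conf && virtio_is_ready(dev)) {
		vdpa_dev->ops->dev_conf(dev->vid);
		dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
	}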

> @Xiao Wang, can you comment the IFC behavior here.
> 
> Matan
> 
> 
>> Thanks,
>> Maxime
>>
>>>
>>>>  Thanks,
>>>> Maxime
>>>>
>>>>>
>>>>>> Thanks,
>>>>>> Maxime
>>>>>>>
>>>>>>> Matan
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>>>> Maxime
>>>>>>>
>>>>>
>>>
>
  

Patch

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index b0849b9..cfd5f27 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1295,7 +1295,7 @@ 
 {
 	bool rings_ok;
 
-	if (!vq)
+	if (!vq || !vq->enabled)
 		return false;
 
 	if (vq_is_packed(dev))
@@ -1309,24 +1309,27 @@ 
 	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
 }
 
+#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u
+
 static int
 virtio_is_ready(struct virtio_net *dev)
 {
 	struct vhost_virtqueue *vq;
 	uint32_t i;
 
-	if (dev->nr_vring == 0)
+	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
 		return 0;
 
-	for (i = 0; i < dev->nr_vring; i++) {
+	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
 		vq = dev->virtqueue[i];
 
 		if (!vq_is_ready(dev, vq))
 			return 0;
 	}
 
-	VHOST_LOG_CONFIG(INFO,
-		"virtio is now ready for processing.\n");
+	if (!(dev->flags & VIRTIO_DEV_READY))
+		VHOST_LOG_CONFIG(INFO,
+			"virtio is now ready for processing.\n");
 	return 1;
 }
 
@@ -1970,8 +1973,6 @@  static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
 	struct virtio_net *dev = *pdev;
 	int enable = (int)msg->payload.state.num;
 	int index = (int)msg->payload.state.index;
-	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 
 	if (validate_msg_fds(msg, 0) != 0)
 		return RTE_VHOST_MSG_RESULT_ERR;
@@ -1980,15 +1981,6 @@  static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
 		"set queue enable: %d to qp idx: %d\n",
 		enable, index);
 
-	did = dev->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
-		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
-
-	if (dev->notify_ops->vring_state_changed)
-		dev->notify_ops->vring_state_changed(dev->vid,
-				index, enable);
-
 	/* On disable, rings have to be stopped being processed. */
 	if (!enable && dev->dequeue_zero_copy)
 		drain_zmbuf_list(dev->virtqueue[index]);
@@ -2622,11 +2614,13 @@  typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
 	struct virtio_net *dev;
 	struct VhostUserMsg msg;
 	struct rte_vdpa_device *vdpa_dev;
+	bool ready[VHOST_MAX_VRING];
 	int did = -1;
 	int ret;
 	int unlock_required = 0;
 	bool handled;
 	int request;
+	uint32_t i;
 
 	dev = get_device(vid);
 	if (dev == NULL)
@@ -2668,6 +2662,10 @@  typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
 		VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
 	}
 
+	/* Save ready status for all the VQs before message handle. */
+	for (i = 0; i < VHOST_MAX_VRING; i++)
+		ready[i] = vq_is_ready(dev, dev->virtqueue[i]);
+
 	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
 	if (ret < 0) {
 		VHOST_LOG_CONFIG(ERR,
@@ -2802,6 +2800,25 @@  typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
 		return -1;
 	}
 
+	did = dev->vdpa_dev_id;
+	vdpa_dev = rte_vdpa_get_device(did);
+	/* Update ready status. */
+	for (i = 0; i < VHOST_MAX_VRING; i++) {
+		bool cur_ready = vq_is_ready(dev, dev->virtqueue[i]);
+
+		if ((cur_ready && request == VHOST_USER_SET_VRING_ENABLE &&
+				i == msg.payload.state.index) ||
+				cur_ready != ready[i]) {
+			if (vdpa_dev && vdpa_dev->ops->set_vring_state)
+				vdpa_dev->ops->set_vring_state(dev->vid, i,
+								(int)cur_ready);
+
+			if (dev->notify_ops->vring_state_changed)
+				dev->notify_ops->vring_state_changed(dev->vid,
+							i, (int)cur_ready);
+		}
+	}
+
 	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
 		dev->flags |= VIRTIO_DEV_READY;
 
@@ -2816,8 +2833,6 @@  typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
 		}
 	}
 
-	did = dev->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
 	if (vdpa_dev && virtio_is_ready(dev) &&
 			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
 			msg.request.master == VHOST_USER_SET_VRING_CALL) {