[v2,2/3] vhost: call vDPA callback at the end of vring enable handler
Checks
Commit Message
vDPA's set_vring_state callback would need to know the virtqueues'
enable status to configure the hardware.
Signed-off-by: Xiaolong Ye <xiaolong.ye@intel.com>
Signed-off-by: Andy Pei <andy.pei@intel.com>
---
v2:
add nr_active_vring as a parameter to the ops function set_vring_state,
so the number of active vrings is available to the callback invoked in
set_vring_state() without exposing a new API.
lib/librte_vhost/rte_vdpa.h | 4 ++--
lib/librte_vhost/vhost_user.c | 27 +++++++++++++++++++++++++--
2 files changed, 27 insertions(+), 4 deletions(-)
Comments
On Tue, Sep 17, 2019 at 05:09:47PM +0800, Andy Pei wrote:
> vDPA's set_vring_state callback would need to know the virtqueues'
> enable status to configure the hardware.
>
> Signed-off-by: Xiaolong Ye <xiaolong.ye@intel.com>
> Signed-off-by: Andy Pei <andy.pei@intel.com>
> ---
> v2:
> add nr_active_vring as a parameter to ops function set_vring_state in
> case of callback in set_vring_state() and avoid exposing new API.
>
> lib/librte_vhost/rte_vdpa.h | 4 ++--
> lib/librte_vhost/vhost_user.c | 27 +++++++++++++++++++++++++--
> 2 files changed, 27 insertions(+), 4 deletions(-)
>
> diff --git a/lib/librte_vhost/rte_vdpa.h b/lib/librte_vhost/rte_vdpa.h
> index 9a3deb3..6e55d4d 100644
> --- a/lib/librte_vhost/rte_vdpa.h
> +++ b/lib/librte_vhost/rte_vdpa.h
> @@ -54,8 +54,8 @@ struct rte_vdpa_dev_ops {
> int (*dev_conf)(int vid);
> int (*dev_close)(int vid);
>
> - /** Enable/disable this vring */
> - int (*set_vring_state)(int vid, int vring, int state);
> + /** Enable/disable vring queue pairs */
> + int (*set_vring_state)(int vid, int nr_active_vring);
We should avoid changing the API/ABI unless we have a very good
justification.
With the existing API, it should be easy to get the number of
active rings by maintaining a bitmap or something similar in
ifc driver.
Besides, please keep the other maintainers obtained from
get-maintainer.sh in the Cc list as well.
Thanks,
Tiwei
>
> /** Set features when changed */
> int (*set_features)(int vid);
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index 0b72648..4d7de44 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -1325,6 +1325,25 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
> return RTE_VHOST_MSG_RESULT_REPLY;
> }
>
> +static uint16_t
> +vhost_get_active_vring_num(int vid)
> +{
> + struct virtio_net *dev = get_device(vid);
> + struct vhost_virtqueue *vq;
> + uint16_t qid;
> +
> + if (dev == NULL)
> + return 0;
> +
> + for (qid = 0; qid < dev->nr_vring; qid++) {
> + vq = dev->virtqueue[qid];
> + if (!vq->enabled)
> + break;
> + }
> +
> + return qid;
> +}
> +
> /*
> * when virtio queues are ready to work, qemu will send us to
> * enable the virtio queue pair.
> @@ -1339,6 +1358,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
> int index = (int)msg->payload.state.index;
> struct rte_vdpa_device *vdpa_dev;
> int did = -1;
> + int nr_active_vring;
>
> RTE_LOG(INFO, VHOST_CONFIG,
> "set queue enable: %d to qp idx: %d\n",
> @@ -1346,8 +1366,6 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
>
> did = dev->vdpa_dev_id;
> vdpa_dev = rte_vdpa_get_device(did);
> - if (vdpa_dev && vdpa_dev->ops->set_vring_state)
> - vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
>
> if (dev->notify_ops->vring_state_changed)
> dev->notify_ops->vring_state_changed(dev->vid,
> @@ -1359,6 +1377,11 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
>
> dev->virtqueue[index]->enabled = enable;
>
> + if (vdpa_dev && vdpa_dev->ops->set_vring_state) {
> + nr_active_vring = vhost_get_active_vring_num(dev->vid);
> + vdpa_dev->ops->set_vring_state(dev->vid, nr_active_vring);
> + }
> +
> return RTE_VHOST_MSG_RESULT_OK;
> }
>
> --
> 1.8.3.1
>
On 9/23/2019 9:12 AM, Tiwei Bie wrote:
> On Tue, Sep 17, 2019 at 05:09:47PM +0800, Andy Pei wrote:
>> vDPA's set_vring_state callback would need to know the virtqueues'
>> enable status to configure the hardware.
>>
>> Signed-off-by: Xiaolong Ye <xiaolong.ye@intel.com>
>> Signed-off-by: Andy Pei <andy.pei@intel.com>
>> ---
>> v2:
>> add nr_active_vring as a parameter to ops function set_vring_state in
>> case of callback in set_vring_state() and avoid exposing new API.
>>
>> lib/librte_vhost/rte_vdpa.h | 4 ++--
>> lib/librte_vhost/vhost_user.c | 27 +++++++++++++++++++++++++--
>> 2 files changed, 27 insertions(+), 4 deletions(-)
>>
>> diff --git a/lib/librte_vhost/rte_vdpa.h b/lib/librte_vhost/rte_vdpa.h
>> index 9a3deb3..6e55d4d 100644
>> --- a/lib/librte_vhost/rte_vdpa.h
>> +++ b/lib/librte_vhost/rte_vdpa.h
>> @@ -54,8 +54,8 @@ struct rte_vdpa_dev_ops {
>> int (*dev_conf)(int vid);
>> int (*dev_close)(int vid);
>>
>> - /** Enable/disable this vring */
>> - int (*set_vring_state)(int vid, int vring, int state);
>> + /** Enable/disable vring queue pairs */
>> + int (*set_vring_state)(int vid, int nr_active_vring);
>
> We should avoid changing the API/ABI unless we have a very good
> justification.
>
> With the existing API, it should be easy to get the number of
> active rings by maintaining a bitmap or something similar in
> ifc driver.
>
> Besides, please keep other maintainers got from get-maintainer.sh
> in the Cc list as well.
>
Updating patchset [1] as "Changes Requested" based on the above comment.
[1]
https://patches.dpdk.org/user/todo/dpdk/?series=6424&delegate=319&state=*
@@ -54,8 +54,8 @@ struct rte_vdpa_dev_ops {
int (*dev_conf)(int vid);
int (*dev_close)(int vid);
- /** Enable/disable this vring */
- int (*set_vring_state)(int vid, int vring, int state);
+ /** Enable/disable vring queue pairs */
+ int (*set_vring_state)(int vid, int nr_active_vring);
/** Set features when changed */
int (*set_features)(int vid);
@@ -1325,6 +1325,25 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
return RTE_VHOST_MSG_RESULT_REPLY;
}
+static uint16_t
+vhost_get_active_vring_num(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t qid;
+
+ if (dev == NULL)
+ return 0;
+
+ for (qid = 0; qid < dev->nr_vring; qid++) {
+ vq = dev->virtqueue[qid];
+ if (!vq->enabled)
+ break;
+ }
+
+ return qid;
+}
+
/*
* when virtio queues are ready to work, qemu will send us to
* enable the virtio queue pair.
@@ -1339,6 +1358,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
int index = (int)msg->payload.state.index;
struct rte_vdpa_device *vdpa_dev;
int did = -1;
+ int nr_active_vring;
RTE_LOG(INFO, VHOST_CONFIG,
"set queue enable: %d to qp idx: %d\n",
@@ -1346,8 +1366,6 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
did = dev->vdpa_dev_id;
vdpa_dev = rte_vdpa_get_device(did);
- if (vdpa_dev && vdpa_dev->ops->set_vring_state)
- vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
if (dev->notify_ops->vring_state_changed)
dev->notify_ops->vring_state_changed(dev->vid,
@@ -1359,6 +1377,11 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
dev->virtqueue[index]->enabled = enable;
+ if (vdpa_dev && vdpa_dev->ops->set_vring_state) {
+ nr_active_vring = vhost_get_active_vring_num(dev->vid);
+ vdpa_dev->ops->set_vring_state(dev->vid, nr_active_vring);
+ }
+
return RTE_VHOST_MSG_RESULT_OK;
}