[v3,6/8] vdpa/ifc: support dynamic enable/disable queue

Message ID 1663308990-621-7-git-send-email-andy.pei@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: vdpa/ifc: add multi queue support

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Pei, Andy Sept. 16, 2022, 6:16 a.m. UTC
  From: Huang Wei <wei.huang@intel.com>

Support dynamically enabling or disabling a queue.
For a front end like QEMU, the user can use ethtool to configure queues.
For example, "ethtool -L eth0 combined 3" enables 3 queue pairs.

Signed-off-by: Huang Wei <wei.huang@intel.com>
Signed-off-by: Andy Pei <andy.pei@intel.com>
---
 drivers/vdpa/ifc/base/ifcvf.c | 99 +++++++++++++++++++++++++++++++++++++++++++
 drivers/vdpa/ifc/base/ifcvf.h |  6 +++
 drivers/vdpa/ifc/ifcvf_vdpa.c | 93 +++++++++++++++++++++++++++++++++-------
 3 files changed, 183 insertions(+), 15 deletions(-)
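
For context on the path exercised here: QEMU translates the guest's ethtool request into per-queue VHOST_USER_SET_VRING_ENABLE messages, and the vhost library delivers each one to the vDPA driver through its set_vring_state callback. Below is a minimal sketch of that wiring, assuming the rte_vdpa dev-ops registration this driver already uses; the struct and function names are illustrative, not the driver's own.

#include <vdpa_driver.h>	/* struct rte_vdpa_dev_ops (DPDK 22.11-era header) */

/* Illustrative callback: vhost invokes this once per queue whenever the
 * front end enables or disables that queue. */
static int
example_set_vring_state(int vid, int vring, int state)
{
	/* state != 0 means the front end enabled the queue; 0 means disabled. */
	(void)vid;
	(void)vring;
	(void)state;
	return 0;
}

static struct rte_vdpa_dev_ops example_ops = {
	/* ...the other mandatory callbacks are elided here... */
	.set_vring_state = example_set_vring_state,
};

A driver registers such an ops table once at probe time with rte_vdpa_register_device(); after that, every queue-state change from the front end lands in this callback, which is exactly the path this patch reworks.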
  

Comments

Chenbo Xia Oct. 12, 2022, 8:19 a.m. UTC | #1
> -----Original Message-----
> From: Pei, Andy <andy.pei@intel.com>
> Sent: Friday, September 16, 2022 2:16 PM
> To: dev@dpdk.org
> Cc: Xia, Chenbo <chenbo.xia@intel.com>; Xu, Rosen <rosen.xu@intel.com>;
> Huang, Wei <wei.huang@intel.com>; Cao, Gang <gang.cao@intel.com>;
> maxime.coquelin@redhat.com
> Subject: [PATCH v3 6/8] vdpa/ifc: support dynamic enable/disable queue
> 
> From: Huang Wei <wei.huang@intel.com>
> 
> Support dynamic enable or disable queue.
> For front end, like QEMU, user can use ethtool to configurate queue.

configure

> For example, "ethtool -L eth0 combined 3" to enable 3 queues pairs.
> 
> Signed-off-by: Huang Wei <wei.huang@intel.com>
> Signed-off-by: Andy Pei <andy.pei@intel.com>
> ---
>  drivers/vdpa/ifc/base/ifcvf.c | 99 +++++++++++++++++++++++++++++++++++++++++++
>  drivers/vdpa/ifc/base/ifcvf.h |  6 +++
>  drivers/vdpa/ifc/ifcvf_vdpa.c | 93 +++++++++++++++++++++++++++++++++-------
>  3 files changed, 183 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c
> index 619b034..792d258 100644
> --- a/drivers/vdpa/ifc/base/ifcvf.c
> +++ b/drivers/vdpa/ifc/base/ifcvf.c
> @@ -227,6 +227,105 @@
>  	}
>  }
> 
> +int
> +ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i)
> +{
> +	struct ifcvf_pci_common_cfg *cfg;
> +	u8 *lm_cfg;
> +	u16 notify_off;
> +	int msix_vector;
> +
> +	if (i >= (int)hw->nr_vring)
> +		return -1;
> +
> +	cfg = hw->common_cfg;
> +	if (!cfg) {
> +		WARNINGOUT("common_cfg in HW is NULL.\n");

This should be an error log

> +		return -1;
> +	}
> +
> +	ifcvf_enable_multiqueue(hw);
> +
> +	IFCVF_WRITE_REG16(i, &cfg->queue_select);
> +	msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
> +	if (msix_vector != (i + 1)) {
> +		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
> +		msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
> +		if (msix_vector == IFCVF_MSI_NO_VECTOR) {
> +			WARNINGOUT("queue %u, msix vec alloc failed\n", i);

Ditto. And %u -> %d

Same for the function ifcvf_disable_vring_hw.

Thanks,
Chenbo

> [...]
  
Pei, Andy Oct. 12, 2022, 11 a.m. UTC | #2
Hi Chenbo,

Thanks for your reply.
My reply is inline.

> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Wednesday, October 12, 2022 4:20 PM
> To: Pei, Andy <andy.pei@intel.com>; dev@dpdk.org
> Cc: Xu, Rosen <rosen.xu@intel.com>; Huang, Wei <wei.huang@intel.com>;
> Cao, Gang <gang.cao@intel.com>; maxime.coquelin@redhat.com
> Subject: RE: [PATCH v3 6/8] vdpa/ifc: support dynamic enable/disable queue
> 
> > -----Original Message-----
> > From: Pei, Andy <andy.pei@intel.com>
> > Sent: Friday, September 16, 2022 2:16 PM
> > To: dev@dpdk.org
> > Cc: Xia, Chenbo <chenbo.xia@intel.com>; Xu, Rosen
> > <rosen.xu@intel.com>; Huang, Wei <wei.huang@intel.com>; Cao, Gang
> > <gang.cao@intel.com>; maxime.coquelin@redhat.com
> > Subject: [PATCH v3 6/8] vdpa/ifc: support dynamic enable/disable queue
> >
> > From: Huang Wei <wei.huang@intel.com>
> >
> > Support dynamic enable or disable queue.
> > For front end, like QEMU, user can use ethtool to configurate queue.
> 
> configure
> 
Fix in next version.
> > For example, "ethtool -L eth0 combined 3" to enable 3 queues pairs.
> >
> > Signed-off-by: Huang Wei <wei.huang@intel.com>
> > Signed-off-by: Andy Pei <andy.pei@intel.com>
> > ---
> >  drivers/vdpa/ifc/base/ifcvf.c | 99 +++++++++++++++++++++++++++++++++++++++++++
> >  drivers/vdpa/ifc/base/ifcvf.h |  6 +++
> >  drivers/vdpa/ifc/ifcvf_vdpa.c | 93 +++++++++++++++++++++++++++++++++-------
> >  3 files changed, 183 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c
> > index 619b034..792d258 100644
> > --- a/drivers/vdpa/ifc/base/ifcvf.c
> > +++ b/drivers/vdpa/ifc/base/ifcvf.c
> > @@ -227,6 +227,105 @@
> >  	}
> >  }
> >
> > +int
> > +ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i)
> > +{
> > +	struct ifcvf_pci_common_cfg *cfg;
> > +	u8 *lm_cfg;
> > +	u16 notify_off;
> > +	int msix_vector;
> > +
> > +	if (i >= (int)hw->nr_vring)
> > +		return -1;
> > +
> > +	cfg = hw->common_cfg;
> > +	if (!cfg) {
> > +		WARNINGOUT("common_cfg in HW is NULL.\n");
> 
> This should be error log
> 
> > +		return -1;
> > +	}
> > +
> > +	ifcvf_enable_multiqueue(hw);
> > +
> > +	IFCVF_WRITE_REG16(i, &cfg->queue_select);
> > +	msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
> > +	if (msix_vector != (i + 1)) {
> > +		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
> > +		msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
> > +		if (msix_vector == IFCVF_MSI_NO_VECTOR) {
> > +			WARNINGOUT("queue %u, msix vec alloc failed\n", i);
> 
> Ditto. And %u -> %d
> 
> Same for the function ifcvf_disable_vring_hw.
> 
I will use RTE_LOG(ERR, PMD, "common_cfg in HW is NULL.\n");

> Thanks,
> Chenbo
> 
> > [...]
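
One detail worth pulling out of the patch: the enable/disable paths save and restore ring indexes through the device's live-migration (LM) BAR, where each queue owns a 32-bit state word holding last_avail_idx in the low 16 bits and last_used_idx in the high 16 bits. BLK devices use one LM config slot per queue, while NET devices pack a queue pair (two 4-byte words) into each slot. A self-contained sketch of that layout, with illustrative constants standing in for the driver's IFCVF_LM_* macros:

#include <stdint.h>

/* Illustrative stand-ins for the driver's IFCVF_LM_CFG_SIZE and
 * IFCVF_LM_RING_STATE_OFFSET macros; the real values live in ifcvf.h. */
#define EXAMPLE_LM_CFG_SIZE		0x40
#define EXAMPLE_LM_RING_STATE_OFFSET	0x20

/* Pack the indexes the way ifcvf_enable_vring_hw() writes them. */
static inline uint32_t
example_pack_ring_state(uint16_t last_avail_idx, uint16_t last_used_idx)
{
	return (uint32_t)last_avail_idx | ((uint32_t)last_used_idx << 16);
}

/* Byte offset of queue i's state word inside the LM region. */
static inline uint32_t
example_ring_state_offset(int i, int is_blk)
{
	if (is_blk)
		return EXAMPLE_LM_RING_STATE_OFFSET + i * EXAMPLE_LM_CFG_SIZE;
	return EXAMPLE_LM_RING_STATE_OFFSET +
		(i / 2) * EXAMPLE_LM_CFG_SIZE + (i % 2) * 4;
}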
  

Patch

diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c
index 619b034..792d258 100644
--- a/drivers/vdpa/ifc/base/ifcvf.c
+++ b/drivers/vdpa/ifc/base/ifcvf.c
@@ -227,6 +227,105 @@ 
 	}
 }
 
+int
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i)
+{
+	struct ifcvf_pci_common_cfg *cfg;
+	u8 *lm_cfg;
+	u16 notify_off;
+	int msix_vector;
+
+	if (i >= (int)hw->nr_vring)
+		return -1;
+
+	cfg = hw->common_cfg;
+	if (!cfg) {
+		WARNINGOUT("common_cfg in HW is NULL.\n");
+		return -1;
+	}
+
+	ifcvf_enable_multiqueue(hw);
+
+	IFCVF_WRITE_REG16(i, &cfg->queue_select);
+	msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
+	if (msix_vector != (i + 1)) {
+		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
+		msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
+		if (msix_vector == IFCVF_MSI_NO_VECTOR) {
+			WARNINGOUT("queue %u, msix vec alloc failed\n", i);
+			return -1;
+		}
+	}
+
+	io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+			&cfg->queue_desc_hi);
+	io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+			&cfg->queue_avail_hi);
+	io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+			&cfg->queue_used_hi);
+	IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);
+
+	lm_cfg = hw->lm_cfg;
+	if (lm_cfg) {
+		if (hw->device_type == IFCVF_BLK)
+			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+				i * IFCVF_LM_CFG_SIZE) =
+				(u32)hw->vring[i].last_avail_idx |
+				((u32)hw->vring[i].last_used_idx << 16);
+		else
+			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+				(i / 2) * IFCVF_LM_CFG_SIZE +
+				(i % 2) * 4) =
+				(u32)hw->vring[i].last_avail_idx |
+				((u32)hw->vring[i].last_used_idx << 16);
+	}
+
+	notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
+	hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
+			notify_off * hw->notify_off_multiplier);
+	IFCVF_WRITE_REG16(1, &cfg->queue_enable);
+
+	return 0;
+}
+
+void
+ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i)
+{
+	struct ifcvf_pci_common_cfg *cfg;
+	u32 ring_state;
+	u8 *lm_cfg;
+
+	if (i >= (int)hw->nr_vring)
+		return;
+
+	cfg = hw->common_cfg;
+	if (!cfg) {
+		WARNINGOUT("common_cfg in HW is NULL.\n");
+		return;
+	}
+
+	IFCVF_WRITE_REG16(i, &cfg->queue_select);
+	IFCVF_WRITE_REG16(0, &cfg->queue_enable);
+
+	lm_cfg = hw->lm_cfg;
+	if (lm_cfg) {
+		if (hw->device_type == IFCVF_BLK) {
+			ring_state = *(u32 *)(lm_cfg +
+					IFCVF_LM_RING_STATE_OFFSET +
+					i * IFCVF_LM_CFG_SIZE);
+			hw->vring[i].last_avail_idx =
+				(u16)(ring_state & IFCVF_16_BIT_MASK);
+		} else {
+			ring_state = *(u32 *)(lm_cfg +
+					IFCVF_LM_RING_STATE_OFFSET +
+					(i / 2) * IFCVF_LM_CFG_SIZE +
+					(i % 2) * 4);
+			hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);
+		}
+		hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
+	}
+}
+
 STATIC int
 ifcvf_hw_enable(struct ifcvf_hw *hw)
 {
diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h
index 1e133c0..3726da7 100644
--- a/drivers/vdpa/ifc/base/ifcvf.h
+++ b/drivers/vdpa/ifc/base/ifcvf.h
@@ -164,6 +164,12 @@  struct ifcvf_hw {
 ifcvf_get_features(struct ifcvf_hw *hw);
 
 int
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i);
+
+void
+ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i);
+
+int
 ifcvf_start_hw(struct ifcvf_hw *hw);
 
 void
diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
index b00afdb..32bc1c9 100644
--- a/drivers/vdpa/ifc/ifcvf_vdpa.c
+++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
@@ -1282,13 +1282,59 @@  struct rte_vdpa_dev_info {
 }
 
 static int
+ifcvf_config_vring(struct ifcvf_internal *internal, int vring)
+{
+	struct ifcvf_hw *hw = &internal->hw;
+	int vid = internal->vid;
+	struct rte_vhost_vring vq;
+	uint64_t gpa;
+
+	if (hw->vring[vring].enable) {
+		rte_vhost_get_vhost_vring(vid, vring, &vq);
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
+			return -1;
+		}
+		hw->vring[vring].desc = gpa;
+
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for available ring.");
+			return -1;
+		}
+		hw->vring[vring].avail = gpa;
+
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for used ring.");
+			return -1;
+		}
+		hw->vring[vring].used = gpa;
+
+		hw->vring[vring].size = vq.size;
+		rte_vhost_get_vring_base(vid, vring,
+				&hw->vring[vring].last_avail_idx,
+				&hw->vring[vring].last_used_idx);
+		ifcvf_enable_vring_hw(&internal->hw, vring);
+	} else {
+		ifcvf_disable_vring_hw(&internal->hw, vring);
+		rte_vhost_set_vring_base(vid, vring,
+				hw->vring[vring].last_avail_idx,
+				hw->vring[vring].last_used_idx);
+	}
+
+	return 0;
+}
+
+static int
 ifcvf_set_vring_state(int vid, int vring, int state)
 {
 	struct rte_vdpa_device *vdev;
 	struct internal_list *list;
 	struct ifcvf_internal *internal;
 	struct ifcvf_hw *hw;
-	struct ifcvf_pci_common_cfg *cfg;
+	bool enable = !!state;
 	int ret = 0;
 
 	vdev = rte_vhost_get_vdpa_device(vid);
@@ -1298,6 +1344,9 @@  struct rte_vdpa_dev_info {
 		return -1;
 	}
 
+	DRV_LOG(INFO, "%s queue %d of vDPA device %s",
+		enable ? "enable" : "disable", vring, vdev->device->name);
+
 	internal = list->internal;
 	if (vring < 0 || vring >= internal->max_queues * 2) {
 		DRV_LOG(ERR, "Vring index %d not correct", vring);
@@ -1305,27 +1354,41 @@  struct rte_vdpa_dev_info {
 	}
 
 	hw = &internal->hw;
+	hw->vring[vring].enable = enable;
+
 	if (!internal->configured)
-		goto exit;
+		return 0;
 
-	cfg = hw->common_cfg;
-	IFCVF_WRITE_REG16(vring, &cfg->queue_select);
-	IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);
+	unset_notify_relay(internal);
 
-	if (!state && hw->vring[vring].enable) {
-		ret = vdpa_disable_vfio_intr(internal);
-		if (ret)
-			return ret;
+	ret = vdpa_enable_vfio_intr(internal, false);
+	if (ret) {
+		DRV_LOG(ERR, "failed to set vfio interrupt of vDPA device %s",
+			vdev->device->name);
+		return ret;
 	}
 
-	if (state && !hw->vring[vring].enable) {
-		ret = vdpa_enable_vfio_intr(internal, false);
-		if (ret)
-			return ret;
+	ret = ifcvf_config_vring(internal, vring);
+	if (ret) {
+		DRV_LOG(ERR, "failed to configure queue %d of vDPA device %s",
+			vring, vdev->device->name);
+		return ret;
+	}
+
+	ret = setup_notify_relay(internal);
+	if (ret) {
+		DRV_LOG(ERR, "failed to setup notify relay of vDPA device %s",
+			vdev->device->name);
+		return ret;
+	}
+
+	ret = rte_vhost_host_notifier_ctrl(vid, vring, enable);
+	if (ret) {
+		DRV_LOG(ERR, "vDPA device %s queue %d host notifier ctrl fail",
+			vdev->device->name, vring);
+		return ret;
 	}
 
-exit:
-	hw->vring[vring].enable = !!state;
 	return 0;
 }
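
To summarize the reworked runtime path: once the device is configured, ifcvf_set_vring_state() no longer pokes queue_enable directly; it records the new state and then rebuilds the datapath around the per-ring change. The wrapper below is a reading aid tracing that sequence; every callee is one of the patch's own helpers (static functions in ifcvf_vdpa.c), so this sketch only makes sense in that file's context.

/* Illustrative outline of the post-configuration flow above. */
static int
example_apply_vring_state(struct ifcvf_internal *internal, int vid,
		int vring, bool enable)
{
	internal->hw.vring[vring].enable = enable;

	/* Stop relaying kicks, then rebuild MSI-X mappings and the relay
	 * around the per-ring reconfiguration. */
	unset_notify_relay(internal);
	if (vdpa_enable_vfio_intr(internal, false))
		return -1;
	if (ifcvf_config_vring(internal, vring))
		return -1;
	if (setup_notify_relay(internal))
		return -1;

	/* Finally, toggle the host notifier for this queue. */
	return rte_vhost_host_notifier_ctrl(vid, vring, enable);
}

One note on the design: tearing the relay and interrupts down and back up on every state change is heavier than flipping queue_enable, but it keeps the relay thread and MSI-X routing consistent with the set of queues that are actually live.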