[dpdk-dev,v5,resend,07/12] virtio: resolve for control queue

Message ID 1442589061-19225-8-git-send-email-yuanhan.liu@linux.intel.com (mailing list archive)
State Superseded, archived
Headers

Commit Message

Yuanhan Liu Sept. 18, 2015, 3:10 p.m. UTC
  From: Changchun Ouyang <changchun.ouyang@intel.com>

Fix the max virtio queue pair read issue.

Control queue can't work for vhost-user multiple queue mode,
so introduce a counter to avoid the dead loop when polling
the control queue.

Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
---
 drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
  

Comments

Michael S. Tsirkin Sept. 20, 2015, 9:21 a.m. UTC | #1
On Fri, Sep 18, 2015 at 11:10:56PM +0800, Yuanhan Liu wrote:
> From: Changchun Ouyang <changchun.ouyang@intel.com>
> 
> Fix the max virtio queue pair read issue.
> 
> Control queue can't work for vhost-user mulitple queue mode,
> so introduce a counter to void the dead loop when polling
> the control queue.
> 
> Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>

Per virtio spec, the multiqueue feature depends on control queue -
what do you mean when you say it can't work?

> ---
>  drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
>  1 file changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index 465d3cd..b2f4120 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -1162,7 +1162,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>  	struct virtio_hw *hw = eth_dev->data->dev_private;
>  	struct virtio_net_config *config;
>  	struct virtio_net_config local_config;
> -	uint32_t offset_conf = sizeof(config->mac);
>  	struct rte_pci_device *pci_dev;
>  
>  	RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr));
> @@ -1222,7 +1221,9 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>  		config = &local_config;
>  
>  		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
> -			offset_conf += sizeof(config->status);
> +			vtpci_read_dev_config(hw,
> +				offsetof(struct virtio_net_config, status),
> +				&config->status, sizeof(config->status));
>  		} else {
>  			PMD_INIT_LOG(DEBUG,
>  				     "VIRTIO_NET_F_STATUS is not supported");
> @@ -1230,15 +1231,16 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>  		}
>  
>  		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
> -			offset_conf += sizeof(config->max_virtqueue_pairs);
> +			vtpci_read_dev_config(hw,
> +				offsetof(struct virtio_net_config, max_virtqueue_pairs),
> +				&config->max_virtqueue_pairs,
> +				sizeof(config->max_virtqueue_pairs));
>  		} else {
>  			PMD_INIT_LOG(DEBUG,
>  				     "VIRTIO_NET_F_MQ is not supported");
>  			config->max_virtqueue_pairs = 1;
>  		}
>  
> -		vtpci_read_dev_config(hw, 0, (uint8_t *)config, offset_conf);
> -
>  		hw->max_rx_queues =
>  			(VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
>  			VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;


Does the patch actually do what the commit log says?
It seems to be about reading the device config,
not breaking out of a loop ...

> -- 
> 1.9.0
  
Yuanhan Liu Sept. 21, 2015, 6:36 a.m. UTC | #2
On Sun, Sep 20, 2015 at 12:21:14PM +0300, Michael S. Tsirkin wrote:
> On Fri, Sep 18, 2015 at 11:10:56PM +0800, Yuanhan Liu wrote:
> > From: Changchun Ouyang <changchun.ouyang@intel.com>
> > 
> > Fix the max virtio queue pair read issue.
> > 
> > Control queue can't work for vhost-user mulitple queue mode,
> > so introduce a counter to void the dead loop when polling
> > the control queue.
> > 
> > Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> > Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> 
> Per virtio spec, the multiqueue feature depends on control queue -
> what do you mean when you say it can't work?
> 
> > ---
> >  drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
> >  1 file changed, 7 insertions(+), 5 deletions(-)
> > 
> > diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> > index 465d3cd..b2f4120 100644
> > --- a/drivers/net/virtio/virtio_ethdev.c
> > +++ b/drivers/net/virtio/virtio_ethdev.c
> > @@ -1162,7 +1162,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> >  	struct virtio_hw *hw = eth_dev->data->dev_private;
> >  	struct virtio_net_config *config;
> >  	struct virtio_net_config local_config;
> > -	uint32_t offset_conf = sizeof(config->mac);
> >  	struct rte_pci_device *pci_dev;
> >  
> >  	RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr));
> > @@ -1222,7 +1221,9 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> >  		config = &local_config;
> >  
> >  		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
> > -			offset_conf += sizeof(config->status);
> > +			vtpci_read_dev_config(hw,
> > +				offsetof(struct virtio_net_config, status),
> > +				&config->status, sizeof(config->status));
> >  		} else {
> >  			PMD_INIT_LOG(DEBUG,
> >  				     "VIRTIO_NET_F_STATUS is not supported");
> > @@ -1230,15 +1231,16 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> >  		}
> >  
> >  		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
> > -			offset_conf += sizeof(config->max_virtqueue_pairs);
> > +			vtpci_read_dev_config(hw,
> > +				offsetof(struct virtio_net_config, max_virtqueue_pairs),
> > +				&config->max_virtqueue_pairs,
> > +				sizeof(config->max_virtqueue_pairs));
> >  		} else {
> >  			PMD_INIT_LOG(DEBUG,
> >  				     "VIRTIO_NET_F_MQ is not supported");
> >  			config->max_virtqueue_pairs = 1;
> >  		}
> >  
> > -		vtpci_read_dev_config(hw, 0, (uint8_t *)config, offset_conf);
> > -
> >  		hw->max_rx_queues =
> >  			(VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
> >  			VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
> 
> 
> Does the patch actually do what the commit log says?

Sorry, the commit log is wrong as you said.

It was actually a bug in our code, which happens to be revealed when
MQ is enabled. The old code adjusts the config bytes we want to read
depending on what kind of features we have, but we later cast the
entire buf we read with "struct virtio_net_config", which is obviously
wrong.

The right way to go is to read related config bytes when corresponding
feature is set, which is exactly what this patch does.

> It seems tobe about reading the device confing,
> not breaking out of a loop ...

It's just a (bad) side effect of getting the virtio_net_config wrongly:
the wrong config causes a dead loop in our code.

And sorry for the buggy commit log, will fix it next version.

Thanks.

	--yliu
  
Nikita Kalyazin Oct. 8, 2015, 3:32 p.m. UTC | #3
Hi Yuanhan,


As I understand, the dead loop happened here (virtio_send_command):
while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
      rte_rmb();
      usleep(100);
}

Could you explain why wrong config reading caused that and how correct reading helps to avoid?
  
Steffen Bauch Oct. 8, 2015, 8:51 p.m. UTC | #4
On 10/08/2015 05:32 PM, Nikita Kalyazin wrote:
> Hi Yuanhan,
>
>
> As I understand, the dead loop happened here (virtio_send_command):
> while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
>        rte_rmb();
>        usleep(100);
> }
>
> Could you explain why wrong config reading caused that and how correct reading helps to avoid?
>
Hi,

I just recognized that this dead loop is the same one that I have 
experienced (see 
http://dpdk.org/ml/archives/dev/2015-October/024737.html for reference). 
Just applying the changes in this patch (only 07/12) will not fix the 
dead loop at least in my setup.

Best regards,

Steffen
  
Nikita Kalyazin Oct. 9, 2015, 7:11 a.m. UTC | #5
Hi,

> I just recognized that this dead loop is the same one that I have 
> experienced (see 
> http://dpdk.org/ml/archives/dev/2015-October/024737.html for reference). 
> Just applying the changes in this patch (only 07/12) will not fix the 
> dead loop at least in my setup.
Yes, exactly. I observe it same way even after applying the patch.
  
Yuanhan Liu Oct. 12, 2015, 8:39 a.m. UTC | #6
On Thu, Oct 08, 2015 at 10:51:02PM +0200, Steffen Bauch wrote:
> 
> 
> On 10/08/2015 05:32 PM, Nikita Kalyazin wrote:
> >Hi Yuanhan,
> >
> >
> >As I understand, the dead loop happened here (virtio_send_command):
> >while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
> >       rte_rmb();
> >       usleep(100);
> >}
> >
> >Could you explain why wrong config reading caused that and how correct reading helps to avoid?

Wrong config reading results in a wrong config->max_virtqueue_pairs, which
ends up with a wrong ctrl vq index being set:

    PMD: virtio_send_command(): vq->vq_queue_index = 37120

Note that you need enable CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT to see above
debug log.

That is to say we are waiting for the backend to consume a non-exist
queue, and that's how the dead loop comes.


> >
> Hi,
> 
> I just recognized that this dead loop is the same one that I have
> experienced (see
> http://dpdk.org/ml/archives/dev/2015-October/024737.html for
> reference). Just applying the changes in this patch (only 07/12)
> will not fix the dead loop at least in my setup.

Try to enable CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT, and dump more log?

	--yliu
  
Huawei Xie Oct. 12, 2015, 9:31 a.m. UTC | #7
On 10/12/2015 9:39 AM, Yuanhan Liu wrote:
> On Thu, Oct 08, 2015 at 10:51:02PM +0200, Steffen Bauch wrote:
>>
>> On 10/08/2015 05:32 PM, Nikita Kalyazin wrote:
>>> Hi Yuanhan,
>>>
>>>
>>> As I understand, the dead loop happened here (virtio_send_command):
>>> while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {

Nikita:

Didn't review the whole patch, but happened to find a serious problem in
the code snippet here: as volatile isn't used, the compiler will assume the
memory will not be changed outside and do only one comparison.

Try adding a volatile prefix, and it might fix your problem.
>>>       rte_rmb();
>>>       usleep(100);
>>> }
>>>
>>> Could you explain why wrong config reading caused that and how correct reading helps to avoid?
> Wrong config reading results to wrong config->max_virtqueue_pairs, which
> ends up with wrong ctrl vq index being set:
>
>     PMD: virtio_send_command(): vq->vq_queue_index = 37120
>
> Note that you need enable CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT to see above
> debug log.
>
> That is to say we are waiting for the backend to consume a non-exist
> queue, and that's how the dead loop comes.
>
>
>> Hi,
>>
>> I just recognized that this dead loop is the same one that I have
>> experienced (see
>> http://dpdk.org/ml/archives/dev/2015-October/024737.html for
>> reference). Just applying the changes in this patch (only 07/12)
>> will not fix the dead loop at least in my setup.
> Try to enable CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT, and dump more log?
>
> 	--yliu
>
  
Huawei Xie Oct. 12, 2015, 9:56 a.m. UTC | #8
On 10/12/2015 10:33 AM, Xie, Huawei wrote:
> On 10/12/2015 9:39 AM, Yuanhan Liu wrote:
>> On Thu, Oct 08, 2015 at 10:51:02PM +0200, Steffen Bauch wrote:
>>> On 10/08/2015 05:32 PM, Nikita Kalyazin wrote:
>>>> Hi Yuanhan,
>>>>
>>>>
>>>> As I understand, the dead loop happened here (virtio_send_command):
>>>> while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
> Nikita:
>
> Didn't review the whole patch, but happen to  find a serious problem in
> the code snippet here, as volatile isn't used, compiler will assume the
> memory will not be changed outside and do only one comparison.
>
> Try add volatile prefix, and it might fix your problem.
Read other mails in this thread; I see the issue with the specific queue is due to a wrong
queue index.
Still, fix the volatile in the code: otherwise, if there is no match the first time, the
optimized code will go into the dead loop directly with no chance to compare again.
>>>>       rte_rmb();
>>>>       usleep(100);
>>>> }
>>>>
>>>> Could you explain why wrong config reading caused that and how correct reading helps to avoid?
>> Wrong config reading results to wrong config->max_virtqueue_pairs, which
>> ends up with wrong ctrl vq index being set:
>>
>>     PMD: virtio_send_command(): vq->vq_queue_index = 37120
>>
>> Note that you need enable CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT to see above
>> debug log.
>>
>> That is to say we are waiting for the backend to consume a non-exist
>> queue, and that's how the dead loop comes.
>>
>>
>>> Hi,
>>>
>>> I just recognized that this dead loop is the same one that I have
>>> experienced (see
>>> http://dpdk.org/ml/archives/dev/2015-October/024737.html for
>>> reference). Just applying the changes in this patch (only 07/12)
>>> will not fix the dead loop at least in my setup.
>> Try to enable CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT, and dump more log?
>>
>> 	--yliu
>>
>
  
Steffen Bauch Oct. 12, 2015, 8:58 p.m. UTC | #9
On 10/12/2015 10:39 AM, Yuanhan Liu wrote:
> Hi,
>
> I just recognized that this dead loop is the same one that I have
> experienced (see
> http://dpdk.org/ml/archives/dev/2015-October/024737.html for
> reference). Just applying the changes in this patch (only 07/12)
> will not fix the dead loop at least in my setup.
> Try to enable CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT, and dump more log?
I enabled the additional debug output. First try was without any 
additional changes in master, but it blocked also. Second try was with

[dpdk-dev] [PATCH v6 06/13] virtio: read virtio_net_config correctly

applied, but same result.

If you want to recreate my setup, just follow instructions in

http://dpdk.org/ml/archives/dev/2015-October/024737.html


vagrant@vagrant-ubuntu-vivid-64:~/dpdk$ git status
On branch master
Your branch is up-to-date with 'origin/master'.
Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git checkout -- <file>..." to discard changes in working directory)

     modified:   config/defconfig_x86_64-native-linuxapp-gcc

..

vagrant@vagrant-ubuntu-vivid-64:~/dpdk/x86_64-native-linuxapp-gcc/app$ 
sudo ./testpmd -b 0000:00:03.0 -c 3 -n 1 -- -i
EAL: Detected lcore 0 as core 0 on socket 0
EAL: Detected lcore 1 as core 1 on socket 0
EAL: Support maximum 128 logical core(s) by configuration.
EAL: Detected 2 lcore(s)
EAL: VFIO modules not all loaded, skip VFIO support...
EAL: Setting up physically contiguous memory...
EAL: Ask a virtual area of 0x400000 bytes
EAL: Virtual area found at 0x7f2a3a800000 (size = 0x400000)
EAL: Ask a virtual area of 0xe000000 bytes
EAL: Virtual area found at 0x7f2a2c600000 (size = 0xe000000)
EAL: Ask a virtual area of 0x30c00000 bytes
EAL: Virtual area found at 0x7f29fb800000 (size = 0x30c00000)
EAL: Ask a virtual area of 0x400000 bytes
EAL: Virtual area found at 0x7f29fb200000 (size = 0x400000)
EAL: Ask a virtual area of 0xa00000 bytes
EAL: Virtual area found at 0x7f29fa600000 (size = 0xa00000)
EAL: Ask a virtual area of 0x200000 bytes
EAL: Virtual area found at 0x7f29fa200000 (size = 0x200000)
EAL: Requesting 512 pages of size 2MB from socket 0
EAL: TSC frequency is ~2198491 KHz
EAL: WARNING: cpu flags constant_tsc=yes nonstop_tsc=no -> using 
unreliable clock cycles !
EAL: Master lcore 0 is ready (tid=3c9938c0;cpuset=[0])
EAL: lcore 1 is ready (tid=fa1ff700;cpuset=[1])
EAL: PCI device 0000:00:03.0 on NUMA socket -1
EAL:   probe driver: 1af4:1000 rte_virtio_pmd
EAL:   Device is blacklisted, not initializing
EAL: PCI device 0000:00:08.0 on NUMA socket -1
EAL:   probe driver: 1af4:1000 rte_virtio_pmd
PMD: parse_sysfs_value(): parse_sysfs_value(): cannot open sysfs value 
/sys/bus/pci/devices/0000:00:08.0/uio/uio0/portio/port0/size
PMD: virtio_resource_init_by_uio(): virtio_resource_init_by_uio(): 
cannot parse size
PMD: virtio_resource_init_by_ioports(): PCI Port IO found start=0xd040 
with size=0x20
PMD: virtio_negotiate_features(): guest_features before negotiate = cf8020
PMD: virtio_negotiate_features(): host_features before negotiate = 410fdda3
PMD: virtio_negotiate_features(): features after negotiate = f8020
PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:CC:DE:CD
PMD: eth_virtio_dev_init(): VIRTIO_NET_F_MQ is not supported
PMD: virtio_dev_cq_queue_setup():  >>
PMD: virtio_dev_queue_setup(): selecting queue: 2
PMD: virtio_dev_queue_setup(): vq_size: 16 nb_desc:0
PMD: virtio_dev_queue_setup(): vring_size: 4228, rounded_vring_size: 8192
PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x67b54000
PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f29fb354000
PMD: eth_virtio_dev_init(): config->max_virtqueue_pairs=1
PMD: eth_virtio_dev_init(): config->status=1
PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:CC:DE:CD
PMD: eth_virtio_dev_init(): hw->max_rx_queues=1 hw->max_tx_queues=1
PMD: eth_virtio_dev_init(): port 0 vendorID=0x1af4 deviceID=0x1000
PMD: virtio_dev_vring_start():  >>
EAL: PCI device 0000:00:09.0 on NUMA socket -1
EAL:   probe driver: 1af4:1000 rte_virtio_pmd
PMD: parse_sysfs_value(): parse_sysfs_value(): cannot open sysfs value 
/sys/bus/pci/devices/0000:00:09.0/uio/uio1/portio/port0/size
PMD: virtio_resource_init_by_uio(): virtio_resource_init_by_uio(): 
cannot parse size
PMD: virtio_resource_init_by_ioports(): PCI Port IO found start=0xd060 
with size=0x20
PMD: virtio_negotiate_features(): guest_features before negotiate = cf8020
PMD: virtio_negotiate_features(): host_features before negotiate = 410fdda3
PMD: virtio_negotiate_features(): features after negotiate = f8020
PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:07:D3:F5
PMD: eth_virtio_dev_init(): VIRTIO_NET_F_MQ is not supported
PMD: virtio_dev_cq_queue_setup():  >>
PMD: virtio_dev_queue_setup(): selecting queue: 2
PMD: virtio_dev_queue_setup(): vq_size: 16 nb_desc:0
PMD: virtio_dev_queue_setup(): vring_size: 4228, rounded_vring_size: 8192
PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x67b50000
PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f29fb350000
PMD: eth_virtio_dev_init(): config->max_virtqueue_pairs=1
PMD: eth_virtio_dev_init(): config->status=1
PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:07:D3:F5
PMD: eth_virtio_dev_init(): hw->max_rx_queues=1 hw->max_tx_queues=1
PMD: eth_virtio_dev_init(): port 1 vendorID=0x1af4 deviceID=0x1000
PMD: virtio_dev_vring_start():  >>
Interactive-mode selected
Configuring Port 0 (socket 0)
PMD: virtio_dev_configure(): configure
PMD: virtio_dev_tx_queue_setup():  >>
PMD: virtio_dev_queue_setup(): selecting queue: 1
PMD: virtio_dev_queue_setup(): vq_size: 256 nb_desc:512
PMD: virtio_dev_queue_setup(): vring_size: 10244, rounded_vring_size: 12288
PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x5fb000
PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2a3a9fb000
PMD: virtio_dev_rx_queue_setup():  >>
PMD: virtio_dev_queue_setup(): selecting queue: 0
PMD: virtio_dev_queue_setup(): vq_size: 256 nb_desc:128
PMD: virtio_dev_queue_setup(): vring_size: 10244, rounded_vring_size: 12288
PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x5f6000
PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2a3a9f6000
PMD: virtio_dev_link_update(): Get link status from hw
PMD: virtio_dev_link_update(): Port 0 is up
PMD: virtio_dev_rxtx_start():  >>
PMD: virtio_dev_vring_start():  >>
PMD: virtio_dev_vring_start(): Allocated 256 bufs
PMD: virtio_dev_vring_start():  >>
PMD: virtio_dev_start(): nb_queues=1
PMD: virtio_dev_start(): Notified backend at initialization
PMD: virtio_send_command(): vq->vq_desc_head_idx = 0, status = 255, 
vq->hw->cvq = 0x7f29fb3567c0 vq = 0x7f29fb3567c0
PMD: virtio_send_command(): vq->vq_queue_index = 2

[ ... blocking]


Results after patch was applied:

vagrant@vagrant-ubuntu-vivid-64:~/dpdk/x86_64-native-linuxapp-gcc/app$ 
sudo ./testpmd -b 0000:00:03.0 -c 3 -n 1 -- -iEAL: Detected lcore 0 as 
core 0 on socket 0
EAL: Detected lcore 1 as core 1 on socket 0
EAL: Support maximum 128 logical core(s) by configuration.
EAL: Detected 2 lcore(s)
EAL: VFIO modules not all loaded, skip VFIO support...
EAL: Setting up physically contiguous memory...
EAL: Ask a virtual area of 0x400000 bytes
EAL: Virtual area found at 0x7f2c8e000000 (size = 0x400000)
EAL: Ask a virtual area of 0xe000000 bytes
EAL: Virtual area found at 0x7f2c7fe00000 (size = 0xe000000)
EAL: Ask a virtual area of 0x30c00000 bytes
EAL: Virtual area found at 0x7f2c4f000000 (size = 0x30c00000)
EAL: Ask a virtual area of 0x400000 bytes
EAL: Virtual area found at 0x7f2c4ea00000 (size = 0x400000)
EAL: Ask a virtual area of 0xa00000 bytes
EAL: Virtual area found at 0x7f2c4de00000 (size = 0xa00000)
EAL: Ask a virtual area of 0x200000 bytes
EAL: Virtual area found at 0x7f2c4da00000 (size = 0x200000)
EAL: Requesting 512 pages of size 2MB from socket 0
EAL: TSC frequency is ~2198491 KHz
EAL: WARNING: cpu flags constant_tsc=yes nonstop_tsc=no -> using 
unreliable clock cycles !
EAL: Master lcore 0 is ready (tid=904928c0;cpuset=[0])
EAL: lcore 1 is ready (tid=4d9ff700;cpuset=[1])
EAL: PCI device 0000:00:03.0 on NUMA socket -1
EAL:   probe driver: 1af4:1000 rte_virtio_pmd
EAL:   Device is blacklisted, not initializing
EAL: PCI device 0000:00:08.0 on NUMA socket -1
EAL:   probe driver: 1af4:1000 rte_virtio_pmd
PMD: parse_sysfs_value(): parse_sysfs_value(): cannot open sysfs value 
/sys/bus/pci/devices/0000:00:08.0/uio/uio0/portio/port0/size
PMD: virtio_resource_init_by_uio(): virtio_resource_init_by_uio(): 
cannot parse size
PMD: virtio_resource_init_by_ioports(): PCI Port IO found start=0xd040 
with size=0x20
PMD: virtio_negotiate_features(): guest_features before negotiate = cf8020
PMD: virtio_negotiate_features(): host_features before negotiate = 410fdda3
PMD: virtio_negotiate_features(): features after negotiate = f8020
PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:CC:DE:CD
PMD: eth_virtio_dev_init(): VIRTIO_NET_F_MQ is not supported
PMD: virtio_dev_cq_queue_setup():  >>
PMD: virtio_dev_queue_setup(): selecting queue: 2
PMD: virtio_dev_queue_setup(): vq_size: 16 nb_desc:0
PMD: virtio_dev_queue_setup(): vring_size: 4228, rounded_vring_size: 8192
PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x67b54000
PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2c4eb54000
PMD: eth_virtio_dev_init(): config->max_virtqueue_pairs=1
PMD: eth_virtio_dev_init(): config->status=1
PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:CC:DE:CD
PMD: eth_virtio_dev_init(): hw->max_rx_queues=1 hw->max_tx_queues=1
PMD: eth_virtio_dev_init(): port 0 vendorID=0x1af4 deviceID=0x1000
PMD: virtio_dev_vring_start():  >>
EAL: PCI device 0000:00:09.0 on NUMA socket -1
EAL:   probe driver: 1af4:1000 rte_virtio_pmd
PMD: parse_sysfs_value(): parse_sysfs_value(): cannot open sysfs value 
/sys/bus/pci/devices/0000:00:09.0/uio/uio1/portio/port0/size
PMD: virtio_resource_init_by_uio(): virtio_resource_init_by_uio(): 
cannot parse size
PMD: virtio_resource_init_by_ioports(): PCI Port IO found start=0xd060 
with size=0x20
PMD: virtio_negotiate_features(): guest_features before negotiate = cf8020
PMD: virtio_negotiate_features(): host_features before negotiate = 410fdda3
PMD: virtio_negotiate_features(): features after negotiate = f8020
PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:07:D3:F5
PMD: eth_virtio_dev_init(): VIRTIO_NET_F_MQ is not supported
PMD: virtio_dev_cq_queue_setup():  >>
PMD: virtio_dev_queue_setup(): selecting queue: 2
PMD: virtio_dev_queue_setup(): vq_size: 16 nb_desc:0
PMD: virtio_dev_queue_setup(): vring_size: 4228, rounded_vring_size: 8192
PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x67b50000
PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2c4eb50000
PMD: eth_virtio_dev_init(): config->max_virtqueue_pairs=1
PMD: eth_virtio_dev_init(): config->status=1
PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:07:D3:F5
PMD: eth_virtio_dev_init(): hw->max_rx_queues=1 hw->max_tx_queues=1
PMD: eth_virtio_dev_init(): port 1 vendorID=0x1af4 deviceID=0x1000
PMD: virtio_dev_vring_start():  >>
Interactive-mode selected
Configuring Port 0 (socket 0)
PMD: virtio_dev_configure(): configure
PMD: virtio_dev_tx_queue_setup():  >>
PMD: virtio_dev_queue_setup(): selecting queue: 1
PMD: virtio_dev_queue_setup(): vq_size: 256 nb_desc:512
PMD: virtio_dev_queue_setup(): vring_size: 10244, rounded_vring_size: 12288
PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x5fb000
PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2c8e1fb000
PMD: virtio_dev_rx_queue_setup():  >>
PMD: virtio_dev_queue_setup(): selecting queue: 0
PMD: virtio_dev_queue_setup(): vq_size: 256 nb_desc:128
PMD: virtio_dev_queue_setup(): vring_size: 10244, rounded_vring_size: 12288
PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x5f6000
PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2c8e1f6000
PMD: virtio_dev_link_update(): Get link status from hw
PMD: virtio_dev_link_update(): Port 0 is up
PMD: virtio_dev_rxtx_start():  >>
PMD: virtio_dev_vring_start():  >>
PMD: virtio_dev_vring_start(): Allocated 256 bufs
PMD: virtio_dev_vring_start():  >>
PMD: virtio_dev_start(): nb_queues=1
PMD: virtio_dev_start(): Notified backend at initialization
PMD: virtio_send_command(): vq->vq_desc_head_idx = 0, status = 255, 
vq->hw->cvq = 0x7f2c4eb567c0 vq = 0x7f2c4eb567c0
PMD: virtio_send_command(): vq->vq_queue_index = 2

[... blocking]
  
Yuanhan Liu Oct. 13, 2015, 9:54 a.m. UTC | #10
On Mon, Oct 12, 2015 at 10:58:17PM +0200, Steffen Bauch wrote:
> On 10/12/2015 10:39 AM, Yuanhan Liu wrote:
> >Hi,
> >
> >I just recognized that this dead loop is the same one that I have
> >experienced (see
> >http://dpdk.org/ml/archives/dev/2015-October/024737.html for
> >reference). Just applying the changes in this patch (only 07/12)
> >will not fix the dead loop at least in my setup.
> >Try to enable CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT, and dump more log?
> I enabled the additional debug output. First try was without any
> additional changes in master, but it blocked also. Second try was
> with
> 
> [dpdk-dev] [PATCH v6 06/13] virtio: read virtio_net_config correctly
> 
> applied, but same result.
> 
> If you want to recreate my setup, just follow instructions in
> 
> http://dpdk.org/ml/archives/dev/2015-October/024737.html
> 
> 
> vagrant@vagrant-ubuntu-vivid-64:~/dpdk$ git status
> On branch master
> Your branch is up-to-date with 'origin/master'.
> Changes not staged for commit:
>   (use "git add <file>..." to update what will be committed)
>   (use "git checkout -- <file>..." to discard changes in working directory)
> 
>     modified:   config/defconfig_x86_64-native-linuxapp-gcc
> 
> ..

Don't have clear clue there. But you could try Huawei's solution first.
It's likely that it will fix your problem.

If not, would you please try to reproduce it with qemu (you were using
virtualbox, right)?  And then dump the whoe command line here so that I
can try to reproduce and debug it on my side. Sorry that I don't use
virtualbox, as well as vagrant.

	--yliu

> 
> vagrant@vagrant-ubuntu-vivid-64:~/dpdk/x86_64-native-linuxapp-gcc/app$
> sudo ./testpmd -b 0000:00:03.0 -c 3 -n 1 -- -i
> EAL: Detected lcore 0 as core 0 on socket 0
> EAL: Detected lcore 1 as core 1 on socket 0
> EAL: Support maximum 128 logical core(s) by configuration.
> EAL: Detected 2 lcore(s)
> EAL: VFIO modules not all loaded, skip VFIO support...
> EAL: Setting up physically contiguous memory...
> EAL: Ask a virtual area of 0x400000 bytes
> EAL: Virtual area found at 0x7f2a3a800000 (size = 0x400000)
> EAL: Ask a virtual area of 0xe000000 bytes
> EAL: Virtual area found at 0x7f2a2c600000 (size = 0xe000000)
> EAL: Ask a virtual area of 0x30c00000 bytes
> EAL: Virtual area found at 0x7f29fb800000 (size = 0x30c00000)
> EAL: Ask a virtual area of 0x400000 bytes
> EAL: Virtual area found at 0x7f29fb200000 (size = 0x400000)
> EAL: Ask a virtual area of 0xa00000 bytes
> EAL: Virtual area found at 0x7f29fa600000 (size = 0xa00000)
> EAL: Ask a virtual area of 0x200000 bytes
> EAL: Virtual area found at 0x7f29fa200000 (size = 0x200000)
> EAL: Requesting 512 pages of size 2MB from socket 0
> EAL: TSC frequency is ~2198491 KHz
> EAL: WARNING: cpu flags constant_tsc=yes nonstop_tsc=no -> using
> unreliable clock cycles !
> EAL: Master lcore 0 is ready (tid=3c9938c0;cpuset=[0])
> EAL: lcore 1 is ready (tid=fa1ff700;cpuset=[1])
> EAL: PCI device 0000:00:03.0 on NUMA socket -1
> EAL:   probe driver: 1af4:1000 rte_virtio_pmd
> EAL:   Device is blacklisted, not initializing
> EAL: PCI device 0000:00:08.0 on NUMA socket -1
> EAL:   probe driver: 1af4:1000 rte_virtio_pmd
> PMD: parse_sysfs_value(): parse_sysfs_value(): cannot open sysfs
> value /sys/bus/pci/devices/0000:00:08.0/uio/uio0/portio/port0/size
> PMD: virtio_resource_init_by_uio(): virtio_resource_init_by_uio():
> cannot parse size
> PMD: virtio_resource_init_by_ioports(): PCI Port IO found
> start=0xd040 with size=0x20
> PMD: virtio_negotiate_features(): guest_features before negotiate = cf8020
> PMD: virtio_negotiate_features(): host_features before negotiate = 410fdda3
> PMD: virtio_negotiate_features(): features after negotiate = f8020
> PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:CC:DE:CD
> PMD: eth_virtio_dev_init(): VIRTIO_NET_F_MQ is not supported
> PMD: virtio_dev_cq_queue_setup():  >>
> PMD: virtio_dev_queue_setup(): selecting queue: 2
> PMD: virtio_dev_queue_setup(): vq_size: 16 nb_desc:0
> PMD: virtio_dev_queue_setup(): vring_size: 4228, rounded_vring_size: 8192
> PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x67b54000
> PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f29fb354000
> PMD: eth_virtio_dev_init(): config->max_virtqueue_pairs=1
> PMD: eth_virtio_dev_init(): config->status=1
> PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:CC:DE:CD
> PMD: eth_virtio_dev_init(): hw->max_rx_queues=1 hw->max_tx_queues=1
> PMD: eth_virtio_dev_init(): port 0 vendorID=0x1af4 deviceID=0x1000
> PMD: virtio_dev_vring_start():  >>
> EAL: PCI device 0000:00:09.0 on NUMA socket -1
> EAL:   probe driver: 1af4:1000 rte_virtio_pmd
> PMD: parse_sysfs_value(): parse_sysfs_value(): cannot open sysfs
> value /sys/bus/pci/devices/0000:00:09.0/uio/uio1/portio/port0/size
> PMD: virtio_resource_init_by_uio(): virtio_resource_init_by_uio():
> cannot parse size
> PMD: virtio_resource_init_by_ioports(): PCI Port IO found
> start=0xd060 with size=0x20
> PMD: virtio_negotiate_features(): guest_features before negotiate = cf8020
> PMD: virtio_negotiate_features(): host_features before negotiate = 410fdda3
> PMD: virtio_negotiate_features(): features after negotiate = f8020
> PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:07:D3:F5
> PMD: eth_virtio_dev_init(): VIRTIO_NET_F_MQ is not supported
> PMD: virtio_dev_cq_queue_setup():  >>
> PMD: virtio_dev_queue_setup(): selecting queue: 2
> PMD: virtio_dev_queue_setup(): vq_size: 16 nb_desc:0
> PMD: virtio_dev_queue_setup(): vring_size: 4228, rounded_vring_size: 8192
> PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x67b50000
> PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f29fb350000
> PMD: eth_virtio_dev_init(): config->max_virtqueue_pairs=1
> PMD: eth_virtio_dev_init(): config->status=1
> PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:07:D3:F5
> PMD: eth_virtio_dev_init(): hw->max_rx_queues=1 hw->max_tx_queues=1
> PMD: eth_virtio_dev_init(): port 1 vendorID=0x1af4 deviceID=0x1000
> PMD: virtio_dev_vring_start():  >>
> Interactive-mode selected
> Configuring Port 0 (socket 0)
> PMD: virtio_dev_configure(): configure
> PMD: virtio_dev_tx_queue_setup():  >>
> PMD: virtio_dev_queue_setup(): selecting queue: 1
> PMD: virtio_dev_queue_setup(): vq_size: 256 nb_desc:512
> PMD: virtio_dev_queue_setup(): vring_size: 10244, rounded_vring_size: 12288
> PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x5fb000
> PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2a3a9fb000
> PMD: virtio_dev_rx_queue_setup():  >>
> PMD: virtio_dev_queue_setup(): selecting queue: 0
> PMD: virtio_dev_queue_setup(): vq_size: 256 nb_desc:128
> PMD: virtio_dev_queue_setup(): vring_size: 10244, rounded_vring_size: 12288
> PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x5f6000
> PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2a3a9f6000
> PMD: virtio_dev_link_update(): Get link status from hw
> PMD: virtio_dev_link_update(): Port 0 is up
> PMD: virtio_dev_rxtx_start():  >>
> PMD: virtio_dev_vring_start():  >>
> PMD: virtio_dev_vring_start(): Allocated 256 bufs
> PMD: virtio_dev_vring_start():  >>
> PMD: virtio_dev_start(): nb_queues=1
> PMD: virtio_dev_start(): Notified backend at initialization
> PMD: virtio_send_command(): vq->vq_desc_head_idx = 0, status = 255,
> vq->hw->cvq = 0x7f29fb3567c0 vq = 0x7f29fb3567c0
> PMD: virtio_send_command(): vq->vq_queue_index = 2
> 
> [ ... blocking]
> 
> 
> Results after patch was applied:
> 
> vagrant@vagrant-ubuntu-vivid-64:~/dpdk/x86_64-native-linuxapp-gcc/app$
> sudo ./testpmd -b 0000:00:03.0 -c 3 -n 1 -- -iEAL: Detected lcore 0
> as core 0 on socket 0
> EAL: Detected lcore 1 as core 1 on socket 0
> EAL: Support maximum 128 logical core(s) by configuration.
> EAL: Detected 2 lcore(s)
> EAL: VFIO modules not all loaded, skip VFIO support...
> EAL: Setting up physically contiguous memory...
> EAL: Ask a virtual area of 0x400000 bytes
> EAL: Virtual area found at 0x7f2c8e000000 (size = 0x400000)
> EAL: Ask a virtual area of 0xe000000 bytes
> EAL: Virtual area found at 0x7f2c7fe00000 (size = 0xe000000)
> EAL: Ask a virtual area of 0x30c00000 bytes
> EAL: Virtual area found at 0x7f2c4f000000 (size = 0x30c00000)
> EAL: Ask a virtual area of 0x400000 bytes
> EAL: Virtual area found at 0x7f2c4ea00000 (size = 0x400000)
> EAL: Ask a virtual area of 0xa00000 bytes
> EAL: Virtual area found at 0x7f2c4de00000 (size = 0xa00000)
> EAL: Ask a virtual area of 0x200000 bytes
> EAL: Virtual area found at 0x7f2c4da00000 (size = 0x200000)
> EAL: Requesting 512 pages of size 2MB from socket 0
> EAL: TSC frequency is ~2198491 KHz
> EAL: WARNING: cpu flags constant_tsc=yes nonstop_tsc=no -> using
> unreliable clock cycles !
> EAL: Master lcore 0 is ready (tid=904928c0;cpuset=[0])
> EAL: lcore 1 is ready (tid=4d9ff700;cpuset=[1])
> EAL: PCI device 0000:00:03.0 on NUMA socket -1
> EAL:   probe driver: 1af4:1000 rte_virtio_pmd
> EAL:   Device is blacklisted, not initializing
> EAL: PCI device 0000:00:08.0 on NUMA socket -1
> EAL:   probe driver: 1af4:1000 rte_virtio_pmd
> PMD: parse_sysfs_value(): parse_sysfs_value(): cannot open sysfs
> value /sys/bus/pci/devices/0000:00:08.0/uio/uio0/portio/port0/size
> PMD: virtio_resource_init_by_uio(): virtio_resource_init_by_uio():
> cannot parse size
> PMD: virtio_resource_init_by_ioports(): PCI Port IO found
> start=0xd040 with size=0x20
> PMD: virtio_negotiate_features(): guest_features before negotiate = cf8020
> PMD: virtio_negotiate_features(): host_features before negotiate = 410fdda3
> PMD: virtio_negotiate_features(): features after negotiate = f8020
> PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:CC:DE:CD
> PMD: eth_virtio_dev_init(): VIRTIO_NET_F_MQ is not supported
> PMD: virtio_dev_cq_queue_setup():  >>
> PMD: virtio_dev_queue_setup(): selecting queue: 2
> PMD: virtio_dev_queue_setup(): vq_size: 16 nb_desc:0
> PMD: virtio_dev_queue_setup(): vring_size: 4228, rounded_vring_size: 8192
> PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x67b54000
> PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2c4eb54000
> PMD: eth_virtio_dev_init(): config->max_virtqueue_pairs=1
> PMD: eth_virtio_dev_init(): config->status=1
> PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:CC:DE:CD
> PMD: eth_virtio_dev_init(): hw->max_rx_queues=1 hw->max_tx_queues=1
> PMD: eth_virtio_dev_init(): port 0 vendorID=0x1af4 deviceID=0x1000
> PMD: virtio_dev_vring_start():  >>
> EAL: PCI device 0000:00:09.0 on NUMA socket -1
> EAL:   probe driver: 1af4:1000 rte_virtio_pmd
> PMD: parse_sysfs_value(): parse_sysfs_value(): cannot open sysfs
> value /sys/bus/pci/devices/0000:00:09.0/uio/uio1/portio/port0/size
> PMD: virtio_resource_init_by_uio(): virtio_resource_init_by_uio():
> cannot parse size
> PMD: virtio_resource_init_by_ioports(): PCI Port IO found
> start=0xd060 with size=0x20
> PMD: virtio_negotiate_features(): guest_features before negotiate = cf8020
> PMD: virtio_negotiate_features(): host_features before negotiate = 410fdda3
> PMD: virtio_negotiate_features(): features after negotiate = f8020
> PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:07:D3:F5
> PMD: eth_virtio_dev_init(): VIRTIO_NET_F_MQ is not supported
> PMD: virtio_dev_cq_queue_setup():  >>
> PMD: virtio_dev_queue_setup(): selecting queue: 2
> PMD: virtio_dev_queue_setup(): vq_size: 16 nb_desc:0
> PMD: virtio_dev_queue_setup(): vring_size: 4228, rounded_vring_size: 8192
> PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x67b50000
> PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2c4eb50000
> PMD: eth_virtio_dev_init(): config->max_virtqueue_pairs=1
> PMD: eth_virtio_dev_init(): config->status=1
> PMD: eth_virtio_dev_init(): PORT MAC: 08:00:27:07:D3:F5
> PMD: eth_virtio_dev_init(): hw->max_rx_queues=1 hw->max_tx_queues=1
> PMD: eth_virtio_dev_init(): port 1 vendorID=0x1af4 deviceID=0x1000
> PMD: virtio_dev_vring_start():  >>
> Interactive-mode selected
> Configuring Port 0 (socket 0)
> PMD: virtio_dev_configure(): configure
> PMD: virtio_dev_tx_queue_setup():  >>
> PMD: virtio_dev_queue_setup(): selecting queue: 1
> PMD: virtio_dev_queue_setup(): vq_size: 256 nb_desc:512
> PMD: virtio_dev_queue_setup(): vring_size: 10244, rounded_vring_size: 12288
> PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x5fb000
> PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2c8e1fb000
> PMD: virtio_dev_rx_queue_setup():  >>
> PMD: virtio_dev_queue_setup(): selecting queue: 0
> PMD: virtio_dev_queue_setup(): vq_size: 256 nb_desc:128
> PMD: virtio_dev_queue_setup(): vring_size: 10244, rounded_vring_size: 12288
> PMD: virtio_dev_queue_setup(): vq->vq_ring_mem:      0x5f6000
> PMD: virtio_dev_queue_setup(): vq->vq_ring_virt_mem: 0x7f2c8e1f6000
> PMD: virtio_dev_link_update(): Get link status from hw
> PMD: virtio_dev_link_update(): Port 0 is up
> PMD: virtio_dev_rxtx_start():  >>
> PMD: virtio_dev_vring_start():  >>
> PMD: virtio_dev_vring_start(): Allocated 256 bufs
> PMD: virtio_dev_vring_start():  >>
> PMD: virtio_dev_start(): nb_queues=1
> PMD: virtio_dev_start(): Notified backend at initialization
> PMD: virtio_send_command(): vq->vq_desc_head_idx = 0, status = 255,
> vq->hw->cvq = 0x7f2c4eb567c0 vq = 0x7f2c4eb567c0
> PMD: virtio_send_command(): vq->vq_queue_index = 2
> 
> [... blocking]
  

Patch

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 465d3cd..b2f4120 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1162,7 +1162,6 @@  eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 	struct virtio_hw *hw = eth_dev->data->dev_private;
 	struct virtio_net_config *config;
 	struct virtio_net_config local_config;
-	uint32_t offset_conf = sizeof(config->mac);
 	struct rte_pci_device *pci_dev;
 
 	RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr));
@@ -1222,7 +1221,9 @@  eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 		config = &local_config;
 
 		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
-			offset_conf += sizeof(config->status);
+			vtpci_read_dev_config(hw,
+				offsetof(struct virtio_net_config, status),
+				&config->status, sizeof(config->status));
 		} else {
 			PMD_INIT_LOG(DEBUG,
 				     "VIRTIO_NET_F_STATUS is not supported");
@@ -1230,15 +1231,16 @@  eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 		}
 
 		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
-			offset_conf += sizeof(config->max_virtqueue_pairs);
+			vtpci_read_dev_config(hw,
+				offsetof(struct virtio_net_config, max_virtqueue_pairs),
+				&config->max_virtqueue_pairs,
+				sizeof(config->max_virtqueue_pairs));
 		} else {
 			PMD_INIT_LOG(DEBUG,
 				     "VIRTIO_NET_F_MQ is not supported");
 			config->max_virtqueue_pairs = 1;
 		}
 
-		vtpci_read_dev_config(hw, 0, (uint8_t *)config, offset_conf);
-
 		hw->max_rx_queues =
 			(VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
 			VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;