[v1,21/21] net/virtio-user: remove max queues limitation

Message ID 20221130155639.150553-22-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series Add control queue & MQ support to Virtio-user vDPA

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/Intel-compilation success Compilation OK
ci/github-robot: build success github build: passed
ci/intel-Testing fail Testing issues
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-abi-testing warning Testing issues
ci/iol-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS

Commit Message

Maxime Coquelin Nov. 30, 2022, 3:56 p.m. UTC
  This patch removes the limitation of 8 queue pairs by
dynamically allocating vring metadata once we know the
maximum number of queue pairs supported by the backend.

This is especially useful for Vhost-vDPA with physical
devices, where the number of supported queue pairs may be
much higher than 8.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio.h                   |   6 -
 .../net/virtio/virtio_user/virtio_user_dev.c  | 118 ++++++++++++++----
 .../net/virtio/virtio_user/virtio_user_dev.h  |  16 +--
 drivers/net/virtio/virtio_user_ethdev.c       |  17 +--
 4 files changed, 109 insertions(+), 48 deletions(-)
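
With the hard limit removed, the number of queue pairs is bounded only
by what the backend advertises. As an illustration (this exact command
line is hypothetical, not taken from the patch), a vhost-vDPA backend
exposing 16 queue pairs could now be requested with:

    --vdev 'net_virtio_user0,path=/dev/vhost-vdpa-0,queues=16'

whereas the same devargs previously failed in virtio_user_pmd_probe()
with "arg queues 16 exceeds the limit 8".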
  

Comments

Chenbo Xia Jan. 31, 2023, 5:19 a.m. UTC | #1
Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, November 30, 2022 11:57 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com; eperezma@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v1 21/21] net/virtio-user: remove max queues limitation
> 
> [commit message and diff trimmed; the full patch appears below]
> 
> +	dev->packed_queues = rte_zmalloc("virtio_user_dev",
> +			nr_vrings * sizeof(*dev->packed_queues), 0);

Should we pass down whether the ring is packed, to save the allocation
of dev->packed_queues and to get the correct size of dev->vrings.ptr?

Thanks,
Chenbo

> [remainder of diff trimmed]
  
Maxime Coquelin Feb. 7, 2023, 2:14 p.m. UTC | #2
On 1/31/23 06:19, Xia, Chenbo wrote:
> Hi Maxime,
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Subject: [PATCH v1 21/21] net/virtio-user: remove max queues limitation
>>
>> [commit message and diff trimmed; the full patch appears below]
>>
>> +	dev->packed_queues = rte_zmalloc("virtio_user_dev",
>> +			nr_vrings * sizeof(*dev->packed_queues), 0);
> 
> Should we pass down whether the ring is packed, to save the allocation
> of dev->packed_queues and to get the correct size of dev->vrings.ptr?

That's not ideal because feature negotiation with the Virtio layer
hasn't taken place yet, but it should be doable for packed ring
specifically, since it can only be disabled via devargs, not at
run time.

Thanks,
Maxime
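
For reference, here is a minimal sketch of the variant discussed above
(the packed_ring parameter and the simplified error handling are
hypothetical, not part of this patch): since packed ring can only be
disabled through devargs, the caller could pass that knowledge down,
skip the packed-ring-only metadata, and size the vring area exactly:

static int
virtio_user_alloc_vrings(struct virtio_user_dev *dev, bool packed_ring)
{
	int nr_vrings, size;

	nr_vrings = dev->max_queue_pairs * 2;
	if (dev->hw_cvq)
		nr_vrings++;

	/* Size for exactly one ring flavor instead of RTE_MAX() of both. */
	size = packed_ring ? sizeof(*dev->vrings.packed) :
			sizeof(*dev->vrings.split);
	dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0);
	if (!dev->vrings.ptr)
		return -1;

	/* Shadow queue state is only needed when using packed ring. */
	if (packed_ring) {
		dev->packed_queues = rte_zmalloc("virtio_user_dev",
				nr_vrings * sizeof(*dev->packed_queues), 0);
		if (!dev->packed_queues) {
			rte_free(dev->vrings.ptr);
			dev->vrings.ptr = NULL;
			return -1;
		}
	}

	return 0;
}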
  

Patch

diff --git a/drivers/net/virtio/virtio.h b/drivers/net/virtio/virtio.h
index 5c8f71a44d..04a897bf51 100644
--- a/drivers/net/virtio/virtio.h
+++ b/drivers/net/virtio/virtio.h
@@ -124,12 +124,6 @@ 
 	VIRTIO_NET_HASH_TYPE_UDP_EX)
 
 
-/*
- * Maximum number of virtqueues per device.
- */
-#define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
-#define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)
-
 /* VirtIO device IDs. */
 #define VIRTIO_ID_NETWORK  0x01
 #define VIRTIO_ID_BLOCK    0x02
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 7c48c9bb29..aa24fdea70 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -17,6 +17,7 @@ 
 #include <rte_alarm.h>
 #include <rte_string_fns.h>
 #include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
 
 #include "vhost.h"
 #include "virtio_user_dev.h"
@@ -58,8 +59,8 @@  virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 	int ret;
 	struct vhost_vring_file file;
 	struct vhost_vring_state state;
-	struct vring *vring = &dev->vrings[queue_sel];
-	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
+	struct vring *vring = &dev->vrings.split[queue_sel];
+	struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
 	struct vhost_vring_addr addr = {
 		.index = queue_sel,
 		.log_guest_addr = 0,
@@ -299,18 +300,6 @@  virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_
 		return ret;
 	}
 
-	if (dev->max_queue_pairs > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
-		/*
-		 * If the device supports control queue, the control queue
-		 * index is max_virtqueue_pairs * 2. Disable MQ if it happens.
-		 */
-		PMD_DRV_LOG(ERR, "(%s) Device advertises too many queues (%u, max supported %u)",
-				dev->path, dev->max_queue_pairs, VIRTIO_MAX_VIRTQUEUE_PAIRS);
-		dev->max_queue_pairs = 1;
-
-		return -1;
-	}
-
 	return 0;
 }
 
@@ -579,6 +568,86 @@  virtio_user_dev_setup(struct virtio_user_dev *dev)
 	return 0;
 }
 
+static int
+virtio_user_alloc_vrings(struct virtio_user_dev *dev)
+{
+	int i, size, nr_vrings;
+
+	nr_vrings = dev->max_queue_pairs * 2;
+	if (dev->hw_cvq)
+		nr_vrings++;
+
+	dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0);
+	if (!dev->callfds) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path);
+		return -1;
+	}
+
+	dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->kickfds), 0);
+	if (!dev->kickfds) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path);
+		goto free_callfds;
+	}
+
+	for (i = 0; i < nr_vrings; i++) {
+		dev->callfds[i] = -1;
+		dev->kickfds[i] = -1;
+	}
+
+	size = RTE_MAX(sizeof(*dev->vrings.split), sizeof(*dev->vrings.packed));
+	dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0);
+	if (!dev->vrings.ptr) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev->path);
+		goto free_kickfds;
+	}
+
+	dev->packed_queues = rte_zmalloc("virtio_user_dev",
+			nr_vrings * sizeof(*dev->packed_queues), 0);
+	if (!dev->packed_queues) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc packed queues metadata", dev->path);
+		goto free_vrings;
+	}
+
+	dev->qp_enabled = rte_zmalloc("virtio_user_dev",
+			dev->max_queue_pairs * sizeof(*dev->qp_enabled), 0);
+	if (!dev->qp_enabled) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc QP enable states", dev->path);
+		goto free_packed_queues;
+	}
+
+	return 0;
+
+free_packed_queues:
+	rte_free(dev->packed_queues);
+	dev->packed_queues = NULL;
+free_vrings:
+	rte_free(dev->vrings.ptr);
+	dev->vrings.ptr = NULL;
+free_kickfds:
+	rte_free(dev->kickfds);
+	dev->kickfds = NULL;
+free_callfds:
+	rte_free(dev->callfds);
+	dev->callfds = NULL;
+
+	return -1;
+}
+
+static void
+virtio_user_free_vrings(struct virtio_user_dev *dev)
+{
+	rte_free(dev->qp_enabled);
+	dev->qp_enabled = NULL;
+	rte_free(dev->packed_queues);
+	dev->packed_queues = NULL;
+	rte_free(dev->vrings.ptr);
+	dev->vrings.ptr = NULL;
+	rte_free(dev->kickfds);
+	dev->kickfds = NULL;
+	rte_free(dev->callfds);
+	dev->callfds = NULL;
+}
+
 /* Use below macro to filter features from vhost backend */
 #define VIRTIO_USER_SUPPORTED_FEATURES			\
 	(1ULL << VIRTIO_NET_F_MAC		|	\
@@ -607,16 +676,10 @@  virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
 		     enum virtio_user_backend_type backend_type)
 {
 	uint64_t backend_features;
-	int i;
 
 	pthread_mutex_init(&dev->mutex, NULL);
 	strlcpy(dev->path, path, PATH_MAX);
 
-	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
-		dev->kickfds[i] = -1;
-		dev->callfds[i] = -1;
-	}
-
 	dev->started = 0;
 	dev->queue_pairs = 1; /* mq disabled by default */
 	dev->queue_size = queue_size;
@@ -661,9 +724,14 @@  virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
 	if (dev->max_queue_pairs > 1)
 		cq = 1;
 
+	if (virtio_user_alloc_vrings(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to allocate vring metadata", dev->path);
+		goto destroy;
+	}
+
 	if (virtio_user_dev_init_notify(dev) < 0) {
 		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
-		goto destroy;
+		goto free_vrings;
 	}
 
 	if (virtio_user_fill_intr_handle(dev) < 0) {
@@ -722,6 +790,8 @@  virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
 
 notify_uninit:
 	virtio_user_dev_uninit_notify(dev);
+free_vrings:
+	virtio_user_free_vrings(dev);
 destroy:
 	dev->ops->destroy(dev);
 
@@ -742,6 +812,8 @@  virtio_user_dev_uninit(struct virtio_user_dev *dev)
 
 	virtio_user_dev_uninit_notify(dev);
 
+	virtio_user_free_vrings(dev);
+
 	free(dev->ifname);
 
 	if (dev->is_server)
@@ -897,7 +969,7 @@  static void
 virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
 {
 	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
-	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
+	struct vring_packed *vring = &dev->vrings.packed[queue_idx];
 	uint16_t n_descs, flags;
 
 	/* Perform a load-acquire barrier in desc_is_avail to
@@ -931,7 +1003,7 @@  virtio_user_handle_cq_split(struct virtio_user_dev *dev, uint16_t queue_idx)
 	uint16_t avail_idx, desc_idx;
 	struct vring_used_elem *uep;
 	uint32_t n_descs;
-	struct vring *vring = &dev->vrings[queue_idx];
+	struct vring *vring = &dev->vrings.split[queue_idx];
 
 	/* Consume avail ring, using used ring idx as first one */
 	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index e8753f6019..7323d88302 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -29,8 +29,8 @@  struct virtio_user_dev {
 	enum virtio_user_backend_type backend_type;
 	bool		is_server;  /* server or client mode */
 
-	int		callfds[VIRTIO_MAX_VIRTQUEUES];
-	int		kickfds[VIRTIO_MAX_VIRTQUEUES];
+	int		*callfds;
+	int		*kickfds;
 	int		mac_specified;
 	uint16_t	max_queue_pairs;
 	uint16_t	queue_pairs;
@@ -48,11 +48,13 @@  struct virtio_user_dev {
 	char		*ifname;
 
 	union {
-		struct vring		vrings[VIRTIO_MAX_VIRTQUEUES];
-		struct vring_packed	packed_vrings[VIRTIO_MAX_VIRTQUEUES];
-	};
-	struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES];
-	bool		qp_enabled[VIRTIO_MAX_VIRTQUEUE_PAIRS];
+		void			*ptr;
+		struct vring		*split;
+		struct vring_packed	*packed;
+	} vrings;
+
+	struct virtio_user_queue *packed_queues;
+	bool		*qp_enabled;
 
 	struct virtio_user_backend_ops *ops;
 	pthread_mutex_t	mutex;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index d23959e836..b1fc4d5d30 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -186,7 +186,7 @@  virtio_user_setup_queue_packed(struct virtqueue *vq,
 	uint64_t used_addr;
 	uint16_t i;
 
-	vring  = &dev->packed_vrings[queue_idx];
+	vring  = &dev->vrings.packed[queue_idx];
 	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
 	avail_addr = desc_addr + vq->vq_nentries *
 		sizeof(struct vring_packed_desc);
@@ -216,10 +216,10 @@  virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
 							 ring[vq->vq_nentries]),
 				   VIRTIO_VRING_ALIGN);
 
-	dev->vrings[queue_idx].num = vq->vq_nentries;
-	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
-	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
-	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
+	dev->vrings.split[queue_idx].num = vq->vq_nentries;
+	dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
+	dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
+	dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
 }
 
 static int
@@ -619,13 +619,6 @@  virtio_user_pmd_probe(struct rte_vdev_device *vdev)
 		}
 	}
 
-	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
-		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
-			VIRTIO_USER_ARG_QUEUES_NUM, queues,
-			VIRTIO_MAX_VIRTQUEUE_PAIRS);
-		goto end;
-	}
-
 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
 				       &get_integer_arg, &mrg_rxbuf) < 0) {