[RFC,17/27] vhost: add control virtqueue support

Message ID 20230331154259.1447831-18-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series Add VDUSE support to Vhost library

Commit Message

Maxime Coquelin March 31, 2023, 3:42 p.m. UTC
  In order to support multi-queue with VDUSE, having
control queue support is required.

This patch adds the control queue implementation; it will be
used later when adding VDUSE support. Only the split ring
layout is supported for now; packed ring support will be
added later.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/meson.build       |   1 +
 lib/vhost/vhost.h           |   2 +
 lib/vhost/virtio_net_ctrl.c | 282 ++++++++++++++++++++++++++++++++++++
 lib/vhost/virtio_net_ctrl.h |  10 ++
 4 files changed, 295 insertions(+)
 create mode 100644 lib/vhost/virtio_net_ctrl.c
 create mode 100644 lib/vhost/virtio_net_ctrl.h
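
For readers new to the virtio-net control queue, the descriptor chain layout that the new virtio_net_ctrl_pop() expects can be summarized as follows (an illustrative sketch derived from the patch below, not part of it):

/*
 * Split-ring control request, as parsed by virtio_net_ctrl_pop():
 *
 *   desc[0] (RO): class + command          \  copied back-to-back into
 *   desc[1] (RO): command data (optional)  /  the malloc'ed ctrl_elem->ctrl_req
 *   desc[N] (WO): 1-byte ack               -> mapped as ctrl_elem->desc_ack
 *
 * The chain may also be provided through a single indirect descriptor.
 * virtio_net_ctrl_handle_req() writes VIRTIO_NET_OK or VIRTIO_NET_ERR into
 * the ack byte, and virtio_net_ctrl_push() then returns the chain in the
 * used ring with len set to the number of descriptors consumed.
 */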
  

Comments

Chenbo Xia May 9, 2023, 5:29 a.m. UTC | #1
Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Friday, March 31, 2023 11:43 PM
> To: dev@dpdk.org; david.marchand@redhat.com; Xia, Chenbo
> <chenbo.xia@intel.com>; mkp@redhat.com; fbl@redhat.com;
> jasowang@redhat.com; Liang, Cunming <cunming.liang@intel.com>; Xie, Yongji
> <xieyongji@bytedance.com>; echaudro@redhat.com; eperezma@redhat.com;
> amorenoz@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [RFC 17/27] vhost: add control virtqueue support
> 
> In order to support multi-queue with VDUSE, having
> control queue support in required.

in -> is

> 
> This patch adds control queue implementation, it will be
> used later when adding VDUSE support. Only split ring
> layout is supported for now, packed ring support will be
> added later.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/vhost/meson.build       |   1 +
>  lib/vhost/vhost.h           |   2 +
>  lib/vhost/virtio_net_ctrl.c | 282 ++++++++++++++++++++++++++++++++++++
>  lib/vhost/virtio_net_ctrl.h |  10 ++
>  4 files changed, 295 insertions(+)
>  create mode 100644 lib/vhost/virtio_net_ctrl.c
>  create mode 100644 lib/vhost/virtio_net_ctrl.h
> 
> diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
> index 197a51d936..cdcd403df3 100644
> --- a/lib/vhost/meson.build
> +++ b/lib/vhost/meson.build
> @@ -28,6 +28,7 @@ sources = files(
>          'vhost_crypto.c',
>          'vhost_user.c',
>          'virtio_net.c',
> +        'virtio_net_ctrl.c',
>  )
>  headers = files(
>          'rte_vdpa.h',
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 8f0875b4e2..76663aed24 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -525,6 +525,8 @@ struct virtio_net {
>  	int			postcopy_ufd;
>  	int			postcopy_listening;
> 
> +	struct vhost_virtqueue	*cvq;
> +
>  	struct rte_vdpa_device *vdpa_dev;
> 
>  	/* context data for the external message handlers */
> diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
> new file mode 100644
> index 0000000000..16ea63b42f
> --- /dev/null
> +++ b/lib/vhost/virtio_net_ctrl.c
> @@ -0,0 +1,282 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (c) 2023 Red Hat, Inc.
> + */
> +
> +#undef RTE_ANNOTATE_LOCKS
> +
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <unistd.h>
> +
> +#include "vhost.h"
> +#include "virtio_net_ctrl.h"
> +
> +struct virtio_net_ctrl {
> +	uint8_t class;
> +	uint8_t command;
> +	uint8_t command_data[];
> +};
> +
> +struct virtio_net_ctrl_elem {
> +	struct virtio_net_ctrl *ctrl_req;
> +	uint16_t head_idx;
> +	uint16_t n_descs;
> +	uint8_t *desc_ack;
> +};
> +
> +static int
> +virtio_net_ctrl_pop(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
> +{
> +	struct vhost_virtqueue *cvq = dev->cvq;
> +	uint16_t avail_idx, desc_idx, n_descs = 0;
> +	uint64_t desc_len, desc_addr, desc_iova, data_len = 0;
> +	uint8_t *ctrl_req;
> +	struct vring_desc *descs;
> +
> +	avail_idx = __atomic_load_n(&cvq->avail->idx, __ATOMIC_ACQUIRE);
> +	if (avail_idx == cvq->last_avail_idx) {
> +		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue empty\n");
> +		return 0;
> +	}
> +
> +	desc_idx = cvq->avail->ring[cvq->last_avail_idx];
> +	if (desc_idx >= cvq->size) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Out of range desc index, dropping\n");
> +		goto err;
> +	}
> +
> +	ctrl_elem->head_idx = desc_idx;
> +
> +	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
> +		desc_len = cvq->desc[desc_idx].len;
> +		desc_iova = cvq->desc[desc_idx].addr;
> +
> +		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> +					desc_iova, &desc_len, VHOST_ACCESS_RO);
> +		if (!descs || desc_len != cvq->desc[desc_idx].len) {
> +			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
> +			goto err;
> +		}
> +
> +		desc_idx = 0;
> +	} else {
> +		descs = cvq->desc;
> +	}
> +
> +	while (1) {
> +		desc_len = descs[desc_idx].len;
> +		desc_iova = descs[desc_idx].addr;
> +
> +		n_descs++;
> +
> +		if (descs[desc_idx].flags & VRING_DESC_F_WRITE) {
> +			if (ctrl_elem->desc_ack) {
> +				VHOST_LOG_CONFIG(dev->ifname, ERR,
> +						"Unexpected ctrl chain layout\n");
> +				goto err;
> +			}
> +
> +			if (desc_len != sizeof(uint8_t)) {
> +				VHOST_LOG_CONFIG(dev->ifname, ERR,
> +						"Invalid ack size for ctrl req, dropping\n");
> +				goto err;
> +			}
> +
> +			ctrl_elem->desc_ack = (uint8_t *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> +					desc_iova, &desc_len, VHOST_ACCESS_WO);
> +			if (!ctrl_elem->desc_ack || desc_len != sizeof(uint8_t)) {
> +				VHOST_LOG_CONFIG(dev->ifname, ERR,
> +						"Failed to map ctrl ack descriptor\n");
> +				goto err;
> +			}
> +		} else {
> +			if (ctrl_elem->desc_ack) {
> +				VHOST_LOG_CONFIG(dev->ifname, ERR,
> +						"Unexpected ctrl chain layout\n");
> +				goto err;
> +			}
> +
> +			data_len += desc_len;
> +		}
> +
> +		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
> +			break;
> +
> +		desc_idx = descs[desc_idx].next;
> +	}
> +
> +	desc_idx = ctrl_elem->head_idx;
> +
> +	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT)
> +		ctrl_elem->n_descs = 1;
> +	else
> +		ctrl_elem->n_descs = n_descs;
> +
> +	if (!ctrl_elem->desc_ack) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Missing ctrl ack descriptor\n");
> +		goto err;
> +	}
> +
> +	if (data_len < sizeof(ctrl_elem->ctrl_req->class) + sizeof(ctrl_elem->ctrl_req->command)) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Invalid control header size\n");
> +		goto err;
> +	}
> +
> +	ctrl_elem->ctrl_req = malloc(data_len);
> +	if (!ctrl_elem->ctrl_req) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to alloc ctrl request\n");
> +		goto err;
> +	}
> +
> +	ctrl_req = (uint8_t *)ctrl_elem->ctrl_req;
> +
> +	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
> +		desc_len = cvq->desc[desc_idx].len;
> +		desc_iova = cvq->desc[desc_idx].addr;
> +
> +		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> +					desc_iova, &desc_len, VHOST_ACCESS_RO);
> +		if (!descs || desc_len != cvq->desc[desc_idx].len) {
> +			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
> +			goto err;

goto free_err?

Thanks,
Chenbo 
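
A minimal sketch of that suggestion (assuming the point is that ctrl_req has already been allocated at this stage, so the error path should release it):

		if (!descs || desc_len != cvq->desc[desc_idx].len) {
			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
			goto free_err;	/* frees the ctrl_req allocated just above */
		}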

> +		}
> +
> +		desc_idx = 0;
> +	} else {
> +		descs = cvq->desc;
> +	}
> +
> +	while (!(descs[desc_idx].flags & VRING_DESC_F_WRITE)) {
> +		desc_len = descs[desc_idx].len;
> +		desc_iova = descs[desc_idx].addr;
> +
> +		desc_addr = vhost_iova_to_vva(dev, cvq, desc_iova, &desc_len, VHOST_ACCESS_RO);
> +		if (!desc_addr || desc_len < descs[desc_idx].len) {
> +			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl descriptor\n");
> +			goto free_err;
> +		}
> +
> +		memcpy(ctrl_req, (void *)(uintptr_t)desc_addr, desc_len);
> +		ctrl_req += desc_len;
> +
> +		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
> +			break;
> +
> +		desc_idx = descs[desc_idx].next;
> +	}
> +
> +	cvq->last_avail_idx++;
> +	if (cvq->last_avail_idx >= cvq->size)
> +		cvq->last_avail_idx -= cvq->size;
> +
> +	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
> +		vhost_avail_event(cvq) = cvq->last_avail_idx;
> +
> +	return 1;
> +
> +free_err:
> +	free(ctrl_elem->ctrl_req);
> +err:
> +	cvq->last_avail_idx++;
> +	if (cvq->last_avail_idx >= cvq->size)
> +		cvq->last_avail_idx -= cvq->size;
> +
> +	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
> +		vhost_avail_event(cvq) = cvq->last_avail_idx;
> +
> +	return -1;
> +}
> +
> +static uint8_t
> +virtio_net_ctrl_handle_req(struct virtio_net *dev, struct virtio_net_ctrl *ctrl_req)
> +{
> +	uint8_t ret = VIRTIO_NET_ERR;
> +
> +	if (ctrl_req->class == VIRTIO_NET_CTRL_MQ &&
> +			ctrl_req->command == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> +		uint16_t queue_pairs;
> +		uint32_t i;
> +
> +		queue_pairs = *(uint16_t *)(uintptr_t)ctrl_req->command_data;
> +		VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl req: MQ %u queue pairs\n", queue_pairs);
> +		ret = VIRTIO_NET_OK;
> +
> +		for (i = 0; i < dev->nr_vring; i++) {
> +			struct vhost_virtqueue *vq = dev->virtqueue[i];
> +			bool enable;
> +
> +			if (vq == dev->cvq)
> +				continue;
> +
> +			if (i < queue_pairs * 2)
> +				enable = true;
> +			else
> +				enable = false;
> +
> +			vq->enabled = enable;
> +			if (dev->notify_ops->vring_state_changed)
> +				dev->notify_ops->vring_state_changed(dev->vid, i, enable);
> +		}
> +	}
> +
> +	return ret;
> +}
> +
> +static int
> +virtio_net_ctrl_push(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
> +{
> +	struct vhost_virtqueue *cvq = dev->cvq;
> +	struct vring_used_elem *used_elem;
> +
> +	used_elem = &cvq->used->ring[cvq->last_used_idx];
> +	used_elem->id = ctrl_elem->head_idx;
> +	used_elem->len = ctrl_elem->n_descs;
> +
> +	cvq->last_used_idx++;
> +	if (cvq->last_used_idx >= cvq->size)
> +		cvq->last_used_idx -= cvq->size;
> +
> +	__atomic_store_n(&cvq->used->idx, cvq->last_used_idx, __ATOMIC_RELEASE);
> +
> +	free(ctrl_elem->ctrl_req);
> +
> +	return 0;
> +}
> +
> +int
> +virtio_net_ctrl_handle(struct virtio_net *dev)
> +{
> +	int ret = 0;
> +
> +	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Packed ring not supported yet\n");
> +		return -1;
> +	}
> +
> +	if (!dev->cvq) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "missing control queue\n");
> +		return -1;
> +	}
> +
> +	rte_spinlock_lock(&dev->cvq->access_lock);
> +
> +	while (1) {
> +		struct virtio_net_ctrl_elem ctrl_elem;
> +
> +		memset(&ctrl_elem, 0, sizeof(struct virtio_net_ctrl_elem));
> +
> +		ret = virtio_net_ctrl_pop(dev, &ctrl_elem);
> +		if (ret <= 0)
> +			break;
> +
> +		*ctrl_elem.desc_ack = virtio_net_ctrl_handle_req(dev, ctrl_elem.ctrl_req);
> +
> +		ret = virtio_net_ctrl_push(dev, &ctrl_elem);
> +		if (ret < 0)
> +			break;
> +	}
> +
> +	rte_spinlock_unlock(&dev->cvq->access_lock);
> +
> +	return ret;
> +}
> diff --git a/lib/vhost/virtio_net_ctrl.h b/lib/vhost/virtio_net_ctrl.h
> new file mode 100644
> index 0000000000..9a90f4b9da
> --- /dev/null
> +++ b/lib/vhost/virtio_net_ctrl.h
> @@ -0,0 +1,10 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (c) 2023 Red Hat, Inc.
> + */
> +
> +#ifndef _VIRTIO_NET_CTRL_H
> +#define _VIRTIO_NET_CTRL_H
> +
> +int virtio_net_ctrl_handle(struct virtio_net *dev);
> +
> +#endif
> --
> 2.39.2
  

Patch

diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
index 197a51d936..cdcd403df3 100644
--- a/lib/vhost/meson.build
+++ b/lib/vhost/meson.build
@@ -28,6 +28,7 @@ sources = files(
         'vhost_crypto.c',
         'vhost_user.c',
         'virtio_net.c',
+        'virtio_net_ctrl.c',
 )
 headers = files(
         'rte_vdpa.h',
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 8f0875b4e2..76663aed24 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -525,6 +525,8 @@ struct virtio_net {
 	int			postcopy_ufd;
 	int			postcopy_listening;
 
+	struct vhost_virtqueue	*cvq;
+
 	struct rte_vdpa_device *vdpa_dev;
 
 	/* context data for the external message handlers */
diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
new file mode 100644
index 0000000000..16ea63b42f
--- /dev/null
+++ b/lib/vhost/virtio_net_ctrl.c
@@ -0,0 +1,282 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Red Hat, Inc.
+ */
+
+#undef RTE_ANNOTATE_LOCKS
+
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "vhost.h"
+#include "virtio_net_ctrl.h"
+
+struct virtio_net_ctrl {
+	uint8_t class;
+	uint8_t command;
+	uint8_t command_data[];
+};
+
+struct virtio_net_ctrl_elem {
+	struct virtio_net_ctrl *ctrl_req;
+	uint16_t head_idx;
+	uint16_t n_descs;
+	uint8_t *desc_ack;
+};
+
+static int
+virtio_net_ctrl_pop(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
+{
+	struct vhost_virtqueue *cvq = dev->cvq;
+	uint16_t avail_idx, desc_idx, n_descs = 0;
+	uint64_t desc_len, desc_addr, desc_iova, data_len = 0;
+	uint8_t *ctrl_req;
+	struct vring_desc *descs;
+
+	avail_idx = __atomic_load_n(&cvq->avail->idx, __ATOMIC_ACQUIRE);
+	if (avail_idx == cvq->last_avail_idx) {
+		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue empty\n");
+		return 0;
+	}
+
+	desc_idx = cvq->avail->ring[cvq->last_avail_idx];
+	if (desc_idx >= cvq->size) {
+		VHOST_LOG_CONFIG(dev->ifname, ERR, "Out of range desc index, dropping\n");
+		goto err;
+	}
+
+	ctrl_elem->head_idx = desc_idx;
+
+	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+		desc_len = cvq->desc[desc_idx].len;
+		desc_iova = cvq->desc[desc_idx].addr;
+
+		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
+					desc_iova, &desc_len, VHOST_ACCESS_RO);
+		if (!descs || desc_len != cvq->desc[desc_idx].len) {
+			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
+			goto err;
+		}
+
+		desc_idx = 0;
+	} else {
+		descs = cvq->desc;
+	}
+
+	while (1) {
+		desc_len = descs[desc_idx].len;
+		desc_iova = descs[desc_idx].addr;
+
+		n_descs++;
+
+		if (descs[desc_idx].flags & VRING_DESC_F_WRITE) {
+			if (ctrl_elem->desc_ack) {
+				VHOST_LOG_CONFIG(dev->ifname, ERR,
+						"Unexpected ctrl chain layout\n");
+				goto err;
+			}
+
+			if (desc_len != sizeof(uint8_t)) {
+				VHOST_LOG_CONFIG(dev->ifname, ERR,
+						"Invalid ack size for ctrl req, dropping\n");
+				goto err;
+			}
+
+			ctrl_elem->desc_ack = (uint8_t *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
+					desc_iova, &desc_len, VHOST_ACCESS_WO);
+			if (!ctrl_elem->desc_ack || desc_len != sizeof(uint8_t)) {
+				VHOST_LOG_CONFIG(dev->ifname, ERR,
+						"Failed to map ctrl ack descriptor\n");
+				goto err;
+			}
+		} else {
+			if (ctrl_elem->desc_ack) {
+				VHOST_LOG_CONFIG(dev->ifname, ERR,
+						"Unexpected ctrl chain layout\n");
+				goto err;
+			}
+
+			data_len += desc_len;
+		}
+
+		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
+			break;
+
+		desc_idx = descs[desc_idx].next;
+	}
+
+	desc_idx = ctrl_elem->head_idx;
+
+	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT)
+		ctrl_elem->n_descs = 1;
+	else
+		ctrl_elem->n_descs = n_descs;
+
+	if (!ctrl_elem->desc_ack) {
+		VHOST_LOG_CONFIG(dev->ifname, ERR, "Missing ctrl ack descriptor\n");
+		goto err;
+	}
+
+	if (data_len < sizeof(ctrl_elem->ctrl_req->class) + sizeof(ctrl_elem->ctrl_req->command)) {
+		VHOST_LOG_CONFIG(dev->ifname, ERR, "Invalid control header size\n");
+		goto err;
+	}
+
+	ctrl_elem->ctrl_req = malloc(data_len);
+	if (!ctrl_elem->ctrl_req) {
+		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to alloc ctrl request\n");
+		goto err;
+	}
+
+	ctrl_req = (uint8_t *)ctrl_elem->ctrl_req;
+
+	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+		desc_len = cvq->desc[desc_idx].len;
+		desc_iova = cvq->desc[desc_idx].addr;
+
+		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
+					desc_iova, &desc_len, VHOST_ACCESS_RO);
+		if (!descs || desc_len != cvq->desc[desc_idx].len) {
+			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
+			goto err;
+		}
+
+		desc_idx = 0;
+	} else {
+		descs = cvq->desc;
+	}
+
+	while (!(descs[desc_idx].flags & VRING_DESC_F_WRITE)) {
+		desc_len = descs[desc_idx].len;
+		desc_iova = descs[desc_idx].addr;
+
+		desc_addr = vhost_iova_to_vva(dev, cvq, desc_iova, &desc_len, VHOST_ACCESS_RO);
+		if (!desc_addr || desc_len < descs[desc_idx].len) {
+			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl descriptor\n");
+			goto free_err;
+		}
+
+		memcpy(ctrl_req, (void *)(uintptr_t)desc_addr, desc_len);
+		ctrl_req += desc_len;
+
+		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
+			break;
+
+		desc_idx = descs[desc_idx].next;
+	}
+
+	cvq->last_avail_idx++;
+	if (cvq->last_avail_idx >= cvq->size)
+		cvq->last_avail_idx -= cvq->size;
+
+	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+		vhost_avail_event(cvq) = cvq->last_avail_idx;
+
+	return 1;
+
+free_err:
+	free(ctrl_elem->ctrl_req);
+err:
+	cvq->last_avail_idx++;
+	if (cvq->last_avail_idx >= cvq->size)
+		cvq->last_avail_idx -= cvq->size;
+
+	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+		vhost_avail_event(cvq) = cvq->last_avail_idx;
+
+	return -1;
+}
+
+static uint8_t
+virtio_net_ctrl_handle_req(struct virtio_net *dev, struct virtio_net_ctrl *ctrl_req)
+{
+	uint8_t ret = VIRTIO_NET_ERR;
+
+	if (ctrl_req->class == VIRTIO_NET_CTRL_MQ &&
+			ctrl_req->command == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+		uint16_t queue_pairs;
+		uint32_t i;
+
+		queue_pairs = *(uint16_t *)(uintptr_t)ctrl_req->command_data;
+		VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl req: MQ %u queue pairs\n", queue_pairs);
+		ret = VIRTIO_NET_OK;
+
+		for (i = 0; i < dev->nr_vring; i++) {
+			struct vhost_virtqueue *vq = dev->virtqueue[i];
+			bool enable;
+
+			if (vq == dev->cvq)
+				continue;
+
+			if (i < queue_pairs * 2)
+				enable = true;
+			else
+				enable = false;
+
+			vq->enabled = enable;
+			if (dev->notify_ops->vring_state_changed)
+				dev->notify_ops->vring_state_changed(dev->vid, i, enable);
+		}
+	}
+
+	return ret;
+}
+
+static int
+virtio_net_ctrl_push(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
+{
+	struct vhost_virtqueue *cvq = dev->cvq;
+	struct vring_used_elem *used_elem;
+
+	used_elem = &cvq->used->ring[cvq->last_used_idx];
+	used_elem->id = ctrl_elem->head_idx;
+	used_elem->len = ctrl_elem->n_descs;
+
+	cvq->last_used_idx++;
+	if (cvq->last_used_idx >= cvq->size)
+		cvq->last_used_idx -= cvq->size;
+
+	__atomic_store_n(&cvq->used->idx, cvq->last_used_idx, __ATOMIC_RELEASE);
+
+	free(ctrl_elem->ctrl_req);
+
+	return 0;
+}
+
+int
+virtio_net_ctrl_handle(struct virtio_net *dev)
+{
+	int ret = 0;
+
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+		VHOST_LOG_CONFIG(dev->ifname, ERR, "Packed ring not supported yet\n");
+		return -1;
+	}
+
+	if (!dev->cvq) {
+		VHOST_LOG_CONFIG(dev->ifname, ERR, "missing control queue\n");
+		return -1;
+	}
+
+	rte_spinlock_lock(&dev->cvq->access_lock);
+
+	while (1) {
+		struct virtio_net_ctrl_elem ctrl_elem;
+
+		memset(&ctrl_elem, 0, sizeof(struct virtio_net_ctrl_elem));
+
+		ret = virtio_net_ctrl_pop(dev, &ctrl_elem);
+		if (ret <= 0)
+			break;
+
+		*ctrl_elem.desc_ack = virtio_net_ctrl_handle_req(dev, ctrl_elem.ctrl_req);
+
+		ret = virtio_net_ctrl_push(dev, &ctrl_elem);
+		if (ret < 0)
+			break;
+	}
+
+	rte_spinlock_unlock(&dev->cvq->access_lock);
+
+	return ret;
+}
diff --git a/lib/vhost/virtio_net_ctrl.h b/lib/vhost/virtio_net_ctrl.h
new file mode 100644
index 0000000000..9a90f4b9da
--- /dev/null
+++ b/lib/vhost/virtio_net_ctrl.h
@@ -0,0 +1,10 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Red Hat, Inc.
+ */
+
+#ifndef _VIRTIO_NET_CTRL_H
+#define _VIRTIO_NET_CTRL_H
+
+int virtio_net_ctrl_handle(struct virtio_net *dev);
+
+#endif
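
The call site for virtio_net_ctrl_handle() is only introduced later in the series, when the VDUSE backend is added. Roughly, a backend is expected to invoke it whenever the control virtqueue is kicked, along these lines (an illustrative sketch; handle_cvq_kick() is a hypothetical helper, not part of this patch):

static void
handle_cvq_kick(struct virtio_net *dev)
{
	/* Drain all pending control requests and ack them in the used ring. */
	if (virtio_net_ctrl_handle(dev) < 0)
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to handle control queue requests\n");
}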