[v2,2/6] net/iavf_be: control queue enabling

Message ID 20210107071503.14720-3-jingjing.wu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series: introduce iavf backend driver

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Jingjing Wu Jan. 7, 2021, 7:14 a.m. UTC
1. Set up control rx/tx queues.
2. Implement the emu device callback functions.
3. Enable recv/send msg through the control queue (see the sketch below).

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Chenbo Xia <chenbo.xia@intel.com>
Signed-off-by: Xiuchun Lu <xiuchun.lu@intel.com>
---
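
For context, a minimal, self-contained sketch (illustrative stand-in types and
names, not driver code) of how a virtchnl message rides on an admin queue
descriptor in this design: cookie_high carries the virtchnl opcode, cookie_low
the return value, and the guest buffer address is split across
addr_high/addr_low and reassembled on the other side.

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    /* Same idea as IAVF_BE_32_TO_64 in the patch below. */
    #define SKETCH_32_TO_64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))

    /* Simplified stand-in for the admin queue descriptor. */
    struct sketch_aq_desc {
            uint16_t flags;
            uint16_t opcode;
            uint16_t datalen;
            uint32_t cookie_high; /* virtchnl opcode */
            uint32_t cookie_low;  /* virtchnl return value */
            uint32_t addr_high;   /* guest buffer address, upper 32 bits */
            uint32_t addr_low;    /* guest buffer address, lower 32 bits */
    };

    /* Sender side: fill a descriptor that carries a virtchnl message. */
    static void
    sketch_encode(struct sketch_aq_desc *d, uint32_t v_op, uint32_t retval,
                  uint64_t guest_addr, uint16_t len)
    {
            memset(d, 0, sizeof(*d));
            d->cookie_high = v_op;
            d->cookie_low = retval;
            d->datalen = len;
            d->addr_high = (uint32_t)(guest_addr >> 32);
            d->addr_low = (uint32_t)guest_addr;
    }

    int
    main(void)
    {
            struct sketch_aq_desc d;
            uint64_t addr;

            sketch_encode(&d, 1 /* e.g. a version request */, 0,
                          0x12345678abcdULL, 64);
            /* Receiver side: reassemble the 64-bit guest address. */
            addr = SKETCH_32_TO_64(d.addr_high, d.addr_low);
            printf("opcode %u retval %u buf 0x%" PRIx64 " len %u\n",
                   d.cookie_high, d.cookie_low, addr, d.datalen);
            return 0;
    }
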
 drivers/net/iavf_be/iavf_be.h        |  38 ++++
 drivers/net/iavf_be/iavf_be_ethdev.c | 321 ++++++++++++++++++++++++++-
 drivers/net/iavf_be/iavf_be_vchnl.c  | 287 ++++++++++++++++++++++++
 drivers/net/iavf_be/meson.build      |   1 +
 4 files changed, 645 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/iavf_be/iavf_be_vchnl.c
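
The new iavf_be_vchnl.c handles virtchnl messages in a dedicated polling
thread started and stopped from the ethdev init/uninit paths. A minimal,
self-contained sketch of that start/stop pattern, using plain pthreads and
hypothetical names rather than the driver's types:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Shared run flag, analogous to the adapter's thread_status field. */
    static volatile int thread_status;

    static void *
    sketch_poll_loop(void *arg)
    {
            (void)arg;
            while (thread_status) {
                    usleep(3000); /* sleep ~3 ms between polls */
                    /* ... check ring indexes and handle pending messages ... */
            }
            return NULL;
    }

    int
    main(void)
    {
            pthread_t tid;

            thread_status = 1;                      /* session start */
            if (pthread_create(&tid, NULL, sketch_poll_loop, NULL)) {
                    thread_status = 0;
                    return 1;
            }
            /* ... device runs; messages handled in the background ... */
            thread_status = 0;                      /* session stop */
            pthread_join(tid, NULL);
            printf("adminq session stopped\n");
            return 0;
    }
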
  

Patch

diff --git a/drivers/net/iavf_be/iavf_be.h b/drivers/net/iavf_be/iavf_be.h
index 956955786a..c182d9558a 100644
--- a/drivers/net/iavf_be/iavf_be.h
+++ b/drivers/net/iavf_be/iavf_be.h
@@ -5,13 +5,48 @@ 
 #ifndef _IAVF_BE_H_
 #define _IAVF_BE_H_
 
+#define IAVF_BE_AQ_BUF_SZ            4096
+#define IAVF_BE_32_TO_64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))
+
+#define IAVFBE_READ_32(addr)        \
+	rte_le_to_cpu_32(*(volatile uint32_t *)(addr))
+#define IAVFBE_WRITE_32(addr, val)  \
+	(*(volatile uint32_t *)(addr) = rte_cpu_to_le_32(val))
+
+struct iavfbe_control_q {
+	rte_spinlock_t access_lock;
+	struct rte_emudev_q_info q_info;
+	struct iavf_aq_desc *ring;
+	uint64_t p_ring_addr;	/* Guest physical address of the ring */
+	uint16_t len;
+	volatile uint8_t *tail;
+	volatile uint8_t *head;
+
+	uint16_t next_to_use;
+	uint16_t next_to_clean;
+
+	uint32_t cmd_retval; /* return value of the cmd response from PF */
+	uint8_t *aq_req;     /* buffer to store the adminq request from VF; only used for asq, NULL for arq */
+};
+
+/* Control queue structure of iavf */
+struct iavfbe_controlq_info {
+	struct iavfbe_control_q asq;
+	struct iavfbe_control_q arq;
+};
+
 /* Structure to store private data for backend instance*/
 struct iavfbe_adapter {
 	struct rte_eth_dev *eth_dev;
 	struct rte_emudev *emu_dev;
 	uint16_t edev_id;  /* Emulated Device ID */
 	struct rte_emudev_info dev_info;
+	struct rte_iavf_emu_mem *mem_table;
 
+	struct iavfbe_controlq_info cq_info; /* Control/Admin Queue info */
+	/* Adminq handle thread info */
+	volatile int thread_status;
+	pthread_t thread_id;
 	uint16_t nb_qps;
 	bool link_up;
 	int cq_irqfd;
@@ -31,6 +66,9 @@  struct iavfbe_adapter {
 	((struct iavfbe_adapter *)adapter)
 
 int iavfbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+void iavfbe_handle_virtchnl_msg(void *arg);
+void iavfbe_reset_asq(struct iavfbe_adapter *adapter, bool lock);
+void iavfbe_reset_arq(struct iavfbe_adapter *adapter, bool lock);
 
 extern int iavfbe_logtype;
 #define IAVF_BE_LOG(level, fmt, args...) \
diff --git a/drivers/net/iavf_be/iavf_be_ethdev.c b/drivers/net/iavf_be/iavf_be_ethdev.c
index 3d5ca34ec0..2ab66f889d 100644
--- a/drivers/net/iavf_be/iavf_be_ethdev.c
+++ b/drivers/net/iavf_be/iavf_be_ethdev.c
@@ -14,6 +14,7 @@ 
 #include <rte_iavf_emu.h>
 
 #include <iavf_type.h>
+#include <virtchnl.h>
 #include "iavf_be.h"
 
 #define AVFBE_EDEV_ID_ARG "emu"
@@ -33,6 +34,12 @@  static struct rte_eth_link iavfbe_link = {
 	.link_status = ETH_LINK_DOWN
 };
 
+static int iavfbe_new_device(struct rte_emudev *dev);
+static void iavfbe_destroy_device(struct rte_emudev *dev);
+static int iavfbe_update_device(struct rte_emudev *dev);
+static int iavfbe_lock_dp(struct rte_emudev *dev, int lock);
+static int iavfbe_reset_device(struct rte_emudev *dev);
+
 static int iavfbe_dev_configure(struct rte_eth_dev *dev);
 static int iavfbe_dev_close(struct rte_eth_dev *dev);
 static int iavfbe_dev_start(struct rte_eth_dev *dev);
@@ -41,6 +48,16 @@  static int iavfbe_dev_info_get(struct rte_eth_dev *dev,
 				struct rte_eth_dev_info *dev_info);
 static void iavfbe_destroy_adapter(struct rte_eth_dev *dev);
 
+struct rte_iavf_emu_notify_ops iavfbe_notify_ops = {
+	.device_ready = iavfbe_new_device,
+	.device_destroy = iavfbe_destroy_device,
+	.update_status = iavfbe_update_device,
+	.device_start = NULL,
+	.device_stop = NULL,
+	.lock_dp = iavfbe_lock_dp,
+	.reset_device = iavfbe_reset_device,
+};
+
 static const struct eth_dev_ops iavfbe_eth_dev_ops = {
 	.dev_configure              = iavfbe_dev_configure,
 	.dev_close                  = iavfbe_dev_close,
@@ -51,7 +68,8 @@  static const struct eth_dev_ops iavfbe_eth_dev_ops = {
 };
 
 static int
-iavfbe_dev_info_get(struct rte_eth_dev *dev  __rte_unused,  struct rte_eth_dev_info *dev_info)
+iavfbe_dev_info_get(struct rte_eth_dev *dev  __rte_unused,
+		    struct rte_eth_dev_info *dev_info)
 {
 	dev_info->max_rx_queues = 0;
 	dev_info->max_tx_queues = 0;
@@ -61,7 +79,6 @@  iavfbe_dev_info_get(struct rte_eth_dev *dev  __rte_unused,  struct rte_eth_dev_i
 	return 0;
 }
 
-
 static int
 iavfbe_dev_configure(struct rte_eth_dev *dev __rte_unused)
 {
@@ -122,6 +139,241 @@  iavfbe_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/* Called when emulation device is ready */
+static int
+iavfbe_new_device(struct rte_emudev *dev)
+{
+	struct iavfbe_adapter *adapter =
+		(struct iavfbe_adapter *)dev->backend_priv;
+	struct rte_iavf_emu_mem **mem = &(adapter->mem_table);
+	struct rte_emudev_irq_info irq_info;
+	struct rte_emudev_q_info q_info;
+	struct rte_emudev_db_info db_info;
+	uint64_t addr;
+	uint16_t i;
+
+	if (rte_emudev_get_mem_table(dev->dev_id, (void **)mem)) {
+		IAVF_BE_LOG(ERR, "Can not get mem table\n");
+		return -1;
+	}
+
+	for (i = 0; i < RTE_IAVF_EMU_ADMINQ_NUM; i++) {
+		if (rte_emudev_get_queue_info(dev->dev_id, i, &q_info)) {
+			IAVF_BE_LOG(ERR,
+				"Can not get queue info of qid %d\n", i);
+			return -1;
+		}
+		/*
+		 * Only the doorbell of a LAN queue is valid when the device
+		 * is ready; other LAN queue info is acquired through virtchnl.
+		 *
+		 * The AdminQ's irq and doorbell are both ready at this stage.
+		 */
+		if (rte_emudev_get_db_info(dev->dev_id, q_info.doorbell_id,
+					   &db_info)) {
+			IAVF_BE_LOG(ERR,
+				"Can not get doorbell info of qid %d\n", i);
+			return -1;
+		}
+
+		/* Only support memory based doorbell for now */
+		if (db_info.flag & RTE_EMUDEV_DB_FD ||
+			db_info.data.mem.size != 4)
+			return -1;
+
+		if (i == RTE_IAVF_EMU_ADMINQ_TXQ) {
+			adapter->cq_info.asq.tail =
+				(uint8_t *)db_info.data.mem.base;
+		} else {
+			adapter->cq_info.arq.tail =
+				(uint8_t *)db_info.data.mem.base;
+
+			if (rte_emudev_get_irq_info(dev->dev_id,
+				q_info.irq_vector, &irq_info)) {
+				IAVF_BE_LOG(ERR,
+					"Can not get irq info of qid %d\n", i);
+				return -1;
+			}
+
+			adapter->cq_irqfd = irq_info.eventfd;
+		}
+	}
+
+	/* LAN queue info will be set at queue setup */
+
+	if (rte_emudev_get_attr(dev->dev_id, RTE_IAVF_EMU_ATTR_ASQ_HEAD,
+		(rte_emudev_attr_t)&addr)) {
+		IAVF_BE_LOG(ERR, "Can not get asq head\n");
+		return -1;
+	}
+	adapter->cq_info.asq.head = (uint8_t *)(uintptr_t)addr;
+
+	if (rte_emudev_get_attr(dev->dev_id, RTE_IAVF_EMU_ATTR_ARQ_HEAD,
+		(rte_emudev_attr_t)&addr)) {
+		IAVF_BE_LOG(ERR, "Can not get arq head\n");
+		return -1;
+	}
+	adapter->cq_info.arq.head = (uint8_t *)(uintptr_t)addr;
+
+	iavfbe_reset_asq(adapter, false);
+	iavfbe_reset_arq(adapter, false);
+
+	if (rte_emudev_get_attr(dev->dev_id, RTE_IAVF_EMU_ATTR_RESET,
+		(rte_emudev_attr_t)&addr)) {
+		IAVF_BE_LOG(ERR, "Can not get reset attribute\n");
+		return -1;
+	}
+	adapter->reset = (uint8_t *)(uintptr_t)addr;
+	IAVFBE_WRITE_32(adapter->reset, RTE_IAVF_EMU_RESET_COMPLETED);
+	adapter->started = 1;
+	IAVF_BE_LOG(DEBUG, "NEW DEVICE: memtable %p\n", adapter->mem_table);
+
+	return 0;
+}
+
+static void
+iavfbe_destroy_device(struct rte_emudev *dev)
+{
+	struct iavfbe_adapter *adapter =
+		(struct iavfbe_adapter *)dev->backend_priv;
+
+	/* TODO: Disable all lan queues */
+
+	/* update link status */
+	adapter->link_up = false;
+	iavfbe_dev_link_update(adapter->eth_dev, 0);
+}
+
+static int
+iavfbe_update_device(struct rte_emudev *dev)
+{
+	struct iavfbe_adapter *adapter =
+		(struct iavfbe_adapter *)dev->backend_priv;
+	struct rte_iavf_emu_mem **mem = &(adapter->mem_table);
+	struct rte_emudev_q_info q_info;
+	struct rte_emudev_irq_info irq_info;
+
+	if (rte_emudev_get_mem_table(dev->dev_id, (void **)mem)) {
+		IAVF_BE_LOG(ERR, "Can not get mem table\n");
+		return -1;
+	}
+
+	if (rte_emudev_get_queue_info(dev->dev_id,
+		RTE_IAVF_EMU_ADMINQ_RXQ, &q_info)) {
+		IAVF_BE_LOG(ERR, "Can not get queue info of qid %d\n",
+			RTE_IAVF_EMU_ADMINQ_RXQ);
+		return -1;
+	}
+
+	if (rte_emudev_get_irq_info(dev->dev_id, q_info.irq_vector, &irq_info)) {
+		IAVF_BE_LOG(ERR, "Can not get irq info of qid %d\n",
+			RTE_IAVF_EMU_ADMINQ_RXQ);
+		return -1;
+	}
+
+	/* TODO: Lan queue info update */
+	adapter->cq_irqfd = irq_info.eventfd;
+	rte_atomic32_set(&adapter->irq_enable, irq_info.enable);
+
+	return 0;
+}
+
+static int
+iavfbe_lock_dp(struct rte_emudev *dev, int lock)
+{
+	struct iavfbe_adapter *adapter =
+		(struct iavfbe_adapter *)dev->backend_priv;
+
+	/* Acquire/Release lock of control queue and lan queue */
+
+	if (lock) {
+		/* TODO: Lan queue lock */
+		rte_spinlock_lock(&adapter->cq_info.asq.access_lock);
+		rte_spinlock_lock(&adapter->cq_info.arq.access_lock);
+	} else {
+		/* TODO: Lan queue unlock */
+		rte_spinlock_unlock(&adapter->cq_info.asq.access_lock);
+		rte_spinlock_unlock(&adapter->cq_info.arq.access_lock);
+	}
+
+	return 0;
+}
+
+void
+iavfbe_reset_asq(struct iavfbe_adapter *adapter, bool lock)
+{
+	struct iavfbe_control_q *q;
+
+	q = &adapter->cq_info.asq;
+
+	if (lock)
+		rte_spinlock_lock(&q->access_lock);
+
+	if (q->aq_req)
+		memset(q->aq_req, 0, IAVF_BE_AQ_BUF_SZ);
+	memset(&q->q_info, 0, sizeof(q->q_info));
+	q->ring = NULL;
+	q->p_ring_addr = 0;
+	q->len = 0;
+	q->next_to_clean = 0;
+	q->cmd_retval = 0;
+	if (q->head)
+		IAVFBE_WRITE_32(q->head, 0);
+
+	/* Do not reset tail as it is initialized by the FE */
+
+	if (lock)
+		rte_spinlock_unlock(&q->access_lock);
+
+}
+
+void
+iavfbe_reset_arq(struct iavfbe_adapter *adapter, bool lock)
+{
+	struct iavfbe_control_q *q;
+
+	q = &adapter->cq_info.arq;
+
+	if (lock)
+		rte_spinlock_lock(&q->access_lock);
+
+	memset(&q->q_info, 0, sizeof(q->q_info));
+	q->ring = NULL;
+	q->p_ring_addr = 0;
+	q->len = 0;
+	q->next_to_use = 0;
+	if (q->head)
+		IAVFBE_WRITE_32(q->head, 0);
+
+	/* Do not reset tail as it is initialized by the FE */
+
+	if (lock)
+		rte_spinlock_unlock(&q->access_lock);
+
+}
+
+static int
+iavfbe_reset_device(struct rte_emudev *dev)
+{
+	struct iavfbe_adapter *adapter =
+		(struct iavfbe_adapter *)dev->backend_priv;
+
+	/* Lock has been acquired by lock_dp */
+	/* TODO: reset all queues */
+	iavfbe_reset_asq(adapter, false);
+	iavfbe_reset_arq(adapter, false);
+
+	adapter->link_up = 0;
+	adapter->unicast_promisc = true;
+	adapter->multicast_promisc = true;
+	adapter->vlan_filter = false;
+	adapter->vlan_strip = false;
+	adapter->cq_irqfd = -1;
+	adapter->adapter_stopped = 1;
+
+	return 0;
+}
+
 static inline int
 save_str(const char *key __rte_unused, const char *value,
 	void *extra_args)
@@ -146,6 +398,34 @@  set_mac(const char *key __rte_unused, const char *value, void *extra_args)
 	return 0;
 }
 
+static int
+iavfbe_driver_admq_session_start(struct rte_eth_dev *eth_dev)
+{
+	struct iavfbe_adapter *adapter =
+		IAVFBE_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	int ret;
+
+	adapter->thread_status = 1;
+	ret = pthread_create(&adapter->thread_id, NULL,
+			     (void *)iavfbe_handle_virtchnl_msg,
+			     eth_dev);
+	if (ret) {
+		IAVF_BE_LOG(ERR, "Can't create a thread\n");
+		adapter->thread_status = 0;
+	}
+	return ret;
+}
+
+static void
+iavfbe_driver_admq_session_stop(struct rte_eth_dev *eth_dev)
+{
+	struct iavfbe_adapter *adapter =
+		IAVFBE_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+
+	adapter->thread_status = 0;
+	pthread_join(adapter->thread_id, NULL);
+}
+
 static int
 iavfbe_init_adapter(struct rte_eth_dev *eth_dev,
 		    struct rte_emudev *emu_dev,
@@ -177,8 +457,44 @@  iavfbe_init_adapter(struct rte_eth_dev *eth_dev,
 		goto err_info;
 
 	adapter->nb_qps = conf->qp_num;
+
+	adapter->cq_info.asq.aq_req =
+		rte_zmalloc_socket("iavfbe", IAVF_BE_AQ_BUF_SZ, 0,
+				   eth_dev->device->numa_node);
+	if (!adapter->cq_info.asq.aq_req) {
+		IAVF_BE_LOG(ERR,
+			    "Failed to allocate buffer for control queue request");
+		ret = -ENOMEM;
+		goto err_aq;
+	}
+
+	/* Init lock */
+	rte_spinlock_init(&adapter->cq_info.asq.access_lock);
+	rte_spinlock_init(&adapter->cq_info.arq.access_lock);
+
+	adapter->unicast_promisc = true;
+	adapter->multicast_promisc = true;
+	adapter->vlan_filter = false;
+	adapter->vlan_strip = false;
+
+	/* No need to map the region or init the admin queue here now; this is
+	 * done when the emu device is ready. */
+
+	/* Currently RSS is not necessary for device emulator */
+
+	/* Subscribe event from emulated avf device */
+	rte_emudev_subscribe_event(emu_dev->dev_id, &iavfbe_notify_ops);
+
+	/* Create a thread for virtchnl command processing */
+	ret = iavfbe_driver_admq_session_start(eth_dev);
+	if (ret) {
+		IAVF_BE_LOG(ERR, "iavfbe driver adminq session start failed");
+		goto err_thread;
+	}
+
 	return 0;
 
+err_thread:
+	rte_free(adapter->cq_info.asq.aq_req);
+err_aq:
 err_info:
 	rte_free(conf);
 	return ret;
@@ -190,6 +506,7 @@  iavfbe_destroy_adapter(struct rte_eth_dev *dev)
 	struct iavfbe_adapter *adapter =
 		IAVFBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
+	iavfbe_driver_admq_session_stop(dev);
 	if (adapter->emu_dev) {
 		adapter->emu_dev->backend_priv = NULL;
 		rte_wmb();
diff --git a/drivers/net/iavf_be/iavf_be_vchnl.c b/drivers/net/iavf_be/iavf_be_vchnl.c
new file mode 100644
index 0000000000..56b8a485a5
--- /dev/null
+++ b/drivers/net/iavf_be/iavf_be_vchnl.c
@@ -0,0 +1,287 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <sys/eventfd.h>
+
+#include <rte_kvargs.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_dev.h>
+#include <rte_emudev.h>
+#include <rte_iavf_emu.h>
+
+#include <iavf_type.h>
+#include <virtchnl.h>
+
+#include "iavf_be.h"
+
+static inline void
+iavfbe_notify(struct iavfbe_adapter *adapter)
+{
+	if (adapter->cq_irqfd == -1 ||
+	    !rte_atomic32_read(&adapter->irq_enable))
+		return;
+
+	if (eventfd_write(adapter->cq_irqfd, (eventfd_t)1) < 0)
+		IAVF_BE_LOG(ERR, "failed to notify front-end: %s",
+					strerror(errno));
+}
+
+__rte_unused static int
+iavfbe_send_msg_to_vf(struct iavfbe_adapter *adapter,
+			uint32_t opcode,
+			uint32_t retval,
+			uint8_t *msg,
+			uint16_t msglen)
+{
+	struct iavfbe_control_q *arq = &adapter->cq_info.arq;
+	struct iavf_aq_desc *desc;
+	enum iavf_status status = IAVF_SUCCESS;
+	uint32_t dma_buff_low, dma_buff_high;
+	uint16_t ntu;
+
+	if (msglen > IAVF_BE_AQ_BUF_SZ) {
+		IAVF_BE_LOG(ERR, "ARQ: msg is too long: %u\n", msglen);
+		status = IAVF_ERR_INVALID_SIZE;
+		goto arq_send_error;
+	}
+
+	rte_spinlock_lock(&arq->access_lock);
+
+	ntu = arq->next_to_use;
+	if (ntu == IAVFBE_READ_32(arq->tail)) {
+		IAVF_BE_LOG(ERR, "ARQ: No free desc\n");
+		status = IAVF_ERR_QUEUE_EMPTY;
+		goto arq_send_error;
+	}
+	desc = &arq->ring[ntu];
+	dma_buff_low = LE32_TO_CPU(desc->params.external.addr_low);
+	dma_buff_high = LE32_TO_CPU(desc->params.external.addr_high);
+
+	/* Prepare descriptor */
+	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+	desc->opcode = CPU_TO_LE16(iavf_aqc_opc_send_msg_to_vf);
+
+	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
+	desc->cookie_high = CPU_TO_LE32(opcode);
+	desc->cookie_low = CPU_TO_LE32(retval);
+
+	if (msg && msglen) {
+		void *buf_va;
+		uint64_t buf_sz = msglen;
+
+		desc->flags |= CPU_TO_LE16((uint16_t)(IAVF_AQ_FLAG_BUF
+						| IAVF_AQ_FLAG_RD));
+		if (msglen > IAVF_AQ_LARGE_BUF)
+			desc->flags |= CPU_TO_LE16((uint16_t)IAVF_AQ_FLAG_LB);
+		desc->datalen = CPU_TO_LE16(msglen);
+
+		buf_va = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+			adapter->mem_table,
+			IAVF_BE_32_TO_64(dma_buff_high, dma_buff_low),
+			&buf_sz);
+		if (buf_sz != msglen) {
+			status = IAVF_ERR_INVALID_SIZE;
+			goto arq_send_error;
+		}
+
+		rte_memcpy(buf_va, msg, msglen);
+	}
+	rte_wmb();
+
+	ntu++;
+	if (ntu == arq->len)
+		ntu = 0;
+	arq->next_to_use = ntu;
+	IAVFBE_WRITE_32(arq->head, arq->next_to_use);
+
+	iavfbe_notify(adapter);
+
+arq_send_error:
+	rte_spinlock_unlock(&arq->access_lock);
+	return status;
+}
+
+/* Read data in admin queue to get msg from vf driver */
+static enum iavf_status
+iavfbe_read_msg_from_vf(struct iavfbe_adapter *adapter,
+			struct iavf_arq_event_info *event)
+{
+	struct iavfbe_control_q *asq = &adapter->cq_info.asq;
+	struct iavf_aq_desc *desc;
+	enum virtchnl_ops opcode;
+	uint16_t ntc;
+	uint16_t datalen;
+	uint16_t flags;
+	int ret = IAVF_SUCCESS;
+
+	rte_spinlock_lock(&asq->access_lock);
+
+	ntc = asq->next_to_clean;
+
+	/* pre-clean the event info */
+	memset(&event->desc, 0, sizeof(event->desc));
+	event->msg_len = 0;
+
+	if (ntc == IAVFBE_READ_32(asq->tail)) {
+		/* nothing to do  */
+		ret = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
+		goto end;
+	}
+	/* now get the next descriptor */
+	desc = &asq->ring[ntc];
+	rte_memcpy(&event->desc, desc, sizeof(struct iavf_aq_desc));
+	flags = LE16_TO_CPU(desc->flags);
+	datalen = LE16_TO_CPU(desc->datalen);
+	if (flags & IAVF_AQ_FLAG_RD) {
+		if (datalen > event->buf_len) {
+			ret = IAVF_ERR_BUF_TOO_SHORT;
+			goto end;
+		} else {
+			uint32_t reg1 = 0;
+			uint32_t reg2 = 0;
+			void *buf_va;
+			uint64_t buf_sz = datalen;
+
+			event->msg_len = datalen;
+			reg1 = LE32_TO_CPU(desc->params.external.addr_low);
+			reg2 = LE32_TO_CPU(desc->params.external.addr_high);
+			buf_va = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+					adapter->mem_table,
+					IAVF_BE_32_TO_64(reg2, reg1), &buf_sz);
+			rte_memcpy(event->msg_buf, buf_va, event->msg_len);
+		}
+	}
+
+	ntc++;
+	if (ntc == asq->len)
+		ntc = 0;
+	asq->next_to_clean = ntc;
+
+	/* Write back to head and Desc with Flags.DD and Flags.CMP */
+	desc->flags |= IAVF_AQ_FLAG_DD | IAVF_AQ_FLAG_CMP;
+	rte_wmb();
+
+	IAVFBE_WRITE_32(asq->head, asq->next_to_clean);
+	opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event->desc.cookie_high);
+	asq->cmd_retval = (enum virtchnl_status_code)
+				rte_le_to_cpu_32(event->desc.cookie_low);
+
+	IAVF_BE_LOG(DEBUG, "AQ from VF carries opcode %u, virtchnl_op %u retval %d",
+		    event->desc.opcode, opcode, asq->cmd_retval);
+end:
+	rte_spinlock_unlock(&asq->access_lock);
+
+	return ret;
+}
+
+static inline int
+iavfbe_control_queue_remap(struct iavfbe_adapter *adapter,
+			  struct iavfbe_control_q *asq,
+			  struct iavfbe_control_q *arq)
+{
+	struct rte_emudev_q_info *asq_info;
+	struct rte_emudev_q_info *arq_info;
+	uint64_t len;
+	int ret;
+
+	asq_info = &adapter->cq_info.asq.q_info;
+	arq_info = &adapter->cq_info.arq.q_info;
+
+	ret = rte_emudev_get_queue_info(adapter->edev_id,
+				     RTE_IAVF_EMU_ADMINQ_TXQ,
+				     asq_info);
+	if (ret)
+		return IAVF_ERR_NOT_READY;
+
+	ret = rte_emudev_get_queue_info(adapter->edev_id,
+					RTE_IAVF_EMU_ADMINQ_RXQ,
+					arq_info);
+	if (ret)
+		return IAVF_ERR_NOT_READY;
+
+	rte_spinlock_lock(&asq->access_lock);
+
+	asq->p_ring_addr = asq_info->base;
+	asq->len = asq_info->size;
+	len = asq->len * sizeof(struct iavf_aq_desc);
+	asq->ring = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+					adapter->mem_table,
+					asq->p_ring_addr, &len);
+	rte_spinlock_unlock(&asq->access_lock);
+
+	rte_spinlock_lock(&arq->access_lock);
+	arq->p_ring_addr = arq_info->base;
+	arq->len = arq_info->size;
+	len = arq->len * sizeof(struct iavf_aq_desc);
+	arq->ring = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+					adapter->mem_table,
+					arq->p_ring_addr, &len);
+	rte_spinlock_unlock(&arq->access_lock);
+
+	return 0;
+}
+
+void
+iavfbe_handle_virtchnl_msg(void *arg)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
+	struct iavfbe_adapter *adapter =
+		IAVFBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavfbe_control_q *arq = &adapter->cq_info.arq;
+	struct iavfbe_control_q *asq = &adapter->cq_info.asq;
+	struct iavf_arq_event_info info;
+	uint16_t aq_opc;
+	int ret;
+
+	info.buf_len = IAVF_BE_AQ_BUF_SZ;
+	info.msg_buf = adapter->cq_info.asq.aq_req;
+
+	while (adapter->thread_status) {
+		rte_delay_us_sleep(3000); /* sleep for 3 ms */
+		/* Check if the control queue is initialized */
+		if (adapter->started == 0)
+			continue;
+
+		/* remap every time */
+		ret = iavfbe_control_queue_remap(adapter, asq, arq);
+		if (ret ||
+		    !(asq->p_ring_addr && asq->len && asq->ring) ||
+		    !(arq->p_ring_addr && arq->len && arq->ring))
+			continue;
+
+		if (asq->next_to_clean == IAVFBE_READ_32(asq->tail))
+			/* nothing to do  */
+			continue;
+
+		ret = iavfbe_read_msg_from_vf(adapter, &info);
+		if (ret != IAVF_SUCCESS) {
+			IAVF_BE_LOG(DEBUG,
+				    "Failed to read msg from AdminQ");
+			break;
+		}
+		aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+
+		switch (aq_opc) {
+		case iavf_aqc_opc_send_msg_to_pf:
+			/* Process virtchnl msg from VF */
+			break;
+		case iavf_aqc_opc_queue_shutdown:
+			iavfbe_reset_arq(adapter, true);
+			break;
+		case 0:
+			IAVF_BE_LOG(DEBUG, "NULL Request ignored");
+			break;
+		default:
+			IAVF_BE_LOG(ERR, "Unexpected Request 0x%04x ignored ",
+				    aq_opc);
+			break;
+		}
+	}
+	pthread_exit(0);
+}
diff --git a/drivers/net/iavf_be/meson.build b/drivers/net/iavf_be/meson.build
index 24c625fa18..be13a2e492 100644
--- a/drivers/net/iavf_be/meson.build
+++ b/drivers/net/iavf_be/meson.build
@@ -9,4 +9,5 @@  deps += ['bus_vdev', 'common_iavf', 'vfio_user', 'emu_iavf']
 
 sources = files(
 	'iavf_be_ethdev.c',
+	'iavf_be_vchnl.c',
 )
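
As a footnote on the head/tail bookkeeping used by iavfbe_read_msg_from_vf()
and iavfbe_send_msg_to_vf() above, here is a small self-contained sketch
(stand-in names, host-endian, no locking, not driver code) of the consumer
side: the frontend advances tail as it posts descriptors, the backend consumes
from next_to_clean, wraps at the ring length, and publishes its progress back
through head.

    #include <stdint.h>
    #include <stdio.h>

    struct sketch_ring {
            uint16_t len;           /* number of descriptors in the ring */
            uint16_t next_to_clean; /* backend consumer index */
            uint32_t tail;          /* producer index, written by the frontend */
            uint32_t head;          /* consumer index, published to the frontend */
    };

    /* Consume every descriptor the frontend has posted so far. */
    static unsigned int
    sketch_drain(struct sketch_ring *r)
    {
            unsigned int handled = 0;
            uint16_t ntc = r->next_to_clean;

            while (ntc != r->tail) {
                    /* ... read descriptor ntc and handle the message ... */
                    ntc++;
                    if (ntc == r->len)
                            ntc = 0;        /* wrap around */
                    handled++;
            }
            r->next_to_clean = ntc;
            r->head = ntc;                  /* publish progress to the frontend */
            return handled;
    }

    int
    main(void)
    {
            struct sketch_ring r = { .len = 8, .next_to_clean = 6, .tail = 2 };

            printf("handled %u, head now %u\n", sketch_drain(&r), r.head);
            return 0;
    }
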