get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all editable fields.
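For reference, a minimal sketch of both operations using Python's requests library, assuming the endpoint shown in the sample exchange below. The token value is a placeholder, and the updated fields ("state", "archived") are taken from the response body; changing them normally requires a maintainer's API token, and the accepted state names depend on the Patchwork instance.

```python
# Minimal sketch: read a patch anonymously, then partially update it.
# BASE and the field names come from the response below; TOKEN and the
# new state value are placeholders -- substitute your own credentials.
import requests

BASE = "https://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical maintainer API token

# GET needs no authentication for public patches.
patch = requests.get(f"{BASE}/patches/55/").json()
print(patch["name"], patch["state"])

# PATCH is a partial update: only the fields sent here are changed.
resp = requests.patch(
    f"{BASE}/patches/55/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()
```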

GET /api/patches/55/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 55,
    "url": "https://patches.dpdk.org/api/patches/55/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1405661946-12534-2-git-send-email-huawei.xie@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1405661946-12534-2-git-send-email-huawei.xie@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1405661946-12534-2-git-send-email-huawei.xie@intel.com",
    "date": "2014-07-18T05:39:05",
    "name": "[dpdk-dev,1/2] lib/librte_vhost: vhost library support to facilitate integration with vswitch.",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "120ff2d327053e008827a5a1a56a6f84ca6de627",
    "submitter": {
        "id": 16,
        "url": "https://patches.dpdk.org/api/people/16/?format=api",
        "name": "Huawei Xie",
        "email": "huawei.xie@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1405661946-12534-2-git-send-email-huawei.xie@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/55/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/55/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<hxie5@shecgisg003.sh.intel.com>",
        "Received": [
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n\tby dpdk.org (Postfix) with ESMTP id 7E594959\n\tfor <dev@dpdk.org>; Fri, 18 Jul 2014 07:38:31 +0200 (CEST)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby orsmga101.jf.intel.com with ESMTP; 17 Jul 2014 22:39:29 -0700",
            "from shilc102.sh.intel.com ([10.239.39.44])\n\tby orsmga002.jf.intel.com with ESMTP; 17 Jul 2014 22:39:23 -0700",
            "from shecgisg003.sh.intel.com (shecgisg003.sh.intel.com\n\t[10.239.29.90])\n\tby shilc102.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id\n\ts6I5dIqY008901; Fri, 18 Jul 2014 13:39:20 +0800",
            "from shecgisg003.sh.intel.com (localhost [127.0.0.1])\n\tby shecgisg003.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP\n\tid s6I5dFlJ012654; Fri, 18 Jul 2014 13:39:17 +0800",
            "(from hxie5@localhost)\n\tby shecgisg003.sh.intel.com (8.13.6/8.13.6/Submit) id s6I5dFqf012650; \n\tFri, 18 Jul 2014 13:39:15 +0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.01,682,1400050800\"; d=\"scan'208\";a=\"575049917\"",
        "From": "Huawei Xie <huawei.xie@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Fri, 18 Jul 2014 13:39:05 +0800",
        "Message-Id": "<1405661946-12534-2-git-send-email-huawei.xie@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": "<1405661946-12534-1-git-send-email-huawei.xie@intel.com>",
        "References": "<1405661946-12534-1-git-send-email-huawei.xie@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 1/2] lib/librte_vhost: vhost library support to\n\tfacilitate integration with vswitch.",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "X-List-Received-Date": "Fri, 18 Jul 2014 05:38:34 -0000"
    },
    "content": "Signed-off-by: Huawei Xie <huawei.xie@intel.com>\nAcked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\nAcked-by: Thomos Long <thomas.long@intel.com>\n---\n config/common_linuxapp                       |    6 +\n lib/Makefile                                 |    1 +\n lib/librte_vhost/Makefile                    |   48 ++\n lib/librte_vhost/eventfd_link/Makefile       |   39 +\n lib/librte_vhost/eventfd_link/eventfd_link.c |  205 ++++++\n lib/librte_vhost/eventfd_link/eventfd_link.h |   79 ++\n lib/librte_vhost/rte_virtio_net.h            |  192 +++++\n lib/librte_vhost/vhost-net-cdev.c            |  363 ++++++++++\n lib/librte_vhost/vhost-net-cdev.h            |  112 +++\n lib/librte_vhost/vhost_rxtx.c                |  292 ++++++++\n lib/librte_vhost/virtio-net.c                | 1002 ++++++++++++++++++++++++++\n 11 files changed, 2339 insertions(+)\n create mode 100644 lib/librte_vhost/Makefile\n create mode 100644 lib/librte_vhost/eventfd_link/Makefile\n create mode 100644 lib/librte_vhost/eventfd_link/eventfd_link.c\n create mode 100644 lib/librte_vhost/eventfd_link/eventfd_link.h\n create mode 100644 lib/librte_vhost/rte_virtio_net.h\n create mode 100644 lib/librte_vhost/vhost-net-cdev.c\n create mode 100644 lib/librte_vhost/vhost-net-cdev.h\n create mode 100644 lib/librte_vhost/vhost_rxtx.c\n create mode 100644 lib/librte_vhost/virtio-net.c",
    "diff": "diff --git a/config/common_linuxapp b/config/common_linuxapp\nindex 7bf5d80..002ed84 100644\n--- a/config/common_linuxapp\n+++ b/config/common_linuxapp\n@@ -390,6 +390,12 @@ CONFIG_RTE_KNI_VHOST_DEBUG_RX=n\n CONFIG_RTE_KNI_VHOST_DEBUG_TX=n\n \n #\n+# Compile vhost library\n+#\n+CONFIG_RTE_LIBRTE_VHOST=y\n+CONFIG_RTE_LIBRTE_VHOST_DEBUG=n\n+\n+#\n #Compile Xen domain0 support\n #\n CONFIG_RTE_LIBRTE_XEN_DOM0=n\ndiff --git a/lib/Makefile b/lib/Makefile\nindex 10c5bb3..007c174 100644\n--- a/lib/Makefile\n+++ b/lib/Makefile\n@@ -60,6 +60,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_METER) += librte_meter\n DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += librte_sched\n DIRS-$(CONFIG_RTE_LIBRTE_KVARGS) += librte_kvargs\n DIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += librte_distributor\n+DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost\n DIRS-$(CONFIG_RTE_LIBRTE_PORT) += librte_port\n DIRS-$(CONFIG_RTE_LIBRTE_TABLE) += librte_table\n DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += librte_pipeline\ndiff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile\nnew file mode 100644\nindex 0000000..f79778b\n--- /dev/null\n+++ b/lib/librte_vhost/Makefile\n@@ -0,0 +1,48 @@\n+#   BSD LICENSE\n+# \n+#   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+#   All rights reserved.\n+# \n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+# \n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of Intel Corporation nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+# \n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+# library name\n+LIB = librte_vhost.a\n+\n+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -D_FILE_OFFSET_BITS=64 -lfuse\n+LDFLAGS += -lfuse\n+# all source are stored in SRCS-y\n+SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := vhost-net-cdev.c virtio-net.c vhost_rxtx.c\n+\n+# install includes\n+SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_virtio_net.h\n+\n+# this lib needs eal\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_eal lib/librte_mbuf\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/lib/librte_vhost/eventfd_link/Makefile b/lib/librte_vhost/eventfd_link/Makefile\nnew file mode 100644\nindex 0000000..5fe7297\n--- /dev/null\n+++ b/lib/librte_vhost/eventfd_link/Makefile\n@@ -0,0 +1,39 @@\n+#   BSD LICENSE\n+# \n+#   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+#   All rights reserved.\n+# \n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+# \n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of Intel Corporation nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+# \n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ \n+obj-m += eventfd_link.o\n+\n+\n+all:\n+\tmake -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules\n+\n+clean:\n+\tmake -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean\ndiff --git a/lib/librte_vhost/eventfd_link/eventfd_link.c b/lib/librte_vhost/eventfd_link/eventfd_link.c\nnew file mode 100644\nindex 0000000..f7975fa\n--- /dev/null\n+++ b/lib/librte_vhost/eventfd_link/eventfd_link.c\n@@ -0,0 +1,205 @@\n+/*-\n+ *  * GPL LICENSE SUMMARY\n+ *  * \n+ *  *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *  * \n+ *  *   This program is free software; you can redistribute it and/or modify\n+ *  *   it under the terms of version 2 of the GNU General Public License as\n+ *  *   published by the Free Software Foundation.\n+ *  * \n+ *  *   This program is distributed in the hope that it will be useful, but\n+ *  *   WITHOUT ANY WARRANTY; without even the implied warranty of\n+ *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n+ *  *   General Public License for more details.\n+ *  * \n+ *  *   You should have received a copy of the GNU General Public License\n+ *  *   along with this program; if not, write to the Free Software\n+ *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\n+ *  *   The full GNU General Public License is included in this distribution\n+ *  *   in the file called LICENSE.GPL.\n+ *  * \n+ *  *   Contact Information:\n+ *  *   Intel Corporation\n+ *   */\n+\n+#include <linux/eventfd.h>\n+#include <linux/miscdevice.h>\n+#include <linux/module.h>\n+#include <linux/moduleparam.h>\n+#include <linux/rcupdate.h>\n+#include <linux/file.h>\n+#include <linux/slab.h>\n+#include <linux/fs.h>\n+#include <linux/mmu_context.h>\n+#include <linux/sched.h>\n+#include <asm/mmu_context.h>\n+#include <linux/fdtable.h>\n+\n+#include \"eventfd_link.h\"\n+\n+\n+/*\n+ * get_files_struct is copied from fs/file.c\n+ */\n+struct files_struct *\n+get_files_struct (struct task_struct *task)\n+{\n+\tstruct files_struct *files;\n+\n+\ttask_lock (task);\n+\tfiles = task->files;\n+\tif (files)\n+\t\tatomic_inc (&files->count);\n+\ttask_unlock (task);\n+\n+\treturn files;\n+}\n+\n+/*\n+ * put_files_struct is extracted from fs/file.c\n+ */\n+void\n+put_files_struct (struct files_struct *files)\n+{\n+\tif (atomic_dec_and_test (&files->count))\n+\t{\n+\t\tBUG ();\n+\t}\n+}\n+\n+\n+static long\n+eventfd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)\n+{\n+\tvoid __user *argp = (void __user *) arg;\n+\tstruct task_struct *task_target = NULL;\n+\tstruct file *file;\n+\tstruct files_struct *files;\n+\tstruct fdtable *fdt;\n+\tstruct eventfd_copy eventfd_copy;\n+\n+\tswitch (ioctl)\n+\t{\n+\t\tcase EVENTFD_COPY:\n+\t\t\tif (copy_from_user (&eventfd_copy, argp, sizeof (struct eventfd_copy)))\n+\t\t\t\treturn -EFAULT;\n+\n+\t\t\t/*\n+\t\t\t * Find the task struct for the target pid\n+\t\t\t */\n+\t\t\ttask_target =\n+\t\t\t\tpid_task (find_vpid (eventfd_copy.target_pid), PIDTYPE_PID);\n+\t\t\tif (task_target == NULL)\n+\t\t\t{\n+\t\t\t\tprintk (KERN_DEBUG \"Failed to get mem ctx for target pid\\n\");\n+\t\t\t\treturn -EFAULT;\n+\t\t\t}\n+\n+\t\t\tfiles = get_files_struct (current);\n+\t\t\tif (files == NULL)\n+\t\t\t{\n+\t\t\t\tprintk (KERN_DEBUG \"Failed to get files struct\\n\");\n+\t\t\t\treturn -EFAULT;\n+\t\t\t}\n+\n+\t\t\trcu_read_lock ();\n+\t\t\tfile = fcheck_files (files, eventfd_copy.source_fd);\n+\t\t\tif (file)\n+\t\t\t{\n+\t\t\t\tif (file->f_mode & FMODE_PATH\n+\t\t\t\t\t\t|| !atomic_long_inc_not_zero (&file->f_count))\n+\t\t\t\t\tfile = NULL;\n+\t\t\t}\n+\t\t\trcu_read_unlock ();\n+\t\t\tput_files_struct (files);\n+\n+\t\t\tif (file == NULL)\n+\t\t\t{\n+\t\t\t\tprintk (KERN_DEBUG \"Failed to get file from source pid\\n\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\t/*\n+\t\t\t * Release the existing eventfd in the source process\n+\t\t\t */\n+\t\t\tspin_lock (&files->file_lock);\n+\t\t\tfilp_close (file, files);\n+\t\t\tfdt = files_fdtable (files);\n+\t\t\tfdt->fd[eventfd_copy.source_fd] = 
NULL;\n+\t\t\tspin_unlock (&files->file_lock);\n+\n+\t\t\t/*\n+\t\t\t * Find the file struct associated with the target fd.\n+\t\t\t */\n+\n+\t\t\tfiles = get_files_struct (task_target);\n+\t\t\tif (files == NULL)\n+\t\t\t{\n+\t\t\t\tprintk (KERN_DEBUG \"Failed to get files struct\\n\");\n+\t\t\t\treturn -EFAULT;\n+\t\t\t}\n+\n+\t\t\trcu_read_lock ();\n+\t\t\tfile = fcheck_files (files, eventfd_copy.target_fd);\n+\t\t\tif (file)\n+\t\t\t{\n+\t\t\t\tif (file->f_mode & FMODE_PATH\n+\t\t\t\t\t\t|| !atomic_long_inc_not_zero (&file->f_count))\n+\t\t\t\t\tfile = NULL;\n+\t\t\t}\n+\t\t\trcu_read_unlock ();\n+\t\t\tput_files_struct (files);\n+\n+\t\t\tif (file == NULL)\n+\t\t\t{\n+\t\t\t\tprintk (KERN_DEBUG \"Failed to get file from target pid\\n\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\n+\t\t\t/*\n+\t\t\t * Install the file struct from the target process into the\n+\t\t\t * file desciptor of the source process,\n+\t\t\t */\n+\n+\t\t\tfd_install (eventfd_copy.source_fd, file);\n+\n+\t\t\treturn 0;\n+\n+\t\tdefault:\n+\t\t\treturn -ENOIOCTLCMD;\n+\t}\n+}\n+\n+static const struct file_operations eventfd_link_fops = {\n+\t.owner = THIS_MODULE,\n+\t.unlocked_ioctl = eventfd_link_ioctl,\n+};\n+\n+\n+static struct miscdevice eventfd_link_misc = {\n+\t.name = \"eventfd-link\",\n+\t.fops = &eventfd_link_fops,\n+};\n+\n+static int __init\n+eventfd_link_init (void)\n+{\n+\treturn misc_register (&eventfd_link_misc);\n+}\n+\n+module_init (eventfd_link_init);\n+\n+static void __exit\n+eventfd_link_exit (void)\n+{\n+\tmisc_deregister (&eventfd_link_misc);\n+}\n+\n+module_exit (eventfd_link_exit);\n+\n+MODULE_VERSION (\"0.0.1\");\n+MODULE_LICENSE (\"GPL v2\");\n+MODULE_AUTHOR (\"Anthony Fee\");\n+MODULE_DESCRIPTION (\"Link eventfd\");\n+MODULE_ALIAS (\"devname:eventfd-link\");\ndiff --git a/lib/librte_vhost/eventfd_link/eventfd_link.h b/lib/librte_vhost/eventfd_link/eventfd_link.h\nnew file mode 100644\nindex 0000000..f33c2f8\n--- /dev/null\n+++ b/lib/librte_vhost/eventfd_link/eventfd_link.h\n@@ -0,0 +1,79 @@\n+/*-\n+ *  * This file is provided under a dual BSD/GPLv2 license.  When using or\n+ *  *   redistributing this file, you may do so under either license.\n+ *  * \n+ *  *   GPL LICENSE SUMMARY\n+ *  * \n+ *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *  * \n+ *  *   This program is free software; you can redistribute it and/or modify\n+ *  *   it under the terms of version 2 of the GNU General Public License as\n+ *  *   published by the Free Software Foundation.\n+ *  * \n+ *  *   This program is distributed in the hope that it will be useful, but\n+ *  *   WITHOUT ANY WARRANTY; without even the implied warranty of\n+ *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n+ *  *   General Public License for more details.\n+ *  * \n+ *  *   You should have received a copy of the GNU General Public License\n+ *  *   along with this program; if not, write to the Free Software\n+ *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\n+ *  *   The full GNU General Public License is included in this distribution\n+ *  *   in the file called LICENSE.GPL.\n+ *  * \n+ *  *   Contact Information:\n+ *  *   Intel Corporation\n+ *  * \n+ *  *   BSD LICENSE\n+ *  * \n+ *  *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *  *   All rights reserved.\n+ *  * \n+ *  *   Redistribution and use in source and binary forms, with or without\n+ *  *   modification, are permitted provided that the following conditions\n+ *  *   are met:\n+ *  * \n+ *  *     * Redistributions of source code must retain the above copyright\n+ *  *       notice, this list of conditions and the following disclaimer.\n+ *  *     * Redistributions in binary form must reproduce the above copyright\n+ *  *       notice, this list of conditions and the following disclaimer in\n+ *  *       the documentation and/or other materials provided with the\n+ *  *       distribution.\n+ *  *     * Neither the name of Intel Corporation nor the names of its\n+ *  *       contributors may be used to endorse or promote products derived\n+ *  *       from this software without specific prior written permission.\n+ *  * \n+ *  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *  *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *  * \n+ *   */\n+\n+#ifndef _EVENTFD_LINK_H_\n+#define _EVENTFD_LINK_H_\n+\n+/*\n+ *\tioctl to copy an fd entry in calling process to an fd in a target process\n+ */\n+#define EVENTFD_COPY 1\n+\n+/*\n+ *\targuements for the EVENTFD_COPY ioctl\n+ */\n+struct eventfd_copy {\n+\t// fd in the target pid\n+    unsigned target_fd;\n+\t// fd in the calling pid\n+    unsigned source_fd;\n+\t// pid of the target pid\n+    pid_t target_pid;\n+};\n+#endif /* _EVENTFD_LINK_H_ */\ndiff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h\nnew file mode 100644\nindex 0000000..7a05dab\n--- /dev/null\n+++ b/lib/librte_vhost/rte_virtio_net.h\n@@ -0,0 +1,192 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef _VIRTIO_NET_H_\n+#define _VIRTIO_NET_H_\n+\n+#include <stdint.h>\n+#include <linux/virtio_ring.h>\n+#include <linux/virtio_net.h>\n+#include <sys/eventfd.h>\n+\n+#include <rte_memory.h>\n+#include <rte_mempool.h>\n+#include <rte_mbuf.h>\n+\n+#define VIRTIO_DEV_RUNNING 1  /**< Used to indicate that the device is running on a data core. */\n+#define VIRTIO_DEV_STOPPED -1 /**< Backend value set by guest. */\n+\n+/* Enum for virtqueue management. */\n+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};\n+\n+/**\n+ * Structure contains variables relevant to RX/TX virtqueues.\n+ */\n+struct vhost_virtqueue {\n+\tstruct vring_desc    *desc;             /**< descriptor ring. */\n+\tstruct vring_avail   *avail;            /**< available ring. */\n+\tstruct vring_used    *used;             /**< used ring. */\n+\tuint32_t             size;              /**< Size of descriptor ring. */\n+\tuint32_t             backend;           /**< Backend value to determine if device should be started/stopped. */\n+\tuint16_t             vhost_hlen;        /**< Vhost header length (varies depending on RX merge buffers. */\n+\tvolatile uint16_t    last_used_idx;     /**< Last index used on the available ring. */\n+\tvolatile uint16_t    last_used_idx_res; /**< Used for multiple devices reserving buffers. */\n+\teventfd_t            callfd;            /**< Currently unused as polling mode is enabled. */\n+\teventfd_t            kickfd;            /**< Used to notify the guest (trigger interrupt). */\n+} __rte_cache_aligned;\n+\n+/**\n+ * Information relating to memory regions including offsets to\n+ * addresses in QEMUs memory file.\n+ */\n+struct virtio_memory_regions {\n+\tuint64_t    guest_phys_address;     /**< Base guest physical address of region. */\n+\tuint64_t    guest_phys_address_end; /**< End guest physical address of region. 
*/\n+\tuint64_t    memory_size;            /**< Size of region. */\n+\tuint64_t    userspace_address;      /**< Base userspace address of region. */\n+\tuint64_t    address_offset;         /**< Offset of region for address translation. */\n+};\n+\n+\n+/**\n+ * Memory structure includes region and mapping information.\n+ */\n+struct virtio_memory {\n+\tuint64_t    base_address;    /**< Base QEMU userspace address of the memory file. */\n+\tuint64_t    mapped_address;  /**< Mapped address of memory file base in our applications memory space. */\n+\tuint64_t    mapped_size;     /**< Total size of memory file. */\n+\tuint32_t    nregions;        /**< Number of memory regions. */\n+\tstruct virtio_memory_regions      regions[0]; /**< Memory region information. */\n+};\n+\n+/**\n+ * Device structure contains all configuration information relating to the device.\n+ */\n+struct virtio_net {\n+\tstruct vhost_virtqueue  *virtqueue[VIRTIO_QNUM]; /**< Contains all virtqueue information. */\n+\tstruct virtio_memory    *mem;                    /**< QEMU memory and memory region information. */\n+\tuint64_t features;    /**< Negotiated feature set. */\n+\tuint64_t device_fh;   /**< Device identifier. */\n+\tuint32_t flags;       /**< Device flags. Only used to check if device is running on data core. */\n+\tvoid     *priv;\n+} __rte_cache_aligned;\n+\n+/**\n+ * Device operations to add/remove device.\n+ */\n+struct virtio_net_device_ops {\n+\tint (*new_device)(struct virtio_net *); /**< Add device. */\n+\tvoid (*destroy_device)(struct virtio_net *); /**< Remove device. */\n+};\n+\n+\n+static inline uint16_t __attribute__((always_inline))\n+rte_vring_available_entries(struct virtio_net *dev, uint16_t queue_id)\n+{\n+\tstruct vhost_virtqueue *vq = dev->virtqueue[queue_id];\n+\treturn *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx_res;\n+}\n+\n+/**\n+ * Function to convert guest physical addresses to vhost virtual addresses.\n+ * This is used to convert guest virtio buffer addresses.\n+ */\n+static inline uint64_t __attribute__((always_inline))\n+gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)\n+{\n+\tstruct virtio_memory_regions *region;\n+\tuint32_t regionidx;\n+\tuint64_t vhost_va = 0;\n+\n+\tfor (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {\n+\t\tregion = &dev->mem->regions[regionidx];\n+\t\tif ((guest_pa >= region->guest_phys_address) &&\n+\t\t\t(guest_pa <= region->guest_phys_address_end)) {\n+\t\t\tvhost_va = region->address_offset + guest_pa;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\treturn vhost_va;\n+}\n+\n+/**\n+ *  Disable features in feature_mask. Returns 0 on success.\n+ */\n+int rte_vhost_feature_disable(uint64_t feature_mask);\n+\n+/**\n+ *  Enable features in feature_mask. Returns 0 on success.\n+ */\n+int rte_vhost_feature_enable(uint64_t feature_mask);\n+\n+/* Returns currently supported vhost features */\n+uint64_t rte_vhost_feature_get(void);\n+\n+int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable);\n+\n+/* Register vhost driver. dev_name could be different for multiple instance support. */\n+int rte_vhost_driver_register(const char *dev_name);\n+\n+/* Register callbacks. */\n+int rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const);\n+\n+int rte_vhost_driver_session_start(void);\n+\n+/**\n+ * This function adds buffers to the virtio devices RX virtqueue. Buffers can\n+ * be received from the physical port or from another virtual device. 
A packet\n+ * count is returned to indicate the number of packets that were succesfully\n+ * added to the RX queue.\n+ * @param queue_id\n+ *  virtio queue index in mq case\n+ * @return\n+ *  num of packets enqueued\n+ */\n+uint32_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,\n+\tstruct rte_mbuf **pkts, uint32_t count);\n+\n+/**\n+ * This function gets guest buffers from the virtio device TX virtqueue,\n+ * construct host mbufs, copies guest buffer content to host mbufs and\n+ * store them in pkts to be processed.\n+ * @param mbuf_pool\n+ *  mbuf_pool where host mbuf is allocated.\n+ * @param queue_id\n+ *  virtio queue index in mq case.\n+ * @return\n+ *  num of packets dequeued\n+ */\n+uint32_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,\n+\tstruct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint32_t count);\n+\n+#endif /* _VIRTIO_NET_H_ */\ndiff --git a/lib/librte_vhost/vhost-net-cdev.c b/lib/librte_vhost/vhost-net-cdev.c\nnew file mode 100644\nindex 0000000..1dfe918\n--- /dev/null\n+++ b/lib/librte_vhost/vhost-net-cdev.c\n@@ -0,0 +1,363 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <errno.h>\n+#include <fuse/cuse_lowlevel.h>\n+#include <linux/limits.h>\n+#include <linux/vhost.h>\n+#include <stdint.h>\n+#include <string.h>\n+#include <unistd.h>\n+\n+#include <rte_ethdev.h>\n+#include <rte_log.h>\n+#include <rte_string_fns.h>\n+#include <rte_virtio_net.h>\n+\n+#include \"vhost-net-cdev.h\"\n+\n+#define FUSE_OPT_DUMMY    \"\\0\\0\"\n+#define FUSE_OPT_FORE     \"-f\\0\\0\"\n+#define FUSE_OPT_NOMULTI  \"-s\\0\\0\"\n+\n+static const uint32_t\tdefault_major = 231;\n+static const uint32_t\tdefault_minor = 1;\n+static const char\tcuse_device_name[]\t= \"/dev/cuse\";\n+static const char\tdefault_cdev[] = \"vhost-net\";\n+\n+static struct fuse_session\t\t*session;\n+static struct vhost_net_device_ops\tconst *ops;\n+\n+/**\n+ * Returns vhost_device_ctx from given fuse_req_t. The index is populated later when\n+ * the device is added to the device linked list.\n+ */\n+static struct vhost_device_ctx\n+fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)\n+{\n+\tstruct vhost_device_ctx ctx;\n+\tstruct fuse_ctx const *const req_ctx = fuse_req_ctx(req);\n+\n+\tctx.pid = req_ctx->pid;\n+\tctx.fh = fi->fh;\n+\n+\treturn ctx;\n+}\n+\n+/**\n+ * When the device is created in QEMU it gets initialised here and added to the device linked list.\n+ */\n+static void\n+vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)\n+{\n+\tstruct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);\n+\tint err = 0;\n+\n+\terr = ops->new_device(ctx);\n+\tif (err == -1) {\n+\t\tfuse_reply_err(req, EPERM);\n+\t\treturn;\n+\t}\n+\n+\tfi->fh = err;\n+\n+\tRTE_LOG(INFO, VHOST_CONFIG, \"(%\"PRIu64\") Device configuration started\\n\", fi->fh);\n+\tfuse_reply_open(req, fi);\n+}\n+\n+/*\n+ * When QEMU is shutdown or killed the device gets released.\n+ */\n+static void\n+vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)\n+{\n+\tint err = 0;\n+\tstruct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);\n+\n+\tops->destroy_device(ctx);\n+\tRTE_LOG(INFO, VHOST_CONFIG, \"(%\"PRIu64\") Device released\\n\", ctx.fh);\n+\tfuse_reply_err(req, err);\n+}\n+\n+/*\n+ * Boilerplate code for CUSE IOCTL\n+ * Implicit arguments: ctx, req, result.\n+ */\n+#define VHOST_IOCTL(func) do { \\\n+\tresult = (func)(ctx);  \\\n+\tfuse_reply_ioctl(req, result, NULL, 0);\t \\\n+} while (0)\n+\n+/*\n+ * Boilerplate IOCTL RETRY\n+ * Implicit arguments: req.\n+ */\n+#define VHOST_IOCTL_RETRY(size_r, size_w) do { \\\n+\tstruct iovec iov_r = { arg, (size_r) }; \\\n+\tstruct iovec iov_w = { arg, (size_w) }; \\\n+\tfuse_reply_ioctl_retry(req, &iov_r, (size_r) ? 1 : 0, &iov_w, (size_w) ? 
1 : 0); \\\n+} while (0)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\n+/*\n+ * Boilerplate code for CUSE Read IOCTL\n+ * Implicit arguments: ctx, req, result, in_bufsz, in_buf.\n+ */\n+#define VHOST_IOCTL_R(type, var, func) do {             \\\n+\tif (!in_bufsz) {                                \\\n+\t\tVHOST_IOCTL_RETRY(sizeof(type), 0);     \\\n+\t} else {                                        \\\n+\t\t(var) = *(const type *)in_buf;          \\\n+\t\tresult = func(ctx, &(var));             \\\n+\t\tfuse_reply_ioctl(req, result, NULL, 0); \\\n+\t}                                               \\\n+} while (0)\n+\n+/*\n+ * Boilerplate code for CUSE Write IOCTL\n+ * Implicit arguments: ctx, req, result, out_bufsz.\n+ */\n+#define\tVHOST_IOCTL_W(type, var, func) do {              \\\n+\tif (!out_bufsz) {                                \\\n+\t\tVHOST_IOCTL_RETRY(0, sizeof(type));      \\\n+\t} else {                                         \\\n+\t\tresult = (func)(ctx, &(var));            \\\n+\t\tfuse_reply_ioctl(req, result, &(var), sizeof(type)); \\\n+\t} \\\n+} while (0)\n+\n+/*\n+ * Boilerplate code for CUSE Read/Write IOCTL\n+ * Implicit arguments: ctx, req, result, in_bufsz, in_buf.\n+ */\n+#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {      \\\n+\tif (!in_bufsz) {                                         \\\n+\t\tVHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2)); \\\n+\t} else {                                                 \\\n+\t\t(var1) = *(const type1 *) (in_buf);              \\\n+\t\tresult = (func)(ctx, (var1), &(var2));           \\\n+\t\tfuse_reply_ioctl(req, result, &(var2), sizeof(type2));  \\\n+\t} \\\n+} while (0)\n+\n+/**\n+ * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on\n+ * the type of IOCTL a buffer is requested to read or to write. 
This\n+ * request is handled by FUSE and the buffer is then given to CUSE.\n+ */\n+static void\n+vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,\n+\t\tstruct fuse_file_info *fi, __rte_unused unsigned flags,\n+\t\tconst void *in_buf, size_t in_bufsz, size_t out_bufsz)\n+{\n+\tstruct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);\n+\tstruct vhost_vring_file file;\n+\tstruct vhost_vring_state state;\n+\tstruct vhost_vring_addr addr;\n+\tuint64_t features;\n+\tuint32_t index;\n+\tint result = 0;\n+\n+\tswitch (cmd) {\n+\n+\tcase VHOST_NET_SET_BACKEND:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_NET_SET_BACKEND\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);\n+\t\tbreak;\n+\n+\tcase VHOST_GET_FEATURES:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_GET_FEATURES\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_W(uint64_t, features, ops->get_features);\n+\t\tbreak;\n+\n+\tcase VHOST_SET_FEATURES:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_FEATURES\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_R(uint64_t, features, ops->set_features);\n+\t\tbreak;\n+\n+\tcase VHOST_RESET_OWNER:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_RESET_OWNER\\n\", ctx.fh);\n+\t\tVHOST_IOCTL(ops->reset_owner);\n+\t\tbreak;\n+\n+\tcase VHOST_SET_OWNER:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_OWNER\\n\", ctx.fh);\n+\t\tVHOST_IOCTL(ops->set_owner);\n+\t\tbreak;\n+\n+\tcase VHOST_SET_MEM_TABLE:\n+\t\t/*TODO fix race condition.*/\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_MEM_TABLE\\n\", ctx.fh);\n+\t\tstatic struct vhost_memory mem_temp;\n+\n+\t\tswitch (in_bufsz) {\n+\t\tcase 0:\n+\t\t\tVHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);\n+\t\t\tbreak;\n+\n+\t\tcase sizeof(struct vhost_memory):\n+\t\t\tmem_temp = *(const struct vhost_memory *) in_buf;\n+\n+\t\t\tif (mem_temp.nregions > 0) {\n+\t\t\t\tVHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);\n+\t\t\t} else {\n+\t\t\t\tresult = -1;\n+\t\t\t\tfuse_reply_ioctl(req, result, NULL, 0);\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tresult = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);\n+\t\t\tif (result)\n+\t\t\t\tfuse_reply_err(req, EINVAL);\n+\t\t\telse\n+\t\t\t\tfuse_reply_ioctl(req, result, NULL, 0);\n+\n+\t\t}\n+\n+\t\tbreak;\n+\n+\tcase VHOST_SET_VRING_NUM:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_NUM\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);\n+\t\tbreak;\n+\n+\tcase VHOST_SET_VRING_BASE:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_BASE\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);\n+\t\tbreak;\n+\n+\tcase VHOST_GET_VRING_BASE:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_GET_VRING_BASE\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);\n+\t\tbreak;\n+\n+\tcase VHOST_SET_VRING_ADDR:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_ADDR\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);\n+\t\tbreak;\n+\n+\tcase VHOST_SET_VRING_KICK:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_KICK\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);\n+\t\tbreak;\n+\n+\tcase VHOST_SET_VRING_CALL:\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: 
VHOST_SET_VRING_CALL\\n\", ctx.fh);\n+\t\tVHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: DOESN NOT EXIST\\n\", ctx.fh);\n+\t\tresult = -1;\n+\t\tfuse_reply_ioctl(req, result, NULL, 0);\n+\t}\n+\n+\tif (result < 0)\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: FAIL\\n\", ctx.fh);\n+\telse\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: SUCCESS\\n\", ctx.fh);\n+}\n+\n+/**\n+ * Structure handling open, release and ioctl function pointers is populated.\n+ */\n+static const struct cuse_lowlevel_ops vhost_net_ops = {\n+\t.open\t\t= vhost_net_open,\n+\t.release\t= vhost_net_release,\n+\t.ioctl\t\t= vhost_net_ioctl,\n+};\n+\n+/**\n+ * cuse_info is populated and used to register the cuse device. vhost_net_device_ops are\n+ * also passed when the device is registered in main.c.\n+ */\n+int\n+rte_vhost_driver_register(const char *dev_name)\n+{\n+\tstruct cuse_info cuse_info;\n+\tchar device_name[PATH_MAX] = \"\";\n+\tchar char_device_name[PATH_MAX] = \"\";\n+\tconst char *device_argv[] = { device_name };\n+\n+\tchar fuse_opt_dummy[] = FUSE_OPT_DUMMY;\n+\tchar fuse_opt_fore[] = FUSE_OPT_FORE;\n+\tchar fuse_opt_nomulti[] = FUSE_OPT_NOMULTI;\n+\tchar *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};\n+\n+\tif (access(cuse_device_name, R_OK | W_OK) < 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Character device %s can't be accessed, maybe not exist\\n\", cuse_device_name);\n+\t\treturn -1;\n+\t}\n+\n+\t/*\n+\t * The device name is created. This is passed to QEMU so that it can register\n+\t * the device with our application. The dev_name allows us to have multiple instances\n+\t * of userspace vhost which we can then add devices to separately.\n+\t */\n+\tsnprintf(device_name, PATH_MAX, \"DEVNAME=%s\", dev_name);\n+\tsnprintf(char_device_name, PATH_MAX, \"/dev/%s\", dev_name);\n+\n+\t/* Check if device already exists. */\n+\tif (access(char_device_name, F_OK) != -1) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Character device %s already exists\\n\", char_device_name);\n+\t\treturn -1;\n+\t}\n+\n+\tmemset(&cuse_info, 0, sizeof(cuse_info));\n+\tcuse_info.dev_major = default_major;\n+\tcuse_info.dev_minor = default_minor;\n+\tcuse_info.dev_info_argc = 1;\n+\tcuse_info.dev_info_argv = device_argv;\n+\tcuse_info.flags = CUSE_UNRESTRICTED_IOCTL;\n+\n+\tops = get_virtio_net_callbacks();\n+\n+\tsession = cuse_lowlevel_setup(3, fuse_argv,\n+\t\t\t\t&cuse_info, &vhost_net_ops, 0, NULL);\n+\tif (session == NULL)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+\n+/**\n+ * The CUSE session is launched allowing the application to receive open, release and ioctl calls.\n+ */\n+int\n+rte_vhost_driver_session_start(void)\n+{\n+\tfuse_session_loop(session);\n+\n+\treturn 0;\n+}\ndiff --git a/lib/librte_vhost/vhost-net-cdev.h b/lib/librte_vhost/vhost-net-cdev.h\nnew file mode 100644\nindex 0000000..ecf35fd\n--- /dev/null\n+++ b/lib/librte_vhost/vhost-net-cdev.h\n@@ -0,0 +1,112 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdint.h>\n+#include <stdio.h>\n+#include <sys/types.h>\n+#include <unistd.h>\n+#include <linux/vhost.h>\n+\n+#include <rte_log.h>\n+\n+/* Macros for printing using RTE_LOG */\n+#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1\n+#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1\n+\n+#ifdef RTE_LIBRTE_VHOST_DEBUG\n+#define VHOST_MAX_PRINT_BUFF 6072\n+#define LOG_LEVEL RTE_LOG_DEBUG\n+#define LOG_DEBUG(log_type, fmt, args...) 
do {\t\\\n+\tRTE_LOG(DEBUG, log_type, fmt, ##args);\t\\\n+} while (0)\n+#define VHOST_PRINT_PACKET(device, addr, size, header) do {\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\tchar *pkt_addr = (char*)(addr);\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\tunsigned int index;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\tchar packet[VHOST_MAX_PRINT_BUFF];\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\tif ((header))\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\t\tsnprintf(packet, VHOST_MAX_PRINT_BUFF, \"(%\"PRIu64\") Header size %d: \", (device->device_fh), (size));\t\t\t\t\\\n+\telse\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\t\tsnprintf(packet, VHOST_MAX_PRINT_BUFF, \"(%\"PRIu64\") Packet size %d: \", (device->device_fh), (size));\t\t\t\t\\\n+\tfor (index = 0; index < (size); index++) {\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\t\tsnprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF),\t\\\n+\t\t\t\"%02hhx \", pkt_addr[index]);\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\tsnprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \"\\n\");\t\\\n+\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+\tLOG_DEBUG(VHOST_DATA, \"%s\", packet);\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n+} while(0)\n+#else\n+#define LOG_LEVEL RTE_LOG_INFO\n+#define LOG_DEBUG(log_type, fmt, args...) do{} while(0)\n+#define VHOST_PRINT_PACKET(device, addr, size, header) do{} while(0)\n+#endif\n+\n+/**\n+ * Structure used to identify device context.\n+ */\n+struct vhost_device_ctx\n+{\n+\tpid_t\t\tpid;\t/* PID of process calling the IOCTL. */\n+\tuint64_t \tfh;\t/* Populated with fi->fh to track the device index. */\n+};\n+\n+/**\n+ * Structure contains function pointers to be defined in virtio-net.c. These\n+ * functions are called in CUSE context and are used to configure devices.\n+ */\n+struct vhost_net_device_ops {\n+\tint (* new_device)(struct vhost_device_ctx);\n+\tvoid (* destroy_device)(struct vhost_device_ctx);\n+\n+\tint (* get_features)(struct vhost_device_ctx, uint64_t *);\n+\tint (* set_features)(struct vhost_device_ctx, uint64_t *);\n+\n+\tint (* set_mem_table)(struct vhost_device_ctx, const void *, uint32_t);\n+\n+\tint (* set_vring_num)(struct vhost_device_ctx, struct vhost_vring_state *);\n+\tint (* set_vring_addr)(struct vhost_device_ctx, struct vhost_vring_addr *);\n+\tint (* set_vring_base)(struct vhost_device_ctx, struct vhost_vring_state *);\n+\tint (* get_vring_base)(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);\n+\n+\tint (* set_vring_kick)(struct vhost_device_ctx, struct vhost_vring_file *);\n+\tint (* set_vring_call)(struct vhost_device_ctx, struct vhost_vring_file *);\n+\n+\tint (* set_backend)(struct vhost_device_ctx, struct vhost_vring_file *);\n+\n+\tint (* set_owner)(struct vhost_device_ctx);\n+\tint (* reset_owner)(struct vhost_device_ctx);\n+};\n+\n+\n+struct vhost_net_device_ops const * get_virtio_net_callbacks(void);\ndiff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c\nnew file mode 100644\nindex 0000000..d25457b\n--- /dev/null\n+++ b/lib/librte_vhost/vhost_rxtx.c\n@@ -0,0 +1,292 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdint.h>\n+#include <linux/virtio_net.h>\n+\n+#include <rte_mbuf.h>\n+#include <rte_memcpy.h>\n+#include <rte_virtio_net.h>\n+\n+#include \"vhost-net-cdev.h\"\n+\n+#define VHOST_MAX_PKT_BURST 64\n+#define VHOST_MAX_MRG_PKT_BURST 64\n+\n+\n+uint32_t\n+rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count)\n+{\n+\tstruct vhost_virtqueue *vq;\n+\tstruct vring_desc *desc;\n+\tstruct rte_mbuf *buff;\n+\t/* The virtio_hdr is initialised to 0. */\n+\tstruct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};\n+\tuint64_t buff_addr = 0;\n+\tuint64_t buff_hdr_addr = 0;\n+\tuint32_t head[VHOST_MAX_PKT_BURST], packet_len = 0;\n+\tuint32_t head_idx, packet_success = 0;\n+\tuint32_t mergeable, mrg_count = 0;\n+\tuint16_t avail_idx, res_cur_idx;\n+\tuint16_t res_base_idx, res_end_idx;\n+\tuint16_t free_entries;\n+\tuint8_t success = 0;\n+\n+\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") %s()\\n\", dev->device_fh, __func__);\n+\tif (unlikely(queue_id != VIRTIO_RXQ)) {\n+\t\tLOG_DEBUG(VHOST_DATA, \"mq isn't supported in this version.\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\tvq = dev->virtqueue[VIRTIO_RXQ];\n+\tcount = (count > VHOST_MAX_PKT_BURST) ? VHOST_MAX_PKT_BURST : count;\n+\t/* As many data cores may want access to available buffers, they need to be reserved. */\n+\tdo {\n+\t\tres_base_idx = vq->last_used_idx_res;\n+\t\tavail_idx = *((volatile uint16_t *)&vq->avail->idx);\n+\n+\t\tfree_entries = (avail_idx - res_base_idx);\n+\t\t/*check that we have enough buffers*/\n+\t\tif (unlikely(count > free_entries))\n+\t\t\tcount = free_entries;\n+\n+\t\tif (count == 0)\n+\t\t\treturn 0;\n+\n+\t\tres_end_idx = res_base_idx + count;\n+\t\t/* vq->last_used_idx_res is atomically updated. 
*/\n+\t\t/* TODO: Allow to disable cmpset if no concurrency in application */\n+\t\tsuccess = rte_atomic16_cmpset(&vq->last_used_idx_res,\n+\t\t\t\tres_base_idx, res_end_idx);\n+\t\t/* If there is contention here and failed, try again. */\n+\t} while (unlikely(success == 0));\n+\tres_cur_idx = res_base_idx;\n+\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Current Index %d| End Index %d\\n\",\n+\t\t\tdev->device_fh,\n+\t\t\tres_cur_idx, res_end_idx);\n+\n+\t/* Prefetch available ring to retrieve indexes. */\n+\trte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);\n+\n+\t/* Check if the VIRTIO_NET_F_MRG_RXBUF feature is enabled. */\n+\tmergeable = dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF);\n+\n+\t/* Retrieve all of the head indexes first to avoid caching issues. */\n+\tfor (head_idx = 0; head_idx < count; head_idx++)\n+\t\thead[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];\n+\n+\t/*Prefetch descriptor index. */\n+\trte_prefetch0(&vq->desc[head[packet_success]]);\n+\n+\twhile (res_cur_idx != res_end_idx) {\n+\t\t/* Get descriptor from available ring */\n+\t\tdesc = &vq->desc[head[packet_success]];\n+\n+\t\tbuff = pkts[packet_success];\n+\n+\t\t/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */\n+\t\tbuff_addr = gpa_to_vva(dev, desc->addr);\n+\t\t/* Prefetch buffer address. */\n+\t\trte_prefetch0((void *)(uintptr_t)buff_addr);\n+\n+\t\tif (mergeable && (mrg_count != 0)) {\n+\t\t\tdesc->len = packet_len = rte_pktmbuf_data_len(buff);\n+\t\t} else {\n+\t\t\t/* Copy virtio_hdr to packet and increment buffer address */\n+\t\t\tbuff_hdr_addr = buff_addr;\n+\t\t\tpacket_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;\n+\n+\t\t\t/*\n+\t\t\t * If the descriptors are chained the header and data are placed in\n+\t\t\t * separate buffers.\n+\t\t\t */\n+\t\t\tif (desc->flags & VRING_DESC_F_NEXT) {\n+\t\t\t\tdesc->len = vq->vhost_hlen;\n+\t\t\t\tdesc = &vq->desc[desc->next];\n+\t\t\t\t/* Buffer address translation. */\n+\t\t\t\tbuff_addr = gpa_to_vva(dev, desc->addr);\n+\t\t\t\tdesc->len = rte_pktmbuf_data_len(buff);\n+\t\t\t} else {\n+\t\t\t\tbuff_addr += vq->vhost_hlen;\n+\t\t\t\tdesc->len = packet_len;\n+\t\t\t}\n+\t\t}\n+\n+\t\tVHOST_PRINT_PACKET(dev, (uintptr_t)buff_addr, rte_pktmbuf_data_len(buff), 0);\n+\n+\t\t/* Update used ring with desc information */\n+\t\tvq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];\n+\t\tvq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;\n+\n+\t\t/* Copy mbuf data to buffer */\n+\t\t/* TODO fixme for sg mbuf and the case that desc couldn't hold the mbuf data */\n+\t\trte_memcpy((void *)(uintptr_t)buff_addr, (const void *)buff->pkt.data, rte_pktmbuf_data_len(buff));\n+\n+\t\tres_cur_idx++;\n+\t\tpacket_success++;\n+\n+\t\t/* If mergeable is disabled then a header is required per buffer. */\n+\t\tif (!mergeable) {\n+\t\t\trte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen);\n+\t\t\tVHOST_PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);\n+\t\t} else {\n+\t\t\tmrg_count++;\n+\t\t\t/* Merge buffer can only handle so many buffers at a time. Tell the guest if this limit is reached. 
*/\n+\t\t\tif ((mrg_count == VHOST_MAX_MRG_PKT_BURST) || (res_cur_idx == res_end_idx)) {\n+\t\t\t\tvirtio_hdr.num_buffers = mrg_count;\n+\t\t\t\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") RX: Num merge buffers %d\\n\", dev->device_fh, virtio_hdr.num_buffers);\n+\t\t\t\trte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen);\n+\t\t\t\tVHOST_PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);\n+\t\t\t\tmrg_count = 0;\n+\t\t\t}\n+\t\t}\n+\t\tif (res_cur_idx < res_end_idx) {\n+\t\t\t/* Prefetch descriptor index. */\n+\t\t\trte_prefetch0(&vq->desc[head[packet_success]]);\n+\t\t}\n+\t}\n+\n+\trte_compiler_barrier();\n+\n+\t/* Wait until it's our turn to add our buffer to the used ring. */\n+\twhile (unlikely(vq->last_used_idx != res_base_idx))\n+\t\trte_pause();\n+\n+\t*(volatile uint16_t *)&vq->used->idx += count;\n+\tvq->last_used_idx = res_end_idx;\n+\n+\t/* Kick the guest if necessary. */\n+\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n+\t\teventfd_write((int)vq->kickfd, 1);\n+\treturn count;\n+}\n+\n+\n+uint32_t\n+rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint32_t count)\n+{\n+\tstruct rte_mbuf *mbuf;\n+\tstruct vhost_virtqueue *vq;\n+\tstruct vring_desc *desc;\n+\tuint64_t buff_addr = 0;\n+\tuint32_t head[VHOST_MAX_PKT_BURST];\n+\tuint32_t used_idx;\n+\tuint32_t i;\n+\tuint16_t free_entries, packet_success = 0;\n+\tuint16_t avail_idx;\n+\n+\tif (unlikely(queue_id != VIRTIO_TXQ)) {\n+\t\tLOG_DEBUG(VHOST_DATA, \"mq isn't supported in this version.\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\tvq = dev->virtqueue[VIRTIO_TXQ];\n+\tavail_idx =  *((volatile uint16_t *)&vq->avail->idx);\n+\n+\t/* If there are no available buffers then return. */\n+\tif (vq->last_used_idx == avail_idx)\n+\t\treturn 0;\n+\n+\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") virtio_dev_tx()\\n\", dev->device_fh);\n+\n+\t/* Prefetch available ring to retrieve head indexes. */\n+\trte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);\n+\n+\t/*get the number of free entries in the ring*/\n+\tfree_entries = (avail_idx - vq->last_used_idx);\n+\n+\tif (free_entries > count)\n+\t\tfree_entries = count;\n+\t/* Limit to MAX_PKT_BURST. */\n+\tif (free_entries > VHOST_MAX_PKT_BURST)\n+\t\tfree_entries = VHOST_MAX_PKT_BURST;\n+\n+\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Buffers available %d\\n\", dev->device_fh, free_entries);\n+\t/* Retrieve all of the head indexes first to avoid caching issues. */\n+\tfor (i = 0; i < free_entries; i++)\n+\t\thead[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];\n+\n+\t/* Prefetch descriptor index. */\n+\trte_prefetch0(&vq->desc[head[packet_success]]);\n+\trte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);\n+\n+\twhile (packet_success < free_entries) {\n+\t\tdesc = &vq->desc[head[packet_success]];\n+\n+\t\t/* Discard first buffer as it is the virtio header */\n+\t\tdesc = &vq->desc[desc->next];\n+\n+\t\t/* Buffer address translation. */\n+\t\tbuff_addr = gpa_to_vva(dev, desc->addr);\n+\t\t/* Prefetch buffer address. */\n+\t\trte_prefetch0((void *)(uintptr_t)buff_addr);\n+\n+\t\tused_idx = vq->last_used_idx & (vq->size - 1);\n+\n+\t\tif (packet_success < (free_entries - 1)) {\n+\t\t\t/* Prefetch descriptor index. */\n+\t\t\trte_prefetch0(&vq->desc[head[packet_success+1]]);\n+\t\t\trte_prefetch0(&vq->used->ring[(used_idx + 1) & (vq->size - 1)]);\n+\t\t}\n+\n+\t\t/* Update used index buffer information. 
*/\n+\t\tvq->used->ring[used_idx].id = head[packet_success];\n+\t\tvq->used->ring[used_idx].len = 0;\n+\n+\t\tmbuf = rte_pktmbuf_alloc(mbuf_pool);\n+\t\tif (unlikely(mbuf == NULL)) {\n+\t\t\tRTE_LOG(ERR, VHOST_DATA, \"Failed to allocate memory for mbuf.\\n\");\n+\t\t\treturn packet_success;\n+\t\t}\n+\t\tmbuf->pkt.data_len = desc->len;\n+\t\tmbuf->pkt.pkt_len  = mbuf->pkt.data_len;\n+\n+\t\trte_memcpy((void *) mbuf->pkt.data,\n+\t\t\t(const void *) buff_addr, mbuf->pkt.data_len);\n+\n+\t\tpkts[packet_success] = mbuf;\n+\n+\t\tVHOST_PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);\n+\n+\t\tvq->last_used_idx++;\n+\t\tpacket_success++;\n+\t}\n+\n+\trte_compiler_barrier();\n+\tvq->used->idx += packet_success;\n+\t/* Kick guest if required. */\n+\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n+\t\teventfd_write((int)vq->kickfd, 1);\n+\n+\treturn packet_success;\n+}\ndiff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c\nnew file mode 100644\nindex 0000000..ccda8e9\n--- /dev/null\n+++ b/lib/librte_vhost/virtio-net.c\n@@ -0,0 +1,1002 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <dirent.h>\n+#include <fuse/cuse_lowlevel.h>\n+#include <linux/vhost.h>\n+#include <linux/virtio_net.h>\n+#include <stddef.h>\n+#include <stdint.h>\n+#include <stdlib.h>\n+#include <sys/eventfd.h>\n+#include <sys/ioctl.h>\n+#include <sys/mman.h>\n+#include <unistd.h>\n+\n+#include <rte_ethdev.h>\n+#include <rte_log.h>\n+#include <rte_string_fns.h>\n+#include <rte_memory.h>\n+#include <rte_virtio_net.h>\n+\n+#include \"vhost-net-cdev.h\"\n+#include \"eventfd_link/eventfd_link.h\"\n+\n+/**\n+ * Device linked list structure for configuration.\n+ */\n+struct virtio_net_config_ll {\n+\tstruct virtio_net             dev;    /* Virtio device. 
*/\n+\tstruct virtio_net_config_ll   *next;  /* Next entry on linked list. */\n+};\n+\n+static const char eventfd_cdev[] = \"/dev/eventfd-link\";\n+\n+/* device ops to add/remove device to data core. */\n+static struct virtio_net_device_ops const *notify_ops;\n+/* Root address of the linked list in the configuration core. */\n+static struct virtio_net_config_ll *ll_root;\n+\n+/* Features supported by this library. */\n+#define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)\n+static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;\n+\n+/* Line size for reading maps file. */\n+static const uint32_t BUFSIZE = PATH_MAX;\n+\n+/* Size of prot char array in procmap. */\n+#define PROT_SZ 5\n+\n+/* Number of elements in procmap struct. */\n+#define PROCMAP_SZ 8\n+\n+/* Structure containing information gathered from maps file. */\n+struct procmap {\n+\tuint64_t    va_start;         /* Start virtual address in file. */\n+\tuint64_t    len;              /* Size of file. */\n+\tuint64_t    pgoff;            /* Not used. */\n+\tuint32_t    maj;              /* Not used. */\n+\tuint32_t    min;              /* Not used. */\n+\tuint32_t    ino;              /* Not used. */\n+\tchar        prot[PROT_SZ];    /* Not used. */\n+\tchar        fname[PATH_MAX];  /* File name. */\n+};\n+\n+/**\n+ * Converts QEMU virtual address to Vhost virtual address. This function is used\n+ * to convert the ring addresses to our address space.\n+ */\n+static uint64_t\n+qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)\n+{\n+\tstruct virtio_memory_regions *region;\n+\tuint64_t vhost_va = 0;\n+\tuint32_t regionidx = 0;\n+\n+\t/* Find the region where the address lives. */\n+\tfor (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {\n+\t\tregion = &dev->mem->regions[regionidx];\n+\t\tif ((qemu_va >= region->userspace_address) &&\n+\t\t\t(qemu_va <= region->userspace_address +\n+\t\t\tregion->memory_size)) {\n+\t\t\tvhost_va = dev->mem->mapped_address + qemu_va - dev->mem->base_address;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\treturn vhost_va;\n+}\n+\n+/**\n+ * Locate the file containing QEMU's memory space and map it to our address space.\n+ */\n+static int\n+host_memory_map(struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, uint64_t addr)\n+{\n+\tstruct dirent *dptr = NULL;\n+\tstruct procmap procmap;\n+\tDIR *dp = NULL;\n+\tint fd;\n+\tint i;\n+\tchar memfile[PATH_MAX];\n+\tchar mapfile[PATH_MAX];\n+\tchar procdir[PATH_MAX];\n+\tchar resolved_path[PATH_MAX];\n+\tFILE *fmap;\n+\tvoid *map;\n+\tuint8_t\tfound = 0;\n+\tchar line[BUFSIZE];\n+\tchar dlm[] = \"-   :   \";\n+\tchar *str, *sp, *in[PROCMAP_SZ];\n+\tchar *end = NULL;\n+\n+\t/* Path where mem files are located. */\n+\tsnprintf(procdir, PATH_MAX, \"/proc/%u/fd/\", pid);\n+\t/* Maps file used to locate mem file. */\n+\tsnprintf(mapfile, PATH_MAX, \"/proc/%u/maps\", pid);\n+\n+\tfmap = fopen(mapfile, \"r\");\n+\tif (fmap == NULL) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to open maps file for pid %d\\n\", dev->device_fh, pid);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Read through maps file until we find out base_address. */\n+\twhile (fgets(line, BUFSIZE, fmap) != 0) {\n+\t\tstr = line;\n+\t\terrno = 0;\n+\t\t/* Split line in to fields. */\n+\t\tfor (i = 0; i < PROCMAP_SZ; i++) {\n+\t\t\tin[i] = strtok_r(str, &dlm[i], &sp);\n+\t\t\tif ((in[i] == NULL) || (errno != 0)) {\n+\t\t\t\tfclose(fmap);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tstr = NULL;\n+\t\t}\n+\n+\t\t/* Convert/Copy each field as needed. 
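Each /proc/<pid>/maps line has the form va_start-va_end perms pgoff maj:min ino fname, which is the order of the in[] fields below. 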
*/\n+\t\tprocmap.va_start = strtoull(in[0], &end, 16);\n+\t\tif ((*in[0] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n+\t\t\tfclose(fmap);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tprocmap.len = strtoull(in[1], &end, 16);\n+\t\tif ((*in[1] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n+\t\t\tfclose(fmap);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tprocmap.pgoff = strtoull(in[3], &end, 16);\n+\t\tif ((*in[3] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n+\t\t\tfclose(fmap);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tprocmap.maj = strtoul(in[4], &end, 16);\n+\t\tif ((*in[4] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n+\t\t\tfclose(fmap);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tprocmap.min = strtoul(in[5], &end, 16);\n+\t\tif ((*in[5] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n+\t\t\tfclose(fmap);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tprocmap.ino = strtoul(in[6], &end, 16);\n+\t\tif ((*in[6] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n+\t\t\tfclose(fmap);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tmemcpy(&procmap.prot, in[2], PROT_SZ);\n+\t\tmemcpy(&procmap.fname, in[7], PATH_MAX);\n+\n+\t\tif (procmap.va_start == addr) {\n+\t\t\tprocmap.len = procmap.len - procmap.va_start;\n+\t\t\tfound = 1;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\tfclose(fmap);\n+\n+\tif (!found) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find memory file in pid %d maps file\\n\", dev->device_fh, pid);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Find the guest memory file among the process fds. */\n+\tdp = opendir(procdir);\n+\tif (dp == NULL) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Cannot open pid %d process directory\\n\", dev->device_fh, pid);\n+\t\treturn -1;\n+\t}\n+\n+\tfound = 0;\n+\n+\t/* Read the fd directory contents. */\n+\twhile (NULL != (dptr = readdir(dp))) {\n+\t\tsnprintf(memfile, PATH_MAX, \"/proc/%u/fd/%s\", pid, dptr->d_name);\n+\t\tif (realpath(memfile, resolved_path) == NULL) {\n+\t\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to resolve fd directory\\n\", dev->device_fh);\n+\t\t\tclosedir(dp);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (strncmp(resolved_path, procmap.fname,\n+\t\t\tstrnlen(procmap.fname, PATH_MAX)) == 0) {\n+\t\t\tfound = 1;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tclosedir(dp);\n+\n+\tif (found == 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find memory file for pid %d\\n\", dev->device_fh, pid);\n+\t\treturn -1;\n+\t}\n+\t/* Open the shared memory file and map the memory into this process. 
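The mmap() call below uses MAP_POPULATE so that the entire guest memory region is faulted in up front rather than during packet processing. 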
*/\n+\tfd = open(memfile, O_RDWR);\n+\n+\tif (fd == -1) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to open %s for pid %d\\n\", dev->device_fh, memfile, pid);\n+\t\treturn -1;\n+\t}\n+\n+\tmap = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE , MAP_POPULATE|MAP_SHARED, fd, 0);\n+\tclose(fd);\n+\n+\tif (map == MAP_FAILED) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Error mapping the file %s for pid %d\\n\",  dev->device_fh, memfile, pid);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Store the memory address and size in the device data structure */\n+\tmem->mapped_address = (uint64_t)(uintptr_t)map;\n+\tmem->mapped_size = procmap.len;\n+\n+\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") Mem File: %s->%s - Size: %llu - VA: %p\\n\", dev->device_fh,\n+\t\tmemfile, resolved_path, (long long unsigned)mem->mapped_size, map);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ *  Initialise all variables in device structure.\n+ */\n+static void\n+init_device(struct virtio_net *dev)\n+{\n+\tuint64_t vq_offset;\n+\n+\t/* Virtqueues have already been malloced so we don't want to set them to NULL. */\n+\tvq_offset = offsetof(struct virtio_net, mem);\n+\n+\t/* Set everything to 0. */\n+\tmemset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,\n+\t\t(sizeof(struct virtio_net) - (size_t)vq_offset));\n+\tmemset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));\n+\tmemset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));\n+\n+\t/* Backends are set to -1 indicating an inactive device. */\n+\tdev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;\n+\tdev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;\n+}\n+\n+/**\n+ * Unmap any memory, close any file descriptors and free any memory owned by a device.\n+ */\n+static void\n+cleanup_device(struct virtio_net *dev)\n+{\n+\t/* Unmap QEMU memory file if mapped. */\n+\tif (dev->mem) {\n+\t\tmunmap((void *)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);\n+\t\tfree(dev->mem);\n+\t}\n+\n+\t/* Close any event notifiers opened by device. */\n+\tif (dev->virtqueue[VIRTIO_RXQ]->callfd)\n+\t\tclose((int)dev->virtqueue[VIRTIO_RXQ]->callfd);\n+\tif (dev->virtqueue[VIRTIO_RXQ]->kickfd)\n+\t\tclose((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);\n+\tif (dev->virtqueue[VIRTIO_TXQ]->callfd)\n+\t\tclose((int)dev->virtqueue[VIRTIO_TXQ]->callfd);\n+\tif (dev->virtqueue[VIRTIO_TXQ]->kickfd)\n+\t\tclose((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);\n+}\n+\n+/**\n+ * Release virtqueues and device memory.\n+ */\n+static void\n+free_device(struct virtio_net_config_ll *ll_dev)\n+{\n+\t/* Free any malloc'd memory */\n+\tfree(ll_dev->dev.virtqueue[VIRTIO_RXQ]);\n+\tfree(ll_dev->dev.virtqueue[VIRTIO_TXQ]);\n+\tfree(ll_dev);\n+}\n+\n+/**\n+ * Retrieves an entry from the devices configuration linked list.\n+ */\n+static struct virtio_net_config_ll *\n+get_config_ll_entry(struct vhost_device_ctx ctx)\n+{\n+\tstruct virtio_net_config_ll *ll_dev = ll_root;\n+\n+\t/* Loop through linked list until the device_fh is found. */\n+\twhile (ll_dev != NULL) {\n+\t\tif (ll_dev->dev.device_fh == ctx.fh)\n+\t\t\treturn ll_dev;\n+\t\tll_dev = ll_dev->next;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * Searches the configuration core linked list and retrieves the device if it exists.\n+ */\n+static struct virtio_net *\n+get_device(struct vhost_device_ctx ctx)\n+{\n+\tstruct virtio_net_config_ll *ll_dev;\n+\n+\tll_dev = get_config_ll_entry(ctx);\n+\n+\t/* If a matching entry is found in the linked list, return the device in that entry. 
*/\n+\tif (ll_dev)\n+\t\treturn &ll_dev->dev;\n+\n+\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Device not found in linked list.\\n\", ctx.fh);\n+\treturn NULL;\n+}\n+\n+/**\n+ * Add entry containing a device to the device configuration linked list.\n+ */\n+static void\n+add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)\n+{\n+\tstruct virtio_net_config_ll *ll_dev = ll_root;\n+\n+\t/* If ll_dev is NULL this is the first device; it is handled in the else branch. */\n+\tif (ll_dev) {\n+\t\t/* If the 1st device_fh != 0 then we insert our device here. */\n+\t\tif (ll_dev->dev.device_fh != 0) {\n+\t\t\tnew_ll_dev->dev.device_fh = 0;\n+\t\t\tnew_ll_dev->next = ll_dev;\n+\t\t\tll_root = new_ll_dev;\n+\t\t} else {\n+\t\t\t/* Iterate through the linked list until we find an unused device_fh. Insert the device at that entry. */\n+\t\t\twhile ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))\n+\t\t\t\tll_dev = ll_dev->next;\n+\n+\t\t\tnew_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;\n+\t\t\tnew_ll_dev->next = ll_dev->next;\n+\t\t\tll_dev->next = new_ll_dev;\n+\t\t}\n+\t} else {\n+\t\tll_root = new_ll_dev;\n+\t\tll_root->dev.device_fh = 0;\n+\t}\n+}\n+\n+/**\n+ * Remove an entry from the device configuration linked list.\n+ */\n+static struct virtio_net_config_ll *\n+rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config_ll *ll_dev_last)\n+{\n+\t/* First remove the device and then clean it up. */\n+\tif (ll_dev == ll_root) {\n+\t\tll_root = ll_dev->next;\n+\t\tcleanup_device(&ll_dev->dev);\n+\t\tfree_device(ll_dev);\n+\t\treturn ll_root;\n+\t} else {\n+\t\tif (likely(ll_dev_last != NULL)) {\n+\t\t\tll_dev_last->next = ll_dev->next;\n+\t\t\tcleanup_device(&ll_dev->dev);\n+\t\t\tfree_device(ll_dev);\n+\t\t\treturn ll_dev_last->next;\n+\t\t} else {\n+\t\t\tcleanup_device(&ll_dev->dev);\n+\t\t\tfree_device(ll_dev);\n+\t\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Remove entry from config_ll failed\\n\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Function is called from the CUSE open function. The device structure is\n+ * initialised and a new entry is added to the device configuration linked\n+ * list.\n+ */\n+static int\n+new_device(struct vhost_device_ctx ctx)\n+{\n+\tstruct virtio_net_config_ll *new_ll_dev;\n+\tstruct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;\n+\n+\t/* Setup device and virtqueues. */\n+\tnew_ll_dev = malloc(sizeof(struct virtio_net_config_ll));\n+\tif (new_ll_dev == NULL) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to allocate memory for dev.\\n\", ctx.fh);\n+\t\treturn -1;\n+\t}\n+\n+\tvirtqueue_rx = malloc(sizeof(struct vhost_virtqueue));\n+\tif (virtqueue_rx == NULL) {\n+\t\tfree(new_ll_dev);\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to allocate memory for virtqueue_rx.\\n\", ctx.fh);\n+\t\treturn -1;\n+\t}\n+\n+\tvirtqueue_tx = malloc(sizeof(struct vhost_virtqueue));\n+\tif (virtqueue_tx == NULL) {\n+\t\tfree(virtqueue_rx);\n+\t\tfree(new_ll_dev);\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to allocate memory for virtqueue_tx.\\n\", ctx.fh);\n+\t\treturn -1;\n+\t}\n+\n+\tnew_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;\n+\tnew_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;\n+\n+\t/* Initialise device and virtqueues. */\n+\tinit_device(&new_ll_dev->dev);\n+\n+\tnew_ll_dev->next = NULL;\n+\n+\t/* Add entry to device configuration linked list. 
*/\n+\tadd_config_ll_entry(new_ll_dev);\n+\n+\treturn new_ll_dev->dev.device_fh;\n+}\n+\n+/**\n+ * Function is called from the CUSE release function. This function will cleanup\n+ * the device and remove it from device configuration linked list.\n+ */\n+static void\n+destroy_device(struct vhost_device_ctx ctx)\n+{\n+\tstruct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;\n+\tstruct virtio_net_config_ll *ll_dev_cur = ll_root;\n+\n+\t/* Find the linked list entry for the device to be removed. */\n+\tll_dev_cur_ctx = get_config_ll_entry(ctx);\n+\twhile (ll_dev_cur != NULL) {\n+\t\t/* If the device is found or a device that doesn't exist is found then it is removed. */\n+\t\tif (ll_dev_cur == ll_dev_cur_ctx) {\n+\t\t\t/*\n+\t\t\t * If the device is running on a data core then call the function to remove it from\n+\t\t\t * the data core.\n+\t\t\t */\n+\t\t\tif ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))\n+\t\t\t\tnotify_ops->destroy_device(&(ll_dev_cur->dev));\n+\t\t\tll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);\n+\t\t\t/*TODO return here? */\n+\t\t} else {\n+\t\t\tll_dev_last = ll_dev_cur;\n+\t\t\tll_dev_cur = ll_dev_cur->next;\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_SET_OWNER\n+ * This function just returns success at the moment unless the device hasn't been initialised.\n+ */\n+static int\n+set_owner(struct vhost_device_ctx ctx)\n+{\n+\tstruct virtio_net *dev;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_RESET_OWNER\n+ */\n+static int\n+reset_owner(struct vhost_device_ctx ctx)\n+{\n+\tstruct virtio_net_config_ll *ll_dev;\n+\n+\tll_dev = get_config_ll_entry(ctx);\n+\n+\tcleanup_device(&ll_dev->dev);\n+\tinit_device(&ll_dev->dev);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_GET_FEATURES\n+ * The features that we support are requested.\n+ */\n+static int\n+get_features(struct vhost_device_ctx ctx, uint64_t *pu)\n+{\n+\tstruct virtio_net *dev;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\t/* Send our supported features. */\n+\t*pu = VHOST_FEATURES;\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_SET_FEATURES\n+ * We receive the negotiated set of features supported by us and the virtio device.\n+ */\n+static int\n+set_features(struct vhost_device_ctx ctx, uint64_t *pu)\n+{\n+\tstruct virtio_net *dev;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\tif (*pu & ~VHOST_FEATURES)\n+\t\treturn -1;\n+\n+\t/* Store the negotiated feature list for the device. */\n+\tdev->features = *pu;\n+\n+\t/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */\n+\tif (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") Mergeable RX buffers enabled\\n\", dev->device_fh);\n+\t\tdev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n+\t\tdev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n+\t} else {\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") Mergeable RX buffers disabled\\n\", dev->device_fh);\n+\t\tdev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);\n+\t\tdev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);\n+\t}\n+\treturn 0;\n+}\n+\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE\n+ * This function creates and populates the memory structure for the device. 
This includes\n+ * storing offsets used to translate buffer addresses.\n+ */\n+static int\n+set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_t nregions)\n+{\n+\tstruct virtio_net *dev;\n+\tstruct vhost_memory_region *mem_regions;\n+\tstruct virtio_memory *mem;\n+\tuint64_t size = offsetof(struct vhost_memory, regions);\n+\tuint32_t regionidx, valid_regions;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\tif (dev->mem) {\n+\t\tmunmap((void *)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);\n+\t\tfree(dev->mem);\n+\t}\n+\n+\t/* Malloc the memory structure depending on the number of regions. */\n+\tmem = calloc(1, sizeof(struct virtio_memory) + (sizeof(struct virtio_memory_regions) * nregions));\n+\tif (mem == NULL) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to allocate memory for dev->mem.\\n\", dev->device_fh);\n+\t\treturn -1;\n+\t}\n+\n+\tmem->nregions = nregions;\n+\n+\tmem_regions = (void *)(uintptr_t)((uint64_t)(uintptr_t)mem_regions_addr + size);\n+\n+\tfor (regionidx = 0; regionidx < mem->nregions; regionidx++) {\n+\t\t/* Populate the region structure for each region. */\n+\t\tmem->regions[regionidx].guest_phys_address = mem_regions[regionidx].guest_phys_addr;\n+\t\tmem->regions[regionidx].guest_phys_address_end = mem->regions[regionidx].guest_phys_address +\n+\t\t\tmem_regions[regionidx].memory_size;\n+\t\tmem->regions[regionidx].memory_size = mem_regions[regionidx].memory_size;\n+\t\tmem->regions[regionidx].userspace_address = mem_regions[regionidx].userspace_addr;\n+\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%\"PRIu64\")\\n\", dev->device_fh,\n+\t\t\t\tregionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,\n+\t\t\t\t(void *)(uintptr_t)mem->regions[regionidx].userspace_address,\n+\t\t\t\tmem->regions[regionidx].memory_size);\n+\n+\t\t/*set the base address mapping*/\n+\t\tif (mem->regions[regionidx].guest_phys_address == 0x0) {\n+\t\t\tmem->base_address = mem->regions[regionidx].userspace_address;\n+\t\t\t/* Map VM memory file */\n+\t\t\tif (host_memory_map(dev, mem, ctx.pid, mem->base_address) != 0) {\n+\t\t\t\tfree(mem);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* Check that we have a valid base address. */\n+\tif (mem->base_address == 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find base address of qemu memory file.\\n\", dev->device_fh);\n+\t\tfree(mem);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Check if all of our regions have valid mappings. Usually one does not exist in the QEMU memory file. */\n+\tvalid_regions = mem->nregions;\n+\tfor (regionidx = 0; regionidx < mem->nregions; regionidx++) {\n+\t\tif ((mem->regions[regionidx].userspace_address < mem->base_address) ||\n+\t\t\t(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size)))\n+\t\t\t\tvalid_regions--;\n+\t}\n+\n+\t/* If a region does not have a valid mapping we rebuild our memory struct to contain only valid entries. */\n+\tif (valid_regions != mem->nregions) {\n+\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\\n\",\n+\t\t\tdev->device_fh);\n+\n+\t\t/* Re-populate the memory structure with only valid regions. Invalid regions are over-written with memmove. 
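Scanning from the last region downwards means the entries already validated sit contiguously just above the current index, so each memmove only has to shift valid_regions entries. 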
*/\n+\t\tvalid_regions = 0;\n+\n+\t\tfor (regionidx = mem->nregions; 0 != regionidx--;) {\n+\t\t\tif ((mem->regions[regionidx].userspace_address < mem->base_address) ||\n+\t\t\t\t\t(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size))) {\n+\t\t\t\tmemmove(&mem->regions[regionidx], &mem->regions[regionidx + 1],\n+\t\t\t\t\tsizeof(struct virtio_memory_regions) * valid_regions);\n+\t\t\t} else {\n+\t\t\t\tvalid_regions++;\n+\t\t\t}\n+\t\t}\n+\t}\n+\tmem->nregions = valid_regions;\n+\tdev->mem = mem;\n+\n+\t/*\n+\t * Calculate the address offset for each region. This offset is used to identify the vhost virtual address\n+\t * corresponding to a QEMU guest physical address.\n+\t */\n+\tfor (regionidx = 0; regionidx < dev->mem->nregions; regionidx++)\n+\t\tdev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address\n+\t\t\t+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_SET_VRING_NUM\n+ * The virtio device sends us the size of the descriptor ring.\n+ */\n+static int\n+set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)\n+{\n+\tstruct virtio_net *dev;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\t/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n+\tdev->virtqueue[state->index]->size = state->num;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR\n+ * The virtio device sends us the desc, used and avail ring addresses. This function\n+ * then converts these to our address space.\n+ */\n+static int\n+set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)\n+{\n+\tstruct virtio_net *dev;\n+\tstruct vhost_virtqueue *vq;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\t/* addr->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n+\tvq = dev->virtqueue[addr->index];\n+\n+\t/* The addresses are converted from QEMU virtual to Vhost virtual. 
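A return value of 0 from qva_to_vva() means no memory region contains the address, and is treated as a fatal error below. 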
*/\n+\tvq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);\n+\tif (vq->desc == 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find descriptor ring address.\\n\", dev->device_fh);\n+\t\treturn -1;\n+\t}\n+\n+\tvq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);\n+\tif (vq->avail == 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find available ring address.\\n\", dev->device_fh);\n+\t\treturn -1;\n+\t}\n+\n+\tvq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);\n+\tif (vq->used == 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find used ring address.\\n\", dev->device_fh);\n+\t\treturn -1;\n+\t}\n+\n+\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") mapped address desc: %p\\n\", dev->device_fh, vq->desc);\n+\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") mapped address avail: %p\\n\", dev->device_fh, vq->avail);\n+\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") mapped address used: %p\\n\", dev->device_fh, vq->used);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_SET_VRING_BASE\n+ * The virtio device sends us the available ring last used index.\n+ */\n+static int\n+set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)\n+{\n+\tstruct virtio_net *dev;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\t/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n+\tdev->virtqueue[state->index]->last_used_idx = state->num;\n+\tdev->virtqueue[state->index]->last_used_idx_res = state->num;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_GET_VRING_BASE\n+ * We send the virtio device our available ring last used index.\n+ */\n+static int\n+get_vring_base(struct vhost_device_ctx ctx, uint32_t index, struct vhost_vring_state *state)\n+{\n+\tstruct virtio_net *dev;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\tstate->index = index;\n+\t/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n+\tstate->num = dev->virtqueue[state->index]->last_used_idx;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * This function uses the eventfd_link kernel module to copy an eventfd file descriptor\n+ * provided by QEMU in to our process space.\n+ */\n+static int\n+eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)\n+{\n+\tint eventfd_link, ret;\n+\n+\t/* Open the character device to the kernel module. */\n+\teventfd_link = open(eventfd_cdev, O_RDWR);\n+\tif (eventfd_link < 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") eventfd_link module is not loaded\\n\",  dev->device_fh);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Call the IOCTL to copy the eventfd. */\n+\tret = ioctl(eventfd_link, EVENTFD_COPY, eventfd_copy);\n+\tclose(eventfd_link);\n+\n+\tif (ret < 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") EVENTFD_COPY ioctl failed\\n\",  dev->device_fh);\n+\t\treturn -1;\n+\t}\n+\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_SET_VRING_CALL\n+ * The virtio device sends an eventfd to interrupt the guest. This fd gets copied in\n+ * to our process space.\n+ */\n+static int\n+set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)\n+{\n+\tstruct virtio_net *dev;\n+\tstruct eventfd_copy\teventfd_kick;\n+\tstruct vhost_virtqueue *vq;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\t/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. 
*/\n+\tvq = dev->virtqueue[file->index];\n+\n+\tif (vq->kickfd)\n+\t\tclose((int)vq->kickfd);\n+\n+\t/* Populate the eventfd_copy structure and call eventfd_copy. */\n+\tvq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);\n+\teventfd_kick.source_fd = vq->kickfd;\n+\teventfd_kick.target_fd = file->fd;\n+\teventfd_kick.target_pid = ctx.pid;\n+\n+\tif (eventfd_copy(dev, &eventfd_kick))\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_SET_VRING_KICK\n+ * The virtio device sends an eventfd that it can use to notify us. This fd gets copied in\n+ * to our process space.\n+ */\n+static int\n+set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)\n+{\n+\tstruct virtio_net *dev;\n+\tstruct eventfd_copy eventfd_call;\n+\tstruct vhost_virtqueue *vq;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\t/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n+\tvq = dev->virtqueue[file->index];\n+\n+\tif (vq->callfd)\n+\t\tclose((int)vq->callfd);\n+\n+\t/* Populate the eventfd_copy structure and call eventfd_copy. */\n+\tvq->callfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);\n+\teventfd_call.source_fd = vq->callfd;\n+\teventfd_call.target_fd = file->fd;\n+\teventfd_call.target_pid = ctx.pid;\n+\n+\tif (eventfd_copy(dev, &eventfd_call))\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND\n+ * To complete device initialisation when the virtio driver is loaded we are provided with a\n+ * valid fd for a tap device (not used by us). If this happens then we can add the device to a\n+ * data core. When the virtio driver is removed we get fd=-1. At that point we remove the device\n+ * from the data core. The device will still exist in the device configuration linked list.\n+ */\n+static int\n+set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)\n+{\n+\tstruct virtio_net *dev;\n+\n+\tdev = get_device(ctx);\n+\tif (dev == NULL)\n+\t\treturn -1;\n+\n+\t/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n+\tdev->virtqueue[file->index]->backend = file->fd;\n+\n+\t/* If the device isn't already running and both backend fds are set we add the device. */\n+\tif (!(dev->flags & VIRTIO_DEV_RUNNING)) {\n+\t\tif (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&\n+\t\t\t((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))\n+\t\t\treturn notify_ops->new_device(dev);\n+\t/* Otherwise we remove it. 
*/\n+\t} else\n+\t\tif (file->fd == VIRTIO_DEV_STOPPED)\n+\t\t\tnotify_ops->destroy_device(dev);\n+\treturn 0;\n+}\n+\n+/**\n+ * Function pointers are set for the device operations to allow CUSE to call functions\n+ * when an IOCTL, device_add or device_release is received.\n+ */\n+static const struct vhost_net_device_ops vhost_device_ops = {\n+\t.new_device = new_device,\n+\t.destroy_device = destroy_device,\n+\n+\t.get_features = get_features,\n+\t.set_features = set_features,\n+\n+\t.set_mem_table = set_mem_table,\n+\n+\t.set_vring_num = set_vring_num,\n+\t.set_vring_addr = set_vring_addr,\n+\t.set_vring_base = set_vring_base,\n+\t.get_vring_base = get_vring_base,\n+\n+\t.set_vring_kick = set_vring_kick,\n+\t.set_vring_call = set_vring_call,\n+\n+\t.set_backend = set_backend,\n+\n+\t.set_owner = set_owner,\n+\t.reset_owner = reset_owner,\n+};\n+\n+/**\n+ * Called by main to setup callbacks when registering CUSE device.\n+ */\n+struct vhost_net_device_ops const *\n+get_virtio_net_callbacks(void)\n+{\n+\treturn &vhost_device_ops;\n+}\n+\n+int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable)\n+{\n+\tif (enable) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"guest notification isn't supported.\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tdev->virtqueue[queue_id]->used->flags = enable ? 0 : VRING_USED_F_NO_NOTIFY;\n+\treturn 0;\n+}\n+\n+uint64_t rte_vhost_feature_get(void)\n+{\n+\treturn VHOST_FEATURES;\n+}\n+\n+int rte_vhost_feature_disable(uint64_t feature_mask)\n+{\n+\tVHOST_FEATURES = VHOST_FEATURES & ~feature_mask;\n+\treturn 0;\n+}\n+\n+int rte_vhost_feature_enable(uint64_t feature_mask)\n+{\n+\tif ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {\n+\t\tVHOST_FEATURES = VHOST_FEATURES | feature_mask;\n+\t\treturn 0;\n+\t}\n+\treturn -1;\n+}\n+\n+\n+/*\n+ * Register ops so that we can add/remove device to data core.\n+ */\n+int\n+rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)\n+{\n+\tnotify_ops = ops;\n+\n+\treturn 0;\n+}\n",
    "prefixes": [
        "dpdk-dev",
        "1/2"
    ]
}