get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all of its writable fields.
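These operations are plain HTTP methods against the patch URL, so any HTTP client can exercise them. Below is a minimal sketch using Python's requests library; the token value and the target state are placeholder assumptions, and write access (put/patch) requires an account with maintainer rights on the project.

import requests

URL = "http://patches.dpdk.org/api/patches/294/"
TOKEN = "0123456789abcdef"  # hypothetical Patchwork API token

# get: fetch the patch; reads need no authentication.
resp = requests.get(URL)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "->", patch["state"])

# patch: partially update the patch -- here, only its state field
# ("accepted" is an assumed target state slug).
resp = requests.patch(
    URL,
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "accepted"},
)
print(resp.status_code)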

GET /api/patches/294/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 294,
    "url": "http://patches.dpdk.org/api/patches/294/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1409648131-4301-2-git-send-email-huawei.xie@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1409648131-4301-2-git-send-email-huawei.xie@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1409648131-4301-2-git-send-email-huawei.xie@intel.com",
    "date": "2014-09-02T08:55:29",
    "name": "[dpdk-dev,1/3] examples/vhost: remove vhost example",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "bfb0d9e9ec24c598115f386daa92f41fceb8e226",
    "submitter": {
        "id": 16,
        "url": "http://patches.dpdk.org/api/people/16/?format=api",
        "name": "Huawei Xie",
        "email": "huawei.xie@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1409648131-4301-2-git-send-email-huawei.xie@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/294/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/294/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<hxie5@shecgisg003.sh.intel.com>",
        "Received": [
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby dpdk.org (Postfix) with ESMTP id 6243F683B\n\tfor <dev@dpdk.org>; Tue,  2 Sep 2014 10:51:24 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga103.fm.intel.com with ESMTP; 02 Sep 2014 01:47:38 -0700",
            "from shvmail01.sh.intel.com ([10.239.29.42])\n\tby FMSMGA003.fm.intel.com with ESMTP; 02 Sep 2014 01:51:13 -0700",
            "from shecgisg003.sh.intel.com (shecgisg003.sh.intel.com\n\t[10.239.29.90])\n\tby shvmail01.sh.intel.com with ESMTP id s828ti9V027256;\n\tTue, 2 Sep 2014 16:55:44 +0800",
            "from shecgisg003.sh.intel.com (localhost [127.0.0.1])\n\tby shecgisg003.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP\n\tid s828tg8W004417; Tue, 2 Sep 2014 16:55:44 +0800",
            "(from hxie5@localhost)\n\tby shecgisg003.sh.intel.com (8.13.6/8.13.6/Submit) id s828tfAS004413; \n\tTue, 2 Sep 2014 16:55:41 +0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"4.97,862,1389772800\"; d=\"scan'208\";a=\"380180836\"",
        "From": "Huawei Xie <huawei.xie@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Tue,  2 Sep 2014 16:55:29 +0800",
        "Message-Id": "<1409648131-4301-2-git-send-email-huawei.xie@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": "<1409648131-4301-1-git-send-email-huawei.xie@intel.com>",
        "References": "<1409648131-4301-1-git-send-email-huawei.xie@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 1/3] examples/vhost: remove vhost example",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "X-List-Received-Date": "Tue, 02 Sep 2014 08:51:26 -0000"
    },
    "content": "Signed-off-by: Huawei Xie <huawei.xie@intel.com>\nAcked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\nAcked-by: Thomos Long <thomas.long@intel.com>\n---\n examples/vhost/Makefile                    |   60 -\n examples/vhost/eventfd_link/Makefile       |   39 -\n examples/vhost/eventfd_link/eventfd_link.c |  205 --\n examples/vhost/eventfd_link/eventfd_link.h |   79 -\n examples/vhost/libvirt/qemu-wrap.py        |  367 ---\n examples/vhost/main.c                      | 3722 ----------------------------\n examples/vhost/main.h                      |   86 -\n examples/vhost/vhost-net-cdev.c            |  367 ---\n examples/vhost/vhost-net-cdev.h            |   83 -\n examples/vhost/virtio-net.c                | 1165 ---------\n examples/vhost/virtio-net.h                |  161 --\n 11 files changed, 6334 deletions(-)\n delete mode 100644 examples/vhost/Makefile\n delete mode 100644 examples/vhost/eventfd_link/Makefile\n delete mode 100644 examples/vhost/eventfd_link/eventfd_link.c\n delete mode 100644 examples/vhost/eventfd_link/eventfd_link.h\n delete mode 100755 examples/vhost/libvirt/qemu-wrap.py\n delete mode 100644 examples/vhost/main.c\n delete mode 100644 examples/vhost/main.h\n delete mode 100644 examples/vhost/vhost-net-cdev.c\n delete mode 100644 examples/vhost/vhost-net-cdev.h\n delete mode 100644 examples/vhost/virtio-net.c\n delete mode 100644 examples/vhost/virtio-net.h",
    "diff": "diff --git a/examples/vhost/Makefile b/examples/vhost/Makefile\ndeleted file mode 100644\nindex f45f83f..0000000\n--- a/examples/vhost/Makefile\n+++ /dev/null\n@@ -1,60 +0,0 @@\n-#   BSD LICENSE\n-#\n-#   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n-#   All rights reserved.\n-#\n-#   Redistribution and use in source and binary forms, with or without\n-#   modification, are permitted provided that the following conditions\n-#   are met:\n-#\n-#     * Redistributions of source code must retain the above copyright\n-#       notice, this list of conditions and the following disclaimer.\n-#     * Redistributions in binary form must reproduce the above copyright\n-#       notice, this list of conditions and the following disclaimer in\n-#       the documentation and/or other materials provided with the\n-#       distribution.\n-#     * Neither the name of Intel Corporation nor the names of its\n-#       contributors may be used to endorse or promote products derived\n-#       from this software without specific prior written permission.\n-#\n-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n-#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n-\n-ifeq ($(RTE_SDK),)\n-$(error \"Please define RTE_SDK environment variable\")\n-endif\n-\n-# Default target, can be overriden by command line or environment\n-RTE_TARGET ?= x86_64-native-linuxapp-gcc\n-\n-include $(RTE_SDK)/mk/rte.vars.mk\n-\n-ifneq ($(CONFIG_RTE_EXEC_ENV),\"linuxapp\")\n-$(info This application can only operate in a linuxapp environment, \\\n-please change the definition of the RTE_TARGET environment variable)\n-all:\n-else\n-\n-# binary name\n-APP = vhost-switch\n-\n-# all source are stored in SRCS-y\n-#SRCS-y := cusedrv.c loopback-userspace.c\n-SRCS-y := main.c virtio-net.c vhost-net-cdev.c\n-\n-CFLAGS += -O2 -I/usr/local/include -D_FILE_OFFSET_BITS=64 -Wno-unused-parameter\n-CFLAGS += $(WERROR_FLAGS)\n-LDFLAGS += -lfuse\n-\n-include $(RTE_SDK)/mk/rte.extapp.mk\n-\n-endif\ndiff --git a/examples/vhost/eventfd_link/Makefile b/examples/vhost/eventfd_link/Makefile\ndeleted file mode 100644\nindex fc3927b..0000000\n--- a/examples/vhost/eventfd_link/Makefile\n+++ /dev/null\n@@ -1,39 +0,0 @@\n-#   BSD LICENSE\n-#\n-#   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n-#   All rights reserved.\n-#\n-#   Redistribution and use in source and binary forms, with or without\n-#   modification, are permitted provided that the following conditions\n-#   are met:\n-#\n-#     * Redistributions of source code must retain the above copyright\n-#       notice, this list of conditions and the following disclaimer.\n-#     * Redistributions in binary form must reproduce the above copyright\n-#       notice, this list of conditions and the following disclaimer in\n-#       the documentation and/or other materials provided with the\n-#       distribution.\n-#     * Neither the name of Intel Corporation nor the names of its\n-#       contributors may be used to endorse or promote products derived\n-#       from this software without specific prior written permission.\n-#\n-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n-#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n-\n-obj-m += eventfd_link.o\n-\n-\n-all:\n-\tmake -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules\n-\n-clean:\n-\tmake -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean\ndiff --git a/examples/vhost/eventfd_link/eventfd_link.c b/examples/vhost/eventfd_link/eventfd_link.c\ndeleted file mode 100644\nindex fc0653a..0000000\n--- a/examples/vhost/eventfd_link/eventfd_link.c\n+++ /dev/null\n@@ -1,205 +0,0 @@\n-/*-\n- *  * GPL LICENSE SUMMARY\n- *  *\n- *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n- *  *\n- *  *   This program is free software; you can redistribute it and/or modify\n- *  *   it under the terms of version 2 of the GNU General Public License as\n- *  *   published by the Free Software Foundation.\n- *  *\n- *  *   This program is distributed in the hope that it will be useful, but\n- *  *   WITHOUT ANY WARRANTY; without even the implied warranty of\n- *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU\n- *  *   General Public License for more details.\n- *  *\n- *  *   You should have received a copy of the GNU General Public License\n- *  *   along with this program; if not, write to the Free Software\n- *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\n- *  *   The full GNU General Public License is included in this distribution\n- *  *   in the file called LICENSE.GPL.\n- *  *\n- *  *   Contact Information:\n- *  *   Intel Corporation\n- *   */\n-\n-#include <linux/eventfd.h>\n-#include <linux/miscdevice.h>\n-#include <linux/module.h>\n-#include <linux/moduleparam.h>\n-#include <linux/rcupdate.h>\n-#include <linux/file.h>\n-#include <linux/slab.h>\n-#include <linux/fs.h>\n-#include <linux/mmu_context.h>\n-#include <linux/sched.h>\n-#include <asm/mmu_context.h>\n-#include <linux/fdtable.h>\n-\n-#include \"eventfd_link.h\"\n-\n-\n-/*\n- * get_files_struct is copied from fs/file.c\n- */\n-struct files_struct *\n-get_files_struct (struct task_struct *task)\n-{\n-\tstruct files_struct *files;\n-\n-\ttask_lock (task);\n-\tfiles = task->files;\n-\tif (files)\n-\t\tatomic_inc (&files->count);\n-\ttask_unlock (task);\n-\n-\treturn files;\n-}\n-\n-/*\n- * put_files_struct is extracted from fs/file.c\n- */\n-void\n-put_files_struct (struct files_struct *files)\n-{\n-\tif (atomic_dec_and_test (&files->count))\n-\t{\n-\t\tBUG ();\n-\t}\n-}\n-\n-\n-static long\n-eventfd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)\n-{\n-\tvoid __user *argp = (void __user *) arg;\n-\tstruct task_struct *task_target = NULL;\n-\tstruct file *file;\n-\tstruct files_struct *files;\n-\tstruct fdtable *fdt;\n-\tstruct eventfd_copy eventfd_copy;\n-\n-\tswitch (ioctl)\n-\t{\n-\t\tcase EVENTFD_COPY:\n-\t\t\tif (copy_from_user (&eventfd_copy, argp, sizeof (struct eventfd_copy)))\n-\t\t\t\treturn -EFAULT;\n-\n-\t\t\t/*\n-\t\t\t * Find the task struct for the target pid\n-\t\t\t */\n-\t\t\ttask_target =\n-\t\t\t\tpid_task (find_vpid (eventfd_copy.target_pid), PIDTYPE_PID);\n-\t\t\tif (task_target == NULL)\n-\t\t\t{\n-\t\t\t\tprintk (KERN_DEBUG \"Failed to get mem ctx for target pid\\n\");\n-\t\t\t\treturn -EFAULT;\n-\t\t\t}\n-\n-\t\t\tfiles = get_files_struct (current);\n-\t\t\tif (files == NULL)\n-\t\t\t{\n-\t\t\t\tprintk (KERN_DEBUG \"Failed to get files struct\\n\");\n-\t\t\t\treturn -EFAULT;\n-\t\t\t}\n-\n-\t\t\trcu_read_lock ();\n-\t\t\tfile = fcheck_files (files, eventfd_copy.source_fd);\n-\t\t\tif (file)\n-\t\t\t{\n-\t\t\t\tif (file->f_mode & FMODE_PATH\n-\t\t\t\t\t\t|| !atomic_long_inc_not_zero (&file->f_count))\n-\t\t\t\t\tfile = NULL;\n-\t\t\t}\n-\t\t\trcu_read_unlock ();\n-\t\t\tput_files_struct (files);\n-\n-\t\t\tif (file == NULL)\n-\t\t\t{\n-\t\t\t\tprintk (KERN_DEBUG \"Failed to get file from source pid\\n\");\n-\t\t\t\treturn 0;\n-\t\t\t}\n-\n-\t\t\t/*\n-\t\t\t * Release the existing eventfd in the source process\n-\t\t\t */\n-\t\t\tspin_lock (&files->file_lock);\n-\t\t\tfilp_close (file, files);\n-\t\t\tfdt = files_fdtable (files);\n-\t\t\tfdt->fd[eventfd_copy.source_fd] = NULL;\n-\t\t\tspin_unlock (&files->file_lock);\n-\n-\t\t\t/*\n-\t\t\t * Find the file struct associated with the target fd.\n-\t\t\t */\n-\n-\t\t\tfiles = get_files_struct (task_target);\n-\t\t\tif (files == NULL)\n-\t\t\t{\n-\t\t\t\tprintk (KERN_DEBUG \"Failed to get files struct\\n\");\n-\t\t\t\treturn -EFAULT;\n-\t\t\t}\n-\n-\t\t\trcu_read_lock ();\n-\t\t\tfile = fcheck_files (files, eventfd_copy.target_fd);\n-\t\t\tif (file)\n-\t\t\t{\n-\t\t\t\tif (file->f_mode & 
FMODE_PATH\n-\t\t\t\t\t\t|| !atomic_long_inc_not_zero (&file->f_count))\n-\t\t\t\t\tfile = NULL;\n-\t\t\t}\n-\t\t\trcu_read_unlock ();\n-\t\t\tput_files_struct (files);\n-\n-\t\t\tif (file == NULL)\n-\t\t\t{\n-\t\t\t\tprintk (KERN_DEBUG \"Failed to get file from target pid\\n\");\n-\t\t\t\treturn 0;\n-\t\t\t}\n-\n-\n-\t\t\t/*\n-\t\t\t * Install the file struct from the target process into the\n-\t\t\t * file desciptor of the source process,\n-\t\t\t */\n-\n-\t\t\tfd_install (eventfd_copy.source_fd, file);\n-\n-\t\t\treturn 0;\n-\n-\t\tdefault:\n-\t\t\treturn -ENOIOCTLCMD;\n-\t}\n-}\n-\n-static const struct file_operations eventfd_link_fops = {\n-\t.owner = THIS_MODULE,\n-\t.unlocked_ioctl = eventfd_link_ioctl,\n-};\n-\n-\n-static struct miscdevice eventfd_link_misc = {\n-\t.name = \"eventfd-link\",\n-\t.fops = &eventfd_link_fops,\n-};\n-\n-static int __init\n-eventfd_link_init (void)\n-{\n-\treturn misc_register (&eventfd_link_misc);\n-}\n-\n-module_init (eventfd_link_init);\n-\n-static void __exit\n-eventfd_link_exit (void)\n-{\n-\tmisc_deregister (&eventfd_link_misc);\n-}\n-\n-module_exit (eventfd_link_exit);\n-\n-MODULE_VERSION (\"0.0.1\");\n-MODULE_LICENSE (\"GPL v2\");\n-MODULE_AUTHOR (\"Anthony Fee\");\n-MODULE_DESCRIPTION (\"Link eventfd\");\n-MODULE_ALIAS (\"devname:eventfd-link\");\ndiff --git a/examples/vhost/eventfd_link/eventfd_link.h b/examples/vhost/eventfd_link/eventfd_link.h\ndeleted file mode 100644\nindex a32a8dd..0000000\n--- a/examples/vhost/eventfd_link/eventfd_link.h\n+++ /dev/null\n@@ -1,79 +0,0 @@\n-/*-\n- *  * This file is provided under a dual BSD/GPLv2 license.  When using or\n- *  *   redistributing this file, you may do so under either license.\n- *  *\n- *  *   GPL LICENSE SUMMARY\n- *  *\n- *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n- *  *\n- *  *   This program is free software; you can redistribute it and/or modify\n- *  *   it under the terms of version 2 of the GNU General Public License as\n- *  *   published by the Free Software Foundation.\n- *  *\n- *  *   This program is distributed in the hope that it will be useful, but\n- *  *   WITHOUT ANY WARRANTY; without even the implied warranty of\n- *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n- *  *   General Public License for more details.\n- *  *\n- *  *   You should have received a copy of the GNU General Public License\n- *  *   along with this program; if not, write to the Free Software\n- *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\n- *  *   The full GNU General Public License is included in this distribution\n- *  *   in the file called LICENSE.GPL.\n- *  *\n- *  *   Contact Information:\n- *  *   Intel Corporation\n- *  *\n- *  *   BSD LICENSE\n- *  *\n- *  *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n- *  *   All rights reserved.\n- *  *\n- *  *   Redistribution and use in source and binary forms, with or without\n- *  *   modification, are permitted provided that the following conditions\n- *  *   are met:\n- *  *\n- *  *     * Redistributions of source code must retain the above copyright\n- *  *       notice, this list of conditions and the following disclaimer.\n- *  *     * Redistributions in binary form must reproduce the above copyright\n- *  *       notice, this list of conditions and the following disclaimer in\n- *  *       the documentation and/or other materials provided with the\n- *  *       distribution.\n- *  *     * Neither the name of Intel Corporation nor the names of its\n- *  *       contributors may be used to endorse or promote products derived\n- *  *       from this software without specific prior written permission.\n- *  *\n- *  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *  *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n- *  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- *  *\n- *   */\n-\n-#ifndef _EVENTFD_LINK_H_\n-#define _EVENTFD_LINK_H_\n-\n-/*\n- *\tioctl to copy an fd entry in calling process to an fd in a target process\n- */\n-#define EVENTFD_COPY 1\n-\n-/*\n- *\targuements for the EVENTFD_COPY ioctl\n- */\n-struct eventfd_copy {\n-\t// fd in the target pid\n-    unsigned target_fd;\n-\t// fd in the calling pid\n-    unsigned source_fd;\n-\t// pid of the target pid\n-    pid_t target_pid;\n-};\n-#endif /* _EVENTFD_LINK_H_ */\ndiff --git a/examples/vhost/libvirt/qemu-wrap.py b/examples/vhost/libvirt/qemu-wrap.py\ndeleted file mode 100755\nindex e2d68a0..0000000\n--- a/examples/vhost/libvirt/qemu-wrap.py\n+++ /dev/null\n@@ -1,367 +0,0 @@\n-#!/usr/bin/python\n-#/*\n-# *   BSD LICENSE\n-# *\n-# *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n-# *   All rights reserved.\n-# *\n-# *   Redistribution and use in source and binary forms, with or without\n-# *   modification, are permitted provided that the following conditions\n-# *   are met:\n-# *\n-# *     * Redistributions of source code must retain the above copyright\n-# *       notice, this list of conditions and the following disclaimer.\n-# *     * Redistributions in binary form must reproduce the above copyright\n-# *       notice, this list of conditions and the following disclaimer in\n-# *       the documentation and/or other materials provided with the\n-# *       distribution.\n-# *     * Neither the name of Intel Corporation nor the names of its\n-# *       contributors may be used to endorse or promote products derived\n-# *       from this software without specific prior written permission.\n-# *\n-# *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n-# *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n-# *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n-# *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n-# *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n-# *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n-# *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n-# *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n-# *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n-# *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n-# *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n-# */\n-\n-#####################################################################\n-# This script is designed to modify the call to the QEMU emulator\n-# to support userspace vhost when starting a guest machine through\n-# libvirt with vhost enabled. The steps to enable this are as follows\n-# and should be run as root:\n-#\n-# 1. Place this script in a libvirtd's binary search PATH ($PATH)\n-#    A good location would be in the same directory that the QEMU\n-#    binary is located\n-#\n-# 2. Ensure that the script has the same owner/group and file\n-#    permissions as the QEMU binary\n-#\n-# 3. Update the VM xml file using \"virsh edit VM.xml\"\n-#\n-#    3.a) Set the VM to use the launch script\n-#\n-#    \tSet the emulator path contained in the\n-#\t\t<emulator><emulator/> tags\n-#\n-#    \te.g replace <emulator>/usr/bin/qemu-kvm<emulator/>\n-#        with    <emulator>/usr/bin/qemu-wrap.py<emulator/>\n-#\n-#\t 3.b) Set the VM's device's to use vhost-net offload\n-#\n-#\t\t<interface type=\"network\">\n-#       \t<model type=\"virtio\"/>\n-#       \t<driver name=\"vhost\"/>\n-#\t\t<interface/>\n-#\n-# 4. Enable libvirt to access our userpace device file by adding it to\n-#    controllers cgroup for libvirtd using the following steps\n-#\n-#   4.a) In /etc/libvirt/qemu.conf add/edit the following lines:\n-#         1) cgroup_controllers = [ ... \"devices\", ... 
]\n-#\t\t  2) clear_emulator_capabilities = 0\n-#         3) user = \"root\"\n-#         4) group = \"root\"\n-#         5) cgroup_device_acl = [\n-#                \"/dev/null\", \"/dev/full\", \"/dev/zero\",\n-#                \"/dev/random\", \"/dev/urandom\",\n-#                \"/dev/ptmx\", \"/dev/kvm\", \"/dev/kqemu\",\n-#                \"/dev/rtc\", \"/dev/hpet\", \"/dev/net/tun\",\n-#                \"/dev/<devbase-name>-<index>\",\n-#            ]\n-#\n-#   4.b) Disable SELinux or set to permissive mode\n-#\n-#   4.c) Mount cgroup device controller\n-#        \"mkdir /dev/cgroup\"\n-#        \"mount -t cgroup none /dev/cgroup -o devices\"\n-#\n-#   4.d) Set hugetlbfs_mount variable - ( Optional )\n-#        VMs using userspace vhost must use hugepage backed\n-#        memory. This can be enabled in the libvirt XML\n-#        config by adding a memory backing section to the\n-#        XML config e.g.\n-#             <memoryBacking>\n-#             <hugepages/>\n-#             </memoryBacking>\n-#        This memory backing section should be added after the\n-#        <memory> and <currentMemory> sections. This will add\n-#        flags \"-mem-prealloc -mem-path <path>\" to the QEMU\n-#        command line. The hugetlbfs_mount variable can be used\n-#        to override the default <path> passed through by libvirt.\n-#\n-#        if \"-mem-prealloc\" or \"-mem-path <path>\" are not passed\n-#        through and a vhost device is detected then these options will\n-#        be automatically added by this script. This script will detect\n-#        the system hugetlbfs mount point to be used for <path>. The\n-#        default <path> for this script can be overidden by the\n-#        hugetlbfs_dir variable in the configuration section of this script.\n-#\n-#\n-#   4.e) Restart the libvirtd system process\n-#        e.g. on Fedora \"systemctl restart libvirtd.service\"\n-#\n-#\n-#   4.f) Edit the Configuration Parameters section of this script\n-#        to point to the correct emulator location and set any\n-#        addition options\n-#\n-# The script modifies the libvirtd Qemu call by modifying/adding\n-# options based on the configuration parameters below.\n-# NOTE:\n-#     emul_path and us_vhost_path must be set\n-#     All other parameters are optional\n-#####################################################################\n-\n-\n-#############################################\n-# Configuration Parameters\n-#############################################\n-#Path to QEMU binary\n-emul_path = \"/usr/local/bin/qemu-system-x86_64\"\n-\n-#Path to userspace vhost device file\n-# This filename should match the --dev-basename --dev-index parameters of\n-# the command used to launch the userspace vhost sample application e.g.\n-# if the sample app lauch command is:\n-#    ./build/vhost-switch ..... --dev-basename usvhost --dev-index 1\n-# then this variable should be set to:\n-#   us_vhost_path = \"/dev/usvhost-1\"\n-us_vhost_path = \"/dev/usvhost-1\"\n-\n-#List of additional user defined emulation options. 
These options will\n-#be added to all Qemu calls\n-emul_opts_user = []\n-\n-#List of additional user defined emulation options for vhost only.\n-#These options will only be added to vhost enabled guests\n-emul_opts_user_vhost = []\n-\n-#For all VHOST enabled VMs, the VM memory is preallocated from hugetlbfs\n-# Set this variable to one to enable this option for all VMs\n-use_huge_all = 0\n-\n-#Instead of autodetecting, override the hugetlbfs directory by setting\n-#this variable\n-hugetlbfs_dir = \"\"\n-\n-#############################################\n-\n-\n-#############################################\n-# ****** Do Not Modify Below this Line ******\n-#############################################\n-\n-import sys, os, subprocess\n-\n-\n-#List of open userspace vhost file descriptors\n-fd_list = []\n-\n-#additional virtio device flags when using userspace vhost\n-vhost_flags = [ \"csum=off\",\n-                \"gso=off\",\n-                \"guest_tso4=off\",\n-                \"guest_tso6=off\",\n-                \"guest_ecn=off\"\n-              ]\n-\n-\n-#############################################\n-# Find the system hugefile mount point.\n-# Note:\n-# if multiple hugetlbfs mount points exist\n-# then the first one found will be used\n-#############################################\n-def find_huge_mount():\n-\n-    if (len(hugetlbfs_dir)):\n-        return hugetlbfs_dir\n-\n-    huge_mount = \"\"\n-\n-    if (os.access(\"/proc/mounts\", os.F_OK)):\n-        f = open(\"/proc/mounts\", \"r\")\n-        line = f.readline()\n-        while line:\n-            line_split = line.split(\" \")\n-            if line_split[2] == 'hugetlbfs':\n-                huge_mount = line_split[1]\n-                break\n-            line = f.readline()\n-    else:\n-        print \"/proc/mounts not found\"\n-        exit (1)\n-\n-    f.close\n-    if len(huge_mount) == 0:\n-        print \"Failed to find hugetlbfs mount point\"\n-        exit (1)\n-\n-    return huge_mount\n-\n-\n-#############################################\n-# Get a userspace Vhost file descriptor\n-#############################################\n-def get_vhost_fd():\n-\n-    if (os.access(us_vhost_path, os.F_OK)):\n-        fd = os.open( us_vhost_path, os.O_RDWR)\n-    else:\n-        print (\"US-Vhost file %s not found\" %us_vhost_path)\n-        exit (1)\n-\n-    return fd\n-\n-\n-#############################################\n-# Check for vhostfd. 
if found then replace\n-# with our own vhost fd and append any vhost\n-# flags onto the end\n-#############################################\n-def modify_netdev_arg(arg):\n-\t\n-    global fd_list\n-    vhost_in_use = 0\n-    s = ''\n-    new_opts = []\n-    netdev_opts = arg.split(\",\")\n-\n-    for opt in netdev_opts:\n-        #check if vhost is used\n-        if \"vhost\" == opt[:5]:\n-            vhost_in_use = 1\n-        else:\n-            new_opts.append(opt)\n-\n-    #if using vhost append vhost options\n-    if vhost_in_use == 1:\n-        #append vhost on option\n-        new_opts.append('vhost=on')\n-        #append vhostfd ption\n-        new_fd = get_vhost_fd()\n-        new_opts.append('vhostfd=' + str(new_fd))\n-        fd_list.append(new_fd)\n-\n-    #concatenate all options\n-    for opt in new_opts:\n-        if len(s) > 0:\n-\t\t\ts+=','\n-\n-        s+=opt\n-\n-    return s\t\n-\n-\n-#############################################\n-# Main\n-#############################################\n-def main():\n-\n-    global fd_list\n-    global vhost_in_use\n-    new_args = []\n-    num_cmd_args = len(sys.argv)\n-    emul_call = ''\n-    mem_prealloc_set = 0\n-    mem_path_set = 0\n-    num = 0;\n-\n-    #parse the parameters\n-    while (num < num_cmd_args):\n-        arg = sys.argv[num]\n-\n-\t\t#Check netdev +1 parameter for vhostfd\n-        if arg == '-netdev':\n-            num_vhost_devs = len(fd_list)\n-            new_args.append(arg)\n-\n-            num+=1\n-            arg = sys.argv[num]\n-            mod_arg = modify_netdev_arg(arg)\n-            new_args.append(mod_arg)\n-\n-            #append vhost flags if this is a vhost device\n-            # and -device is the next arg\n-            # i.e -device -opt1,-opt2,...,-opt3,%vhost\n-            if (num_vhost_devs < len(fd_list)):\n-                num+=1\n-                arg = sys.argv[num]\n-                if arg == '-device':\n-                    new_args.append(arg)\n-                    num+=1\n-                    new_arg = sys.argv[num]\n-                    for flag in vhost_flags:\n-                        new_arg = ''.join([new_arg,',',flag])\n-                    new_args.append(new_arg)\n-                else:\n-                    new_args.append(arg)\n-        elif arg == '-mem-prealloc':\n-            mem_prealloc_set = 1\n-            new_args.append(arg)\n-        elif arg == '-mem-path':\n-            mem_path_set = 1\n-            new_args.append(arg)\n-\n-        else:\n-            new_args.append(arg)\n-\n-        num+=1\n-\n-    #Set Qemu binary location\n-    emul_call+=emul_path\n-    emul_call+=\" \"\n-\n-    #Add prealloc mem options if using vhost and not already added\n-    if ((len(fd_list) > 0) and (mem_prealloc_set == 0)):\n-        emul_call += \"-mem-prealloc \"\n-\n-    #Add mempath mem options if using vhost and not already added\n-    if ((len(fd_list) > 0) and (mem_path_set == 0)):\n-        #Detect and add hugetlbfs mount point\n-        mp = find_huge_mount()\n-        mp = \"\".join([\"-mem-path \", mp])\n-        emul_call += mp\n-        emul_call += \" \"\n-\n-\n-    #add user options\n-    for opt in emul_opts_user:\n-        emul_call += opt\n-        emul_call += \" \"\n-\n-    #Add add user vhost only options\n-    if len(fd_list) > 0:\n-        for opt in emul_opts_user_vhost:\n-            emul_call += opt\n-            emul_call += \" \"\n-\n-    #Add updated libvirt options\n-    iter_args = iter(new_args)\n-    #skip 1st arg i.e. 
call to this script\n-    next(iter_args)\n-    for arg in iter_args:\n-        emul_call+=str(arg)\n-        emul_call+= \" \"\n-\n-    #Call QEMU\n-    subprocess.call(emul_call, shell=True)\n-\n-\n-    #Close usvhost files\n-    for fd in fd_list:\n-        os.close(fd)\n-\n-\n-if __name__ == \"__main__\":\n-    main()\n-\ndiff --git a/examples/vhost/main.c b/examples/vhost/main.c\ndeleted file mode 100644\nindex 7d9e6a2..0000000\n--- a/examples/vhost/main.c\n+++ /dev/null\n@@ -1,3722 +0,0 @@\n-/*-\n- *   BSD LICENSE\n- *\n- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n- *   All rights reserved.\n- *\n- *   Redistribution and use in source and binary forms, with or without\n- *   modification, are permitted provided that the following conditions\n- *   are met:\n- *\n- *     * Redistributions of source code must retain the above copyright\n- *       notice, this list of conditions and the following disclaimer.\n- *     * Redistributions in binary form must reproduce the above copyright\n- *       notice, this list of conditions and the following disclaimer in\n- *       the documentation and/or other materials provided with the\n- *       distribution.\n- *     * Neither the name of Intel Corporation nor the names of its\n- *       contributors may be used to endorse or promote products derived\n- *       from this software without specific prior written permission.\n- *\n- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- */\n-\n-#include <arpa/inet.h>\n-#include <getopt.h>\n-#include <linux/if_ether.h>\n-#include <linux/if_vlan.h>\n-#include <linux/virtio_net.h>\n-#include <linux/virtio_ring.h>\n-#include <signal.h>\n-#include <stdint.h>\n-#include <sys/eventfd.h>\n-#include <sys/param.h>\n-#include <unistd.h>\n-\n-#include <rte_atomic.h>\n-#include <rte_cycles.h>\n-#include <rte_ethdev.h>\n-#include <rte_log.h>\n-#include <rte_string_fns.h>\n-#include <rte_malloc.h>\n-\n-#include \"main.h\"\n-#include \"virtio-net.h\"\n-#include \"vhost-net-cdev.h\"\n-\n-#define MAX_QUEUES 128\n-\n-/* the maximum number of external ports supported */\n-#define MAX_SUP_PORTS 1\n-\n-/*\n- * Calculate the number of buffers needed per port\n- */\n-#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) +  \t\t\\\n-\t\t\t\t\t\t\t(num_switching_cores*MAX_PKT_BURST) +  \t\t\t\\\n-\t\t\t\t\t\t\t(num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\\\n-\t\t\t\t\t\t\t(num_switching_cores*MBUF_CACHE_SIZE))\n-\n-#define MBUF_CACHE_SIZE 128\n-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)\n-\n-/*\n- * No frame data buffer allocated from host are required for zero copy\n- * implementation, guest will allocate the frame data buffer, and vhost\n- * directly use it.\n- */\n-#define 
VIRTIO_DESCRIPTOR_LEN_ZCP 1518\n-#define MBUF_SIZE_ZCP (VIRTIO_DESCRIPTOR_LEN_ZCP + sizeof(struct rte_mbuf) \\\n-\t+ RTE_PKTMBUF_HEADROOM)\n-#define MBUF_CACHE_SIZE_ZCP 0\n-\n-/*\n- * RX and TX Prefetch, Host, and Write-back threshold values should be\n- * carefully set for optimal performance. Consult the network\n- * controller's datasheet and supporting DPDK documentation for guidance\n- * on how these parameters should be set.\n- */\n-#define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */\n-#define RX_HTHRESH 8 /* Default values of RX host threshold reg. */\n-#define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */\n-\n-/*\n- * These default values are optimized for use with the Intel(R) 82599 10 GbE\n- * Controller and the DPDK ixgbe PMD. Consider using other values for other\n- * network controllers and/or network drivers.\n- */\n-#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */\n-#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */\n-#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */\n-\n-#define MAX_PKT_BURST 32 \t\t/* Max burst size for RX/TX */\n-#define MAX_MRG_PKT_BURST 16 \t/* Max burst for merge buffers. Set to 1 due to performance issue. */\n-#define BURST_TX_DRAIN_US 100 \t/* TX drain every ~100us */\n-\n-#define BURST_RX_WAIT_US 15 \t/* Defines how long we wait between retries on RX */\n-#define BURST_RX_RETRIES 4\t\t/* Number of retries on RX. */\n-\n-#define JUMBO_FRAME_MAX_SIZE    0x2600\n-\n-/* State of virtio device. */\n-#define DEVICE_MAC_LEARNING 0\n-#define DEVICE_RX\t\t\t1\n-#define DEVICE_SAFE_REMOVE\t2\n-\n-/* Config_core_flag status definitions. */\n-#define REQUEST_DEV_REMOVAL 1\n-#define ACK_DEV_REMOVAL 0\n-\n-/* Configurable number of RX/TX ring descriptors */\n-#define RTE_TEST_RX_DESC_DEFAULT 1024\n-#define RTE_TEST_TX_DESC_DEFAULT 512\n-\n-/*\n- * Need refine these 2 macros for legacy and DPDK based front end:\n- * Max vring avail descriptor/entries from guest - MAX_PKT_BURST\n- * And then adjust power 2.\n- */\n-/*\n- * For legacy front end, 128 descriptors,\n- * half for virtio header, another half for mbuf.\n- */\n-#define RTE_TEST_RX_DESC_DEFAULT_ZCP 32   /* legacy: 32, DPDK virt FE: 128. */\n-#define RTE_TEST_TX_DESC_DEFAULT_ZCP 64   /* legacy: 64, DPDK virt FE: 64.  */\n-\n-/* Get first 4 bytes in mbuf headroom. */\n-#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \\\n-\t\t+ sizeof(struct rte_mbuf)))\n-\n-/* true if x is a power of 2 */\n-#define POWEROF2(x) ((((x)-1) & (x)) == 0)\n-\n-#define INVALID_PORT_ID 0xFF\n-\n-/* Max number of devices. Limited by vmdq. */\n-#define MAX_DEVICES 64\n-\n-/* Size of buffers used for snprintfs. */\n-#define MAX_PRINT_BUFF 6072\n-\n-/* Maximum character device basename size. */\n-#define MAX_BASENAME_SZ 10\n-\n-/* Maximum long option length for option parsing. */\n-#define MAX_LONG_OPT_SZ 64\n-\n-/* Used to compare MAC addresses. */\n-#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL\n-\n-/* Number of descriptors per cacheline. 
*/\n-#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))\n-\n-/* mask of enabled ports */\n-static uint32_t enabled_port_mask = 0;\n-\n-/*Number of switching cores enabled*/\n-static uint32_t num_switching_cores = 0;\n-\n-/* number of devices/queues to support*/\n-static uint32_t num_queues = 0;\n-uint32_t num_devices = 0;\n-\n-/*\n- * Enable zero copy, pkts buffer will directly dma to hw descriptor,\n- * disabled on default.\n- */\n-static uint32_t zero_copy;\n-\n-/* number of descriptors to apply*/\n-static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;\n-static uint32_t num_tx_descriptor = RTE_TEST_TX_DESC_DEFAULT_ZCP;\n-\n-/* max ring descriptor, ixgbe, i40e, e1000 all are 4096. */\n-#define MAX_RING_DESC 4096\n-\n-struct vpool {\n-\tstruct rte_mempool *pool;\n-\tstruct rte_ring *ring;\n-\tuint32_t buf_size;\n-} vpool_array[MAX_QUEUES+MAX_QUEUES];\n-\n-/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */\n-typedef enum {\n-\tVM2VM_DISABLED = 0,\n-\tVM2VM_SOFTWARE = 1,\n-\tVM2VM_HARDWARE = 2,\n-\tVM2VM_LAST\n-} vm2vm_type;\n-static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;\n-\n-/* The type of host physical address translated from guest physical address. */\n-typedef enum {\n-\tPHYS_ADDR_CONTINUOUS = 0,\n-\tPHYS_ADDR_CROSS_SUBREG = 1,\n-\tPHYS_ADDR_INVALID = 2,\n-\tPHYS_ADDR_LAST\n-} hpa_type;\n-\n-/* Enable stats. */\n-static uint32_t enable_stats = 0;\n-/* Enable retries on RX. */\n-static uint32_t enable_retry = 1;\n-/* Specify timeout (in useconds) between retries on RX. */\n-static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;\n-/* Specify the number of retries on RX. */\n-static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;\n-\n-/* Character device basename. Can be set by user. */\n-static char dev_basename[MAX_BASENAME_SZ] = \"vhost-net\";\n-\n-/* Charater device index. Can be set by user. */\n-static uint32_t dev_index = 0;\n-\n-/* This can be set by the user so it is made available here. */\n-extern uint64_t VHOST_FEATURES;\n-\n-/* Default configuration for rx and tx thresholds etc. */\n-static struct rte_eth_rxconf rx_conf_default = {\n-\t.rx_thresh = {\n-\t\t.pthresh = RX_PTHRESH,\n-\t\t.hthresh = RX_HTHRESH,\n-\t\t.wthresh = RX_WTHRESH,\n-\t},\n-\t.rx_drop_en = 1,\n-};\n-\n-/*\n- * These default values are optimized for use with the Intel(R) 82599 10 GbE\n- * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other\n- * network controllers and/or network drivers.\n- */\n-static struct rte_eth_txconf tx_conf_default = {\n-\t.tx_thresh = {\n-\t\t.pthresh = TX_PTHRESH,\n-\t\t.hthresh = TX_HTHRESH,\n-\t\t.wthresh = TX_WTHRESH,\n-\t},\n-\t.tx_free_thresh = 0, /* Use PMD default values */\n-\t.tx_rs_thresh = 0, /* Use PMD default values */\n-};\n-\n-/* empty vmdq configuration structure. Filled in programatically */\n-static struct rte_eth_conf vmdq_conf_default = {\n-\t.rxmode = {\n-\t\t.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,\n-\t\t.split_hdr_size = 0,\n-\t\t.header_split   = 0, /**< Header Split disabled */\n-\t\t.hw_ip_checksum = 0, /**< IP checksum offload disabled */\n-\t\t.hw_vlan_filter = 0, /**< VLAN filtering disabled */\n-\t\t/*\n-\t\t * It is necessary for 1G NIC such as I350,\n-\t\t * this fixes bug of ipv4 forwarding in guest can't\n-\t\t * forward pakets from one virtio dev to another virtio dev.\n-\t\t */\n-\t\t.hw_vlan_strip  = 1, /**< VLAN strip enabled. 
*/\n-\t\t.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */\n-\t\t.hw_strip_crc   = 0, /**< CRC stripped by hardware */\n-\t},\n-\n-\t.txmode = {\n-\t\t.mq_mode = ETH_MQ_TX_NONE,\n-\t},\n-\t.rx_adv_conf = {\n-\t\t/*\n-\t\t * should be overridden separately in code with\n-\t\t * appropriate values\n-\t\t */\n-\t\t.vmdq_rx_conf = {\n-\t\t\t.nb_queue_pools = ETH_8_POOLS,\n-\t\t\t.enable_default_pool = 0,\n-\t\t\t.default_pool = 0,\n-\t\t\t.nb_pool_maps = 0,\n-\t\t\t.pool_map = {{0, 0},},\n-\t\t},\n-\t},\n-};\n-\n-static unsigned lcore_ids[RTE_MAX_LCORE];\n-static uint8_t ports[RTE_MAX_ETHPORTS];\n-static unsigned num_ports = 0; /**< The number of ports specified in command line */\n-\n-static const uint16_t external_pkt_default_vlan_tag = 2000;\n-const uint16_t vlan_tags[] = {\n-\t1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,\n-\t1008, 1009, 1010, 1011,\t1012, 1013, 1014, 1015,\n-\t1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,\n-\t1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,\n-\t1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,\n-\t1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,\n-\t1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,\n-\t1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,\n-};\n-\n-/* ethernet addresses of ports */\n-static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];\n-\n-/* heads for the main used and free linked lists for the data path. */\n-static struct virtio_net_data_ll *ll_root_used = NULL;\n-static struct virtio_net_data_ll *ll_root_free = NULL;\n-\n-/* Array of data core structures containing information on individual core linked lists. */\n-static struct lcore_info lcore_info[RTE_MAX_LCORE];\n-\n-/* Used for queueing bursts of TX packets. */\n-struct mbuf_table {\n-\tunsigned len;\n-\tunsigned txq_id;\n-\tstruct rte_mbuf *m_table[MAX_PKT_BURST];\n-};\n-\n-/* TX queue for each data core. */\n-struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];\n-\n-/* TX queue fori each virtio device for zero copy. */\n-struct mbuf_table tx_queue_zcp[MAX_QUEUES];\n-\n-/* Vlan header struct used to insert vlan tags on TX. */\n-struct vlan_ethhdr {\n-\tunsigned char   h_dest[ETH_ALEN];\n-\tunsigned char   h_source[ETH_ALEN];\n-\t__be16          h_vlan_proto;\n-\t__be16          h_vlan_TCI;\n-\t__be16          h_vlan_encapsulated_proto;\n-};\n-\n-/* IPv4 Header */\n-struct ipv4_hdr {\n-\tuint8_t  version_ihl;\t\t/**< version and header length */\n-\tuint8_t  type_of_service;\t/**< type of service */\n-\tuint16_t total_length;\t\t/**< length of packet */\n-\tuint16_t packet_id;\t\t/**< packet ID */\n-\tuint16_t fragment_offset;\t/**< fragmentation offset */\n-\tuint8_t  time_to_live;\t\t/**< time to live */\n-\tuint8_t  next_proto_id;\t\t/**< protocol ID */\n-\tuint16_t hdr_checksum;\t\t/**< header checksum */\n-\tuint32_t src_addr;\t\t/**< source address */\n-\tuint32_t dst_addr;\t\t/**< destination address */\n-} __attribute__((__packed__));\n-\n-/* Header lengths. 
*/\n-#define VLAN_HLEN       4\n-#define VLAN_ETH_HLEN   18\n-\n-/* Per-device statistics struct */\n-struct device_statistics {\n-\tuint64_t tx_total;\n-\trte_atomic64_t rx_total_atomic;\n-\tuint64_t rx_total;\n-\tuint64_t tx;\n-\trte_atomic64_t rx_atomic;\n-\tuint64_t rx;\n-} __rte_cache_aligned;\n-struct device_statistics dev_statistics[MAX_DEVICES];\n-\n-/*\n- * Builds up the correct configuration for VMDQ VLAN pool map\n- * according to the pool & queue limits.\n- */\n-static inline int\n-get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)\n-{\n-\tstruct rte_eth_vmdq_rx_conf conf;\n-\tunsigned i;\n-\n-\tmemset(&conf, 0, sizeof(conf));\n-\tconf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;\n-\tconf.nb_pool_maps = num_devices;\n-\tconf.enable_loop_back =\n-\t\tvmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back;\n-\n-\tfor (i = 0; i < conf.nb_pool_maps; i++) {\n-\t\tconf.pool_map[i].vlan_id = vlan_tags[ i ];\n-\t\tconf.pool_map[i].pools = (1UL << i);\n-\t}\n-\n-\t(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));\n-\t(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,\n-\t\t   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));\n-\treturn 0;\n-}\n-\n-/*\n- * Validate the device number according to the max pool number gotten form\n- * dev_info. If the device number is invalid, give the error message and\n- * return -1. Each device must have its own pool.\n- */\n-static inline int\n-validate_num_devices(uint32_t max_nb_devices)\n-{\n-\tif (num_devices > max_nb_devices) {\n-\t\tRTE_LOG(ERR, VHOST_PORT, \"invalid number of devices\\n\");\n-\t\treturn -1;\n-\t}\n-\treturn 0;\n-}\n-\n-/*\n- * Initialises a given port using global settings and with the rx buffers\n- * coming from the mbuf_pool passed as parameter\n- */\n-static inline int\n-port_init(uint8_t port)\n-{\n-\tstruct rte_eth_dev_info dev_info;\n-\tstruct rte_eth_conf port_conf;\n-\tuint16_t rx_rings, tx_rings;\n-\tuint16_t rx_ring_size, tx_ring_size;\n-\tint retval;\n-\tuint16_t q;\n-\n-\t/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */\n-\trte_eth_dev_info_get (port, &dev_info);\n-\n-\t/*configure the number of supported virtio devices based on VMDQ limits */\n-\tnum_devices = dev_info.max_vmdq_pools;\n-\tnum_queues = dev_info.max_rx_queues;\n-\n-\tif (zero_copy) {\n-\t\trx_ring_size = num_rx_descriptor;\n-\t\ttx_ring_size = num_tx_descriptor;\n-\t\ttx_rings = dev_info.max_tx_queues;\n-\t} else {\n-\t\trx_ring_size = RTE_TEST_RX_DESC_DEFAULT;\n-\t\ttx_ring_size = RTE_TEST_TX_DESC_DEFAULT;\n-\t\ttx_rings = (uint16_t)rte_lcore_count();\n-\t}\n-\n-\tretval = validate_num_devices(MAX_DEVICES);\n-\tif (retval < 0)\n-\t\treturn retval;\n-\n-\t/* Get port configuration. */\n-\tretval = get_eth_conf(&port_conf, num_devices);\n-\tif (retval < 0)\n-\t\treturn retval;\n-\n-\tif (port >= rte_eth_dev_count()) return -1;\n-\n-\trx_rings = (uint16_t)num_queues,\n-\t/* Configure ethernet device. */\n-\tretval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);\n-\tif (retval != 0)\n-\t\treturn retval;\n-\n-\t/* Setup the queues. 
*/\n-\tfor (q = 0; q < rx_rings; q ++) {\n-\t\tretval = rte_eth_rx_queue_setup(port, q, rx_ring_size,\n-\t\t\t\t\t\trte_eth_dev_socket_id(port), &rx_conf_default,\n-\t\t\t\t\t\tvpool_array[q].pool);\n-\t\tif (retval < 0)\n-\t\t\treturn retval;\n-\t}\n-\tfor (q = 0; q < tx_rings; q ++) {\n-\t\tretval = rte_eth_tx_queue_setup(port, q, tx_ring_size,\n-\t\t\t\t\t\trte_eth_dev_socket_id(port), &tx_conf_default);\n-\t\tif (retval < 0)\n-\t\t\treturn retval;\n-\t}\n-\n-\t/* Start the device. */\n-\tretval  = rte_eth_dev_start(port);\n-\tif (retval < 0) {\n-\t\tRTE_LOG(ERR, VHOST_DATA, \"Failed to start the device.\\n\");\n-\t\treturn retval;\n-\t}\n-\n-\trte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);\n-\tRTE_LOG(INFO, VHOST_PORT, \"Max virtio devices supported: %u\\n\", num_devices);\n-\tRTE_LOG(INFO, VHOST_PORT, \"Port %u MAC: %02\"PRIx8\" %02\"PRIx8\" %02\"PRIx8\n-\t\t\t\" %02\"PRIx8\" %02\"PRIx8\" %02\"PRIx8\"\\n\",\n-\t\t\t(unsigned)port,\n-\t\t\tvmdq_ports_eth_addr[port].addr_bytes[0],\n-\t\t\tvmdq_ports_eth_addr[port].addr_bytes[1],\n-\t\t\tvmdq_ports_eth_addr[port].addr_bytes[2],\n-\t\t\tvmdq_ports_eth_addr[port].addr_bytes[3],\n-\t\t\tvmdq_ports_eth_addr[port].addr_bytes[4],\n-\t\t\tvmdq_ports_eth_addr[port].addr_bytes[5]);\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Set character device basename.\n- */\n-static int\n-us_vhost_parse_basename(const char *q_arg)\n-{\n-\t/* parse number string */\n-\n-\tif (strnlen(q_arg, MAX_BASENAME_SZ) > MAX_BASENAME_SZ)\n-\t\treturn -1;\n-\telse\n-\t\tsnprintf((char*)&dev_basename, MAX_BASENAME_SZ, \"%s\", q_arg);\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Parse the portmask provided at run time.\n- */\n-static int\n-parse_portmask(const char *portmask)\n-{\n-\tchar *end = NULL;\n-\tunsigned long pm;\n-\n-\terrno = 0;\n-\n-\t/* parse hexadecimal string */\n-\tpm = strtoul(portmask, &end, 16);\n-\tif ((portmask[0] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0))\n-\t\treturn -1;\n-\n-\tif (pm == 0)\n-\t\treturn -1;\n-\n-\treturn pm;\n-\n-}\n-\n-/*\n- * Parse num options at run time.\n- */\n-static int\n-parse_num_opt(const char *q_arg, uint32_t max_valid_value)\n-{\n-\tchar *end = NULL;\n-\tunsigned long num;\n-\n-\terrno = 0;\n-\n-\t/* parse unsigned int string */\n-\tnum = strtoul(q_arg, &end, 10);\n-\tif ((q_arg[0] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0))\n-\t\treturn -1;\n-\n-\tif (num > max_valid_value)\n-\t\treturn -1;\n-\n-\treturn num;\n-\n-}\n-\n-/*\n- * Display usage\n- */\n-static void\n-us_vhost_usage(const char *prgname)\n-{\n-\tRTE_LOG(INFO, VHOST_CONFIG, \"%s [EAL options] -- -p PORTMASK\\n\"\n-\t\"\t\t--vm2vm [0|1|2]\\n\"\n-\t\"\t\t--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\\n\"\n-\t\"\t\t--dev-basename <name> --dev-index [0-N]\\n\"\n-\t\"\t\t--nb-devices ND\\n\"\n-\t\"\t\t-p PORTMASK: Set mask for ports to be used by application\\n\"\n-\t\"\t\t--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\\n\"\n-\t\"\t\t--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\\n\"\n-\t\"\t\t--rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\\n\"\n-\t\"\t\t--rx-retry-num [0-N]: the number of retries on rx. 
This makes effect only if retries on rx enabled\\n\"\n-\t\"\t\t--mergeable [0|1]: disable(default)/enable RX mergeable buffers\\n\"\n-\t\"\t\t--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\\n\"\n-\t\"\t\t--dev-basename: The basename to be used for the character device.\\n\"\n-\t\"\t\t--dev-index [0-N]: Defaults to zero if not used. Index is appended to basename.\\n\"\n-\t\"\t\t--zero-copy [0|1]: disable(default)/enable rx/tx \"\n-\t\t\t\"zero copy\\n\"\n-\t\"\t\t--rx-desc-num [0-N]: the number of descriptors on rx, \"\n-\t\t\t\"used only when zero copy is enabled.\\n\"\n-\t\"\t\t--tx-desc-num [0-N]: the number of descriptors on tx, \"\n-\t\t\t\"used only when zero copy is enabled.\\n\",\n-\t       prgname);\n-}\n-\n-/*\n- * Parse the arguments given in the command line of the application.\n- */\n-static int\n-us_vhost_parse_args(int argc, char **argv)\n-{\n-\tint opt, ret;\n-\tint option_index;\n-\tunsigned i;\n-\tconst char *prgname = argv[0];\n-\tstatic struct option long_option[] = {\n-\t\t{\"vm2vm\", required_argument, NULL, 0},\n-\t\t{\"rx-retry\", required_argument, NULL, 0},\n-\t\t{\"rx-retry-delay\", required_argument, NULL, 0},\n-\t\t{\"rx-retry-num\", required_argument, NULL, 0},\n-\t\t{\"mergeable\", required_argument, NULL, 0},\n-\t\t{\"stats\", required_argument, NULL, 0},\n-\t\t{\"dev-basename\", required_argument, NULL, 0},\n-\t\t{\"dev-index\", required_argument, NULL, 0},\n-\t\t{\"zero-copy\", required_argument, NULL, 0},\n-\t\t{\"rx-desc-num\", required_argument, NULL, 0},\n-\t\t{\"tx-desc-num\", required_argument, NULL, 0},\n-\t\t{NULL, 0, 0, 0},\n-\t};\n-\n-\t/* Parse command line */\n-\twhile ((opt = getopt_long(argc, argv, \"p:\",long_option, &option_index)) != EOF) {\n-\t\tswitch (opt) {\n-\t\t/* Portmask */\n-\t\tcase 'p':\n-\t\t\tenabled_port_mask = parse_portmask(optarg);\n-\t\t\tif (enabled_port_mask == 0) {\n-\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG, \"Invalid portmask\\n\");\n-\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\treturn -1;\n-\t\t\t}\n-\t\t\tbreak;\n-\n-\t\tcase 0:\n-\t\t\t/* Enable/disable vm2vm comms. */\n-\t\t\tif (!strncmp(long_option[option_index].name, \"vm2vm\",\n-\t\t\t\tMAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, (VM2VM_LAST - 1));\n-\t\t\t\tif (ret == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG,\n-\t\t\t\t\t\t\"Invalid argument for \"\n-\t\t\t\t\t\t\"vm2vm [0|1|2]\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else {\n-\t\t\t\t\tvm2vm_mode = (vm2vm_type)ret;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Enable/disable retries on RX. */\n-\t\t\tif (!strncmp(long_option[option_index].name, \"rx-retry\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, 1);\n-\t\t\t\tif (ret == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG, \"Invalid argument for rx-retry [0|1]\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else {\n-\t\t\t\t\tenable_retry = ret;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Specify the retries delay time (in useconds) on RX. */\n-\t\t\tif (!strncmp(long_option[option_index].name, \"rx-retry-delay\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, INT32_MAX);\n-\t\t\t\tif (ret == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG, \"Invalid argument for rx-retry-delay [0-N]\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else {\n-\t\t\t\t\tburst_rx_delay_time = ret;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Specify the retries number on RX. 
*/\n-\t\t\tif (!strncmp(long_option[option_index].name, \"rx-retry-num\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, INT32_MAX);\n-\t\t\t\tif (ret == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG, \"Invalid argument for rx-retry-num [0-N]\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else {\n-\t\t\t\t\tburst_rx_retry_num = ret;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Enable/disable RX mergeable buffers. */\n-\t\t\tif (!strncmp(long_option[option_index].name, \"mergeable\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, 1);\n-\t\t\t\tif (ret == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG, \"Invalid argument for mergeable [0|1]\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else {\n-\t\t\t\t\tif (ret) {\n-\t\t\t\t\t\tvmdq_conf_default.rxmode.jumbo_frame = 1;\n-\t\t\t\t\t\tvmdq_conf_default.rxmode.max_rx_pkt_len\n-\t\t\t\t\t\t\t= JUMBO_FRAME_MAX_SIZE;\n-\t\t\t\t\t\tVHOST_FEATURES = (1ULL << VIRTIO_NET_F_MRG_RXBUF);\n-\t\t\t\t\t}\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Enable/disable stats. */\n-\t\t\tif (!strncmp(long_option[option_index].name, \"stats\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, INT32_MAX);\n-\t\t\t\tif (ret == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG, \"Invalid argument for stats [0..N]\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else {\n-\t\t\t\t\tenable_stats = ret;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Set character device basename. */\n-\t\t\tif (!strncmp(long_option[option_index].name, \"dev-basename\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tif (us_vhost_parse_basename(optarg) == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG, \"Invalid argument for character device basename (Max %d characters)\\n\", MAX_BASENAME_SZ);\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Set character device index. */\n-\t\t\tif (!strncmp(long_option[option_index].name, \"dev-index\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, INT32_MAX);\n-\t\t\t\tif (ret == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG, \"Invalid argument for character device index [0..N]\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else\n-\t\t\t\t\tdev_index = ret;\n-\t\t\t}\n-\n-\t\t\t/* Enable/disable rx/tx zero copy. */\n-\t\t\tif (!strncmp(long_option[option_index].name,\n-\t\t\t\t\"zero-copy\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, 1);\n-\t\t\t\tif (ret == -1) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG,\n-\t\t\t\t\t\t\"Invalid argument\"\n-\t\t\t\t\t\t\" for zero-copy [0|1]\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else\n-\t\t\t\t\tzero_copy = ret;\n-\n-\t\t\t\tif (zero_copy) {\n-#ifdef RTE_MBUF_SCATTER_GATHER\n-\t\t\t\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Before running \"\n-\t\t\t\t\t\"zero copy vhost APP, please \"\n-\t\t\t\t\t\"disable RTE_MBUF_SCATTER_GATHER\\n\"\n-\t\t\t\t\t\"in config file and then rebuild DPDK \"\n-\t\t\t\t\t\"core lib!\\n\"\n-\t\t\t\t\t\"Otherwise please disable zero copy \"\n-\t\t\t\t\t\"flag in command line!\\n\");\n-\t\t\t\t\treturn -1;\n-#endif\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Specify the descriptor number on RX. 
*/\n-\t\t\tif (!strncmp(long_option[option_index].name,\n-\t\t\t\t\"rx-desc-num\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, MAX_RING_DESC);\n-\t\t\t\tif ((ret == -1) || (!POWEROF2(ret))) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG,\n-\t\t\t\t\t\"Invalid argument for rx-desc-num[0-N],\"\n-\t\t\t\t\t\"power of 2 required.\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else {\n-\t\t\t\t\tnum_rx_descriptor = ret;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\t/* Specify the descriptor number on TX. */\n-\t\t\tif (!strncmp(long_option[option_index].name,\n-\t\t\t\t\"tx-desc-num\", MAX_LONG_OPT_SZ)) {\n-\t\t\t\tret = parse_num_opt(optarg, MAX_RING_DESC);\n-\t\t\t\tif ((ret == -1) || (!POWEROF2(ret))) {\n-\t\t\t\t\tRTE_LOG(INFO, VHOST_CONFIG,\n-\t\t\t\t\t\"Invalid argument for tx-desc-num [0-N],\"\n-\t\t\t\t\t\"power of 2 required.\\n\");\n-\t\t\t\t\tus_vhost_usage(prgname);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t} else {\n-\t\t\t\t\tnum_tx_descriptor = ret;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\tbreak;\n-\n-\t\t\t/* Invalid option - print options. */\n-\t\tdefault:\n-\t\t\tus_vhost_usage(prgname);\n-\t\t\treturn -1;\n-\t\t}\n-\t}\n-\n-\tfor (i = 0; i < RTE_MAX_ETHPORTS; i++) {\n-\t\tif (enabled_port_mask & (1 << i))\n-\t\t\tports[num_ports++] = (uint8_t)i;\n-\t}\n-\n-\tif ((num_ports ==  0) || (num_ports > MAX_SUP_PORTS)) {\n-\t\tRTE_LOG(INFO, VHOST_PORT, \"Current enabled port number is %u,\"\n-\t\t\t\"but only %u port can be enabled\\n\",num_ports, MAX_SUP_PORTS);\n-\t\treturn -1;\n-\t}\n-\n-\tif ((zero_copy == 1) && (vm2vm_mode == VM2VM_SOFTWARE)) {\n-\t\tRTE_LOG(INFO, VHOST_PORT,\n-\t\t\t\"Vhost zero copy doesn't support software vm2vm,\"\n-\t\t\t\"please specify 'vm2vm 2' to use hardware vm2vm.\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tif ((zero_copy == 1) && (vmdq_conf_default.rxmode.jumbo_frame == 1)) {\n-\t\tRTE_LOG(INFO, VHOST_PORT,\n-\t\t\t\"Vhost zero copy doesn't support jumbo frame,\"\n-\t\t\t\"please specify '--mergeable 0' to disable the \"\n-\t\t\t\"mergeable feature.\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Update the global var NUM_PORTS and array PORTS according to system ports number\n- * and return valid ports number\n- */\n-static unsigned check_ports_num(unsigned nb_ports)\n-{\n-\tunsigned valid_num_ports = num_ports;\n-\tunsigned portid;\n-\n-\tif (num_ports > nb_ports) {\n-\t\tRTE_LOG(INFO, VHOST_PORT, \"\\nSpecified port number(%u) exceeds total system port number(%u)\\n\",\n-\t\t\tnum_ports, nb_ports);\n-\t\tnum_ports = nb_ports;\n-\t}\n-\n-\tfor (portid = 0; portid < num_ports; portid ++) {\n-\t\tif (ports[portid] >= nb_ports) {\n-\t\t\tRTE_LOG(INFO, VHOST_PORT, \"\\nSpecified port ID(%u) exceeds max system port ID(%u)\\n\",\n-\t\t\t\tports[portid], (nb_ports - 1));\n-\t\t\tports[portid] = INVALID_PORT_ID;\n-\t\t\tvalid_num_ports--;\n-\t\t}\n-\t}\n-\treturn valid_num_ports;\n-}\n-\n-/*\n- * Macro to print out packet contents. 
Wrapped in debug define so that the\n- * data path is not effected when debug is disabled.\n- */\n-#ifdef DEBUG\n-#define PRINT_PACKET(device, addr, size, header) do {\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\tchar *pkt_addr = (char*)(addr);\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\tunsigned int index;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\tchar packet[MAX_PRINT_BUFF];\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\tif ((header))\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\tsnprintf(packet, MAX_PRINT_BUFF, \"(%\"PRIu64\") Header size %d: \", (device->device_fh), (size));\t\t\t\t\\\n-\telse\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\tsnprintf(packet, MAX_PRINT_BUFF, \"(%\"PRIu64\") Packet size %d: \", (device->device_fh), (size));\t\t\t\t\\\n-\tfor (index = 0; index < (size); index++) {\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\tsnprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF),\t\\\n-\t\t\t\"%02hhx \", pkt_addr[index]);\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\tsnprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \"\\n\");\t\\\n-\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\tLOG_DEBUG(VHOST_DATA, \"%s\", packet);\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-} while(0)\n-#else\n-#define PRINT_PACKET(device, addr, size, header) do{} while(0)\n-#endif\n-\n-/*\n- * Function to convert guest physical addresses to vhost virtual addresses. This\n- * is used to convert virtio buffer addresses.\n- */\n-static inline uint64_t __attribute__((always_inline))\n-gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)\n-{\n-\tstruct virtio_memory_regions *region;\n-\tuint32_t regionidx;\n-\tuint64_t vhost_va = 0;\n-\n-\tfor (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {\n-\t\tregion = &dev->mem->regions[regionidx];\n-\t\tif ((guest_pa >= region->guest_phys_address) &&\n-\t\t\t(guest_pa <= region->guest_phys_address_end)) {\n-\t\t\tvhost_va = region->address_offset + guest_pa;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") GPA %p| VVA %p\\n\",\n-\t\tdev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);\n-\n-\treturn vhost_va;\n-}\n-\n-/*\n- * Function to convert guest physical addresses to vhost physical addresses.\n- * This is used to convert virtio buffer addresses.\n- */\n-static inline uint64_t __attribute__((always_inline))\n-gpa_to_hpa(struct virtio_net *dev, uint64_t guest_pa,\n-\tuint32_t buf_len, hpa_type *addr_type)\n-{\n-\tstruct virtio_memory_regions_hpa *region;\n-\tuint32_t regionidx;\n-\tuint64_t vhost_pa = 0;\n-\n-\t*addr_type = PHYS_ADDR_INVALID;\n-\n-\tfor (regionidx = 0; regionidx < dev->mem->nregions_hpa; regionidx++) {\n-\t\tregion = &dev->mem->regions_hpa[regionidx];\n-\t\tif ((guest_pa >= region->guest_phys_address) &&\n-\t\t\t(guest_pa <= region->guest_phys_address_end)) {\n-\t\t\tvhost_pa = region->host_phys_addr_offset + guest_pa;\n-\t\t\tif (likely((guest_pa + buf_len - 1)\n-\t\t\t\t<= region->guest_phys_address_end))\n-\t\t\t\t*addr_type = PHYS_ADDR_CONTINUOUS;\n-\t\t\telse\n-\t\t\t\t*addr_type = PHYS_ADDR_CROSS_SUBREG;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") GPA %p| HPA %p\\n\",\n-\t\tdev->device_fh, (void *)(uintptr_t)guest_pa,\n-\t\t(void *)(uintptr_t)vhost_pa);\n-\n-\treturn 
vhost_pa;\n-}\n-\n-/*\n- * This function adds buffers to the virtio device's RX virtqueue. Buffers can\n- * be received from the physical port or from another virtio device. A packet\n- * count is returned to indicate the number of packets that were successfully\n- * added to the RX queue. This function works when mergeable is disabled.\n- */\n-static inline uint32_t __attribute__((always_inline))\n-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)\n-{\n-\tstruct vhost_virtqueue *vq;\n-\tstruct vring_desc *desc;\n-\tstruct rte_mbuf *buff;\n-\t/* The virtio_hdr is initialised to 0. */\n-\tstruct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};\n-\tuint64_t buff_addr = 0;\n-\tuint64_t buff_hdr_addr = 0;\n-\tuint32_t head[MAX_PKT_BURST], packet_len = 0;\n-\tuint32_t head_idx, packet_success = 0;\n-\tuint32_t retry = 0;\n-\tuint16_t avail_idx, res_cur_idx;\n-\tuint16_t res_base_idx, res_end_idx;\n-\tuint16_t free_entries;\n-\tuint8_t success = 0;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") virtio_dev_rx()\\n\", dev->device_fh);\n-\tvq = dev->virtqueue[VIRTIO_RXQ];\n-\tcount = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;\n-\n-\t/* As many data cores may want access to available buffers, they need to be reserved. */\n-\tdo {\n-\t\tres_base_idx = vq->last_used_idx_res;\n-\t\tavail_idx = *((volatile uint16_t *)&vq->avail->idx);\n-\n-\t\tfree_entries = (avail_idx - res_base_idx);\n-\t\t/* If retry is enabled and the queue is full then we wait and retry to avoid packet loss. */\n-\t\tif (enable_retry && unlikely(count > free_entries)) {\n-\t\t\tfor (retry = 0; retry < burst_rx_retry_num; retry++) {\n-\t\t\t\trte_delay_us(burst_rx_delay_time);\n-\t\t\t\tavail_idx =\n-\t\t\t\t\t*((volatile uint16_t *)&vq->avail->idx);\n-\t\t\t\tfree_entries = (avail_idx - res_base_idx);\n-\t\t\t\tif (count <= free_entries)\n-\t\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\n-\t\t/*check that we have enough buffers*/\n-\t\tif (unlikely(count > free_entries))\n-\t\t\tcount = free_entries;\n-\n-\t\tif (count == 0)\n-\t\t\treturn 0;\n-\n-\t\tres_end_idx = res_base_idx + count;\n-\t\t/* vq->last_used_idx_res is atomically updated. */\n-\t\tsuccess = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,\n-\t\t\t\t\t\t\t\t\tres_end_idx);\n-\t} while (unlikely(success == 0));\n-\tres_cur_idx = res_base_idx;\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Current Index %d| End Index %d\\n\", dev->device_fh, res_cur_idx, res_end_idx);\n-\n-\t/* Prefetch available ring to retrieve indexes. */\n-\trte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);\n-\n-\t/* Retrieve all of the head indexes first to avoid caching issues. */\n-\tfor (head_idx = 0; head_idx < count; head_idx++)\n-\t\thead[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];\n-\n-\t/*Prefetch descriptor index. */\n-\trte_prefetch0(&vq->desc[head[packet_success]]);\n-\n-\twhile (res_cur_idx != res_end_idx) {\n-\t\t/* Get descriptor from available ring */\n-\t\tdesc = &vq->desc[head[packet_success]];\n-\n-\t\tbuff = pkts[packet_success];\n-\n-\t\t/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */\n-\t\tbuff_addr = gpa_to_vva(dev, desc->addr);\n-\t\t/* Prefetch buffer address. 
*/\n-\t\trte_prefetch0((void*)(uintptr_t)buff_addr);\n-\n-\t\t/* Copy virtio_hdr to packet and increment buffer address */\n-\t\tbuff_hdr_addr = buff_addr;\n-\t\tpacket_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;\n-\n-\t\t/*\n-\t\t * If the descriptors are chained the header and data are\n-\t\t * placed in separate buffers.\n-\t\t */\n-\t\tif (desc->flags & VRING_DESC_F_NEXT) {\n-\t\t\tdesc->len = vq->vhost_hlen;\n-\t\t\tdesc = &vq->desc[desc->next];\n-\t\t\t/* Buffer address translation. */\n-\t\t\tbuff_addr = gpa_to_vva(dev, desc->addr);\n-\t\t\tdesc->len = rte_pktmbuf_data_len(buff);\n-\t\t} else {\n-\t\t\tbuff_addr += vq->vhost_hlen;\n-\t\t\tdesc->len = packet_len;\n-\t\t}\n-\n-\t\t/* Update used ring with desc information */\n-\t\tvq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];\n-\t\tvq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;\n-\n-\t\t/* Copy mbuf data to buffer */\n-\t\trte_memcpy((void *)(uintptr_t)buff_addr,\n-\t\t\t(const void *)buff->pkt.data,\n-\t\t\trte_pktmbuf_data_len(buff));\n-\t\tPRINT_PACKET(dev, (uintptr_t)buff_addr,\n-\t\t\trte_pktmbuf_data_len(buff), 0);\n-\n-\t\tres_cur_idx++;\n-\t\tpacket_success++;\n-\n-\t\trte_memcpy((void *)(uintptr_t)buff_hdr_addr,\n-\t\t\t(const void *)&virtio_hdr, vq->vhost_hlen);\n-\n-\t\tPRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);\n-\n-\t\tif (res_cur_idx < res_end_idx) {\n-\t\t\t/* Prefetch descriptor index. */\n-\t\t\trte_prefetch0(&vq->desc[head[packet_success]]);\n-\t\t}\n-\t}\n-\n-\trte_compiler_barrier();\n-\n-\t/* Wait until it's our turn to add our buffer to the used ring. */\n-\twhile (unlikely(vq->last_used_idx != res_base_idx))\n-\t\trte_pause();\n-\n-\t*(volatile uint16_t *)&vq->used->idx += count;\n-\tvq->last_used_idx = res_end_idx;\n-\n-\t/* Kick the guest if necessary. */\n-\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n-\t\teventfd_write((int)vq->kickfd, 1);\n-\treturn count;\n-}\n-\n-static inline uint32_t __attribute__((always_inline))\n-copy_from_mbuf_to_vring(struct virtio_net *dev,\n-\tuint16_t res_base_idx, uint16_t res_end_idx,\n-\tstruct rte_mbuf *pkt)\n-{\n-\tuint32_t vec_idx = 0;\n-\tuint32_t entry_success = 0;\n-\tstruct vhost_virtqueue *vq;\n-\t/* The virtio_hdr is initialised to 0. */\n-\tstruct virtio_net_hdr_mrg_rxbuf virtio_hdr = {\n-\t\t{0, 0, 0, 0, 0, 0}, 0};\n-\tuint16_t cur_idx = res_base_idx;\n-\tuint64_t vb_addr = 0;\n-\tuint64_t vb_hdr_addr = 0;\n-\tuint32_t seg_offset = 0;\n-\tuint32_t vb_offset = 0;\n-\tuint32_t seg_avail;\n-\tuint32_t vb_avail;\n-\tuint32_t cpy_len, entry_len;\n-\n-\tif (pkt == NULL)\n-\t\treturn 0;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Current Index %d| \"\n-\t\t\"End Index %d\\n\",\n-\t\tdev->device_fh, cur_idx, res_end_idx);\n-\n-\t/*\n-\t * Convert from gpa to vva\n-\t * (guest physical addr -> vhost virtual addr)\n-\t */\n-\tvq = dev->virtqueue[VIRTIO_RXQ];\n-\tvb_addr =\n-\t\tgpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);\n-\tvb_hdr_addr = vb_addr;\n-\n-\t/* Prefetch buffer address. 
*/\n-\trte_prefetch0((void *)(uintptr_t)vb_addr);\n-\n-\tvirtio_hdr.num_buffers = res_end_idx - res_base_idx;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") RX: Num merge buffers %d\\n\",\n-\t\tdev->device_fh, virtio_hdr.num_buffers);\n-\n-\trte_memcpy((void *)(uintptr_t)vb_hdr_addr,\n-\t\t(const void *)&virtio_hdr, vq->vhost_hlen);\n-\n-\tPRINT_PACKET(dev, (uintptr_t)vb_hdr_addr, vq->vhost_hlen, 1);\n-\n-\tseg_avail = rte_pktmbuf_data_len(pkt);\n-\tvb_offset = vq->vhost_hlen;\n-\tvb_avail =\n-\t\tvq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;\n-\n-\tentry_len = vq->vhost_hlen;\n-\n-\tif (vb_avail == 0) {\n-\t\tuint32_t desc_idx =\n-\t\t\tvq->buf_vec[vec_idx].desc_idx;\n-\t\tvq->desc[desc_idx].len = vq->vhost_hlen;\n-\n-\t\tif ((vq->desc[desc_idx].flags\n-\t\t\t& VRING_DESC_F_NEXT) == 0) {\n-\t\t\t/* Update used ring with desc information */\n-\t\t\tvq->used->ring[cur_idx & (vq->size - 1)].id\n-\t\t\t\t= vq->buf_vec[vec_idx].desc_idx;\n-\t\t\tvq->used->ring[cur_idx & (vq->size - 1)].len\n-\t\t\t\t= entry_len;\n-\n-\t\t\tentry_len = 0;\n-\t\t\tcur_idx++;\n-\t\t\tentry_success++;\n-\t\t}\n-\n-\t\tvec_idx++;\n-\t\tvb_addr =\n-\t\t\tgpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);\n-\n-\t\t/* Prefetch buffer address. */\n-\t\trte_prefetch0((void *)(uintptr_t)vb_addr);\n-\t\tvb_offset = 0;\n-\t\tvb_avail = vq->buf_vec[vec_idx].buf_len;\n-\t}\n-\n-\tcpy_len = RTE_MIN(vb_avail, seg_avail);\n-\n-\twhile (cpy_len > 0) {\n-\t\t/* Copy mbuf data to vring buffer */\n-\t\trte_memcpy((void *)(uintptr_t)(vb_addr + vb_offset),\n-\t\t\t(const void *)(rte_pktmbuf_mtod(pkt, char*) + seg_offset),\n-\t\t\tcpy_len);\n-\n-\t\tPRINT_PACKET(dev,\n-\t\t\t(uintptr_t)(vb_addr + vb_offset),\n-\t\t\tcpy_len, 0);\n-\n-\t\tseg_offset += cpy_len;\n-\t\tvb_offset += cpy_len;\n-\t\tseg_avail -= cpy_len;\n-\t\tvb_avail -= cpy_len;\n-\t\tentry_len += cpy_len;\n-\n-\t\tif (seg_avail != 0) {\n-\t\t\t/*\n-\t\t\t * The virtio buffer in this vring\n-\t\t\t * entry reach to its end.\n-\t\t\t * But the segment doesn't complete.\n-\t\t\t */\n-\t\t\tif ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &\n-\t\t\t\tVRING_DESC_F_NEXT) == 0) {\n-\t\t\t\t/* Update used ring with desc information */\n-\t\t\t\tvq->used->ring[cur_idx & (vq->size - 1)].id\n-\t\t\t\t\t= vq->buf_vec[vec_idx].desc_idx;\n-\t\t\t\tvq->used->ring[cur_idx & (vq->size - 1)].len\n-\t\t\t\t\t= entry_len;\n-\t\t\t\tentry_len = 0;\n-\t\t\t\tcur_idx++;\n-\t\t\t\tentry_success++;\n-\t\t\t}\n-\n-\t\t\tvec_idx++;\n-\t\t\tvb_addr = gpa_to_vva(dev,\n-\t\t\t\tvq->buf_vec[vec_idx].buf_addr);\n-\t\t\tvb_offset = 0;\n-\t\t\tvb_avail = vq->buf_vec[vec_idx].buf_len;\n-\t\t\tcpy_len = RTE_MIN(vb_avail, seg_avail);\n-\t\t} else {\n-\t\t\t/*\n-\t\t\t * This current segment complete, need continue to\n-\t\t\t * check if the whole packet complete or not.\n-\t\t\t */\n-\t\t\tpkt = pkt->pkt.next;\n-\t\t\tif (pkt != NULL) {\n-\t\t\t\t/*\n-\t\t\t\t * There are more segments.\n-\t\t\t\t */\n-\t\t\t\tif (vb_avail == 0) {\n-\t\t\t\t\t/*\n-\t\t\t\t\t * This current buffer from vring is\n-\t\t\t\t\t * used up, need fetch next buffer\n-\t\t\t\t\t * from buf_vec.\n-\t\t\t\t\t */\n-\t\t\t\t\tuint32_t desc_idx =\n-\t\t\t\t\t\tvq->buf_vec[vec_idx].desc_idx;\n-\t\t\t\t\tvq->desc[desc_idx].len = vb_offset;\n-\n-\t\t\t\t\tif ((vq->desc[desc_idx].flags &\n-\t\t\t\t\t\tVRING_DESC_F_NEXT) == 0) {\n-\t\t\t\t\t\tuint16_t wrapped_idx =\n-\t\t\t\t\t\t\tcur_idx & (vq->size - 1);\n-\t\t\t\t\t\t/*\n-\t\t\t\t\t\t * Update used ring with the\n-\t\t\t\t\t\t * descriptor information\n-\t\t\t\t\t\t 
*/\n-\t\t\t\t\t\tvq->used->ring[wrapped_idx].id\n-\t\t\t\t\t\t\t= desc_idx;\n-\t\t\t\t\t\tvq->used->ring[wrapped_idx].len\n-\t\t\t\t\t\t\t= entry_len;\n-\t\t\t\t\t\tentry_success++;\n-\t\t\t\t\t\tentry_len = 0;\n-\t\t\t\t\t\tcur_idx++;\n-\t\t\t\t\t}\n-\n-\t\t\t\t\t/* Get next buffer from buf_vec. */\n-\t\t\t\t\tvec_idx++;\n-\t\t\t\t\tvb_addr = gpa_to_vva(dev,\n-\t\t\t\t\t\tvq->buf_vec[vec_idx].buf_addr);\n-\t\t\t\t\tvb_avail =\n-\t\t\t\t\t\tvq->buf_vec[vec_idx].buf_len;\n-\t\t\t\t\tvb_offset = 0;\n-\t\t\t\t}\n-\n-\t\t\t\tseg_offset = 0;\n-\t\t\t\tseg_avail = rte_pktmbuf_data_len(pkt);\n-\t\t\t\tcpy_len = RTE_MIN(vb_avail, seg_avail);\n-\t\t\t} else {\n-\t\t\t\t/*\n-\t\t\t\t * This whole packet completes.\n-\t\t\t\t */\n-\t\t\t\tuint32_t desc_idx =\n-\t\t\t\t\tvq->buf_vec[vec_idx].desc_idx;\n-\t\t\t\tvq->desc[desc_idx].len = vb_offset;\n-\n-\t\t\t\twhile (vq->desc[desc_idx].flags &\n-\t\t\t\t\tVRING_DESC_F_NEXT) {\n-\t\t\t\t\tdesc_idx = vq->desc[desc_idx].next;\n-\t\t\t\t\t vq->desc[desc_idx].len = 0;\n-\t\t\t\t}\n-\n-\t\t\t\t/* Update used ring with desc information */\n-\t\t\t\tvq->used->ring[cur_idx & (vq->size - 1)].id\n-\t\t\t\t\t= vq->buf_vec[vec_idx].desc_idx;\n-\t\t\t\tvq->used->ring[cur_idx & (vq->size - 1)].len\n-\t\t\t\t\t= entry_len;\n-\t\t\t\tentry_len = 0;\n-\t\t\t\tcur_idx++;\n-\t\t\t\tentry_success++;\n-\t\t\t\tseg_avail = 0;\n-\t\t\t\tcpy_len = RTE_MIN(vb_avail, seg_avail);\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\treturn entry_success;\n-}\n-\n-/*\n- * This function adds buffers to the virtio devices RX virtqueue. Buffers can\n- * be received from the physical port or from another virtio device. A packet\n- * count is returned to indicate the number of packets that were succesfully\n- * added to the RX queue. This function works for mergeable RX.\n- */\n-static inline uint32_t __attribute__((always_inline))\n-virtio_dev_merge_rx(struct virtio_net *dev, struct rte_mbuf **pkts,\n-\tuint32_t count)\n-{\n-\tstruct vhost_virtqueue *vq;\n-\tuint32_t pkt_idx = 0, entry_success = 0;\n-\tuint32_t retry = 0;\n-\tuint16_t avail_idx, res_cur_idx;\n-\tuint16_t res_base_idx, res_end_idx;\n-\tuint8_t success = 0;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") virtio_dev_merge_rx()\\n\",\n-\t\tdev->device_fh);\n-\tvq = dev->virtqueue[VIRTIO_RXQ];\n-\tcount = RTE_MIN((uint32_t)MAX_PKT_BURST, count);\n-\n-\tif (count == 0)\n-\t\treturn 0;\n-\n-\tfor (pkt_idx = 0; pkt_idx < count; pkt_idx++) {\n-\t\tuint32_t secure_len = 0;\n-\t\tuint16_t need_cnt;\n-\t\tuint32_t vec_idx = 0;\n-\t\tuint32_t pkt_len = pkts[pkt_idx]->pkt.pkt_len + vq->vhost_hlen;\n-\t\tuint16_t i, id;\n-\n-\t\tdo {\n-\t\t\t/*\n-\t\t\t * As many data cores may want access to available\n-\t\t\t * buffers, they need to be reserved.\n-\t\t\t */\n-\t\t\tres_base_idx = vq->last_used_idx_res;\n-\t\t\tres_cur_idx = res_base_idx;\n-\n-\t\t\tdo {\n-\t\t\t\tavail_idx = *((volatile uint16_t *)&vq->avail->idx);\n-\t\t\t\tif (unlikely(res_cur_idx == avail_idx)) {\n-\t\t\t\t\t/*\n-\t\t\t\t\t * If retry is enabled and the queue is\n-\t\t\t\t\t * full then we wait and retry to avoid\n-\t\t\t\t\t * packet loss.\n-\t\t\t\t\t */\n-\t\t\t\t\tif (enable_retry) {\n-\t\t\t\t\t\tuint8_t cont = 0;\n-\t\t\t\t\t\tfor (retry = 0; retry < burst_rx_retry_num; retry++) {\n-\t\t\t\t\t\t\trte_delay_us(burst_rx_delay_time);\n-\t\t\t\t\t\t\tavail_idx =\n-\t\t\t\t\t\t\t\t*((volatile uint16_t *)&vq->avail->idx);\n-\t\t\t\t\t\t\tif (likely(res_cur_idx != avail_idx)) {\n-\t\t\t\t\t\t\t\tcont = 1;\n-\t\t\t\t\t\t\t\tbreak;\n-\t\t\t\t\t\t\t}\n-\t\t\t\t\t\t}\n-\t\t\t\t\t\tif (cont 
== 1)\n-\t\t\t\t\t\t\tcontinue;\n-\t\t\t\t\t}\n-\n-\t\t\t\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\t\t\t\"(%\"PRIu64\") Failed \"\n-\t\t\t\t\t\t\"to get enough desc from \"\n-\t\t\t\t\t\t\"vring\\n\",\n-\t\t\t\t\t\tdev->device_fh);\n-\t\t\t\t\treturn pkt_idx;\n-\t\t\t\t} else {\n-\t\t\t\t\tuint16_t wrapped_idx =\n-\t\t\t\t\t\t(res_cur_idx) & (vq->size - 1);\n-\t\t\t\t\tuint32_t idx =\n-\t\t\t\t\t\tvq->avail->ring[wrapped_idx];\n-\t\t\t\t\tuint8_t next_desc;\n-\n-\t\t\t\t\tdo {\n-\t\t\t\t\t\tnext_desc = 0;\n-\t\t\t\t\t\tsecure_len += vq->desc[idx].len;\n-\t\t\t\t\t\tif (vq->desc[idx].flags &\n-\t\t\t\t\t\t\tVRING_DESC_F_NEXT) {\n-\t\t\t\t\t\t\tidx = vq->desc[idx].next;\n-\t\t\t\t\t\t\tnext_desc = 1;\n-\t\t\t\t\t\t}\n-\t\t\t\t\t} while (next_desc);\n-\n-\t\t\t\t\tres_cur_idx++;\n-\t\t\t\t}\n-\t\t\t} while (pkt_len > secure_len);\n-\n-\t\t\t/* vq->last_used_idx_res is atomically updated. */\n-\t\t\tsuccess = rte_atomic16_cmpset(&vq->last_used_idx_res,\n-\t\t\t\t\t\t\tres_base_idx,\n-\t\t\t\t\t\t\tres_cur_idx);\n-\t\t} while (success == 0);\n-\n-\t\tid = res_base_idx;\n-\t\tneed_cnt = res_cur_idx - res_base_idx;\n-\n-\t\tfor (i = 0; i < need_cnt; i++, id++) {\n-\t\t\tuint16_t wrapped_idx = id & (vq->size - 1);\n-\t\t\tuint32_t idx = vq->avail->ring[wrapped_idx];\n-\t\t\tuint8_t next_desc;\n-\t\t\tdo {\n-\t\t\t\tnext_desc = 0;\n-\t\t\t\tvq->buf_vec[vec_idx].buf_addr =\n-\t\t\t\t\tvq->desc[idx].addr;\n-\t\t\t\tvq->buf_vec[vec_idx].buf_len =\n-\t\t\t\t\tvq->desc[idx].len;\n-\t\t\t\tvq->buf_vec[vec_idx].desc_idx = idx;\n-\t\t\t\tvec_idx++;\n-\n-\t\t\t\tif (vq->desc[idx].flags & VRING_DESC_F_NEXT) {\n-\t\t\t\t\tidx = vq->desc[idx].next;\n-\t\t\t\t\tnext_desc = 1;\n-\t\t\t\t}\n-\t\t\t} while (next_desc);\n-\t\t}\n-\n-\t\tres_end_idx = res_cur_idx;\n-\n-\t\tentry_success = copy_from_mbuf_to_vring(dev, res_base_idx,\n-\t\t\tres_end_idx, pkts[pkt_idx]);\n-\n-\t\trte_compiler_barrier();\n-\n-\t\t/*\n-\t\t * Wait until it's our turn to add our buffer\n-\t\t * to the used ring.\n-\t\t */\n-\t\twhile (unlikely(vq->last_used_idx != res_base_idx))\n-\t\t\trte_pause();\n-\n-\t\t*(volatile uint16_t *)&vq->used->idx += entry_success;\n-\t\tvq->last_used_idx = res_end_idx;\n-\n-\t\t/* Kick the guest if necessary. */\n-\t\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n-\t\t\teventfd_write((int)vq->kickfd, 1);\n-\t}\n-\n-\treturn count;\n-}\n-\n-/*\n- * Compares a packet destination MAC address to a device MAC address.\n- */\n-static inline int __attribute__((always_inline))\n-ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)\n-{\n-\treturn (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);\n-}\n-\n-/*\n- * This function learns the MAC address of the device and registers this along with a\n- * vlan tag to a VMDQ.\n- */\n-static int\n-link_vmdq(struct virtio_net *dev, struct rte_mbuf *m)\n-{\n-\tstruct ether_hdr *pkt_hdr;\n-\tstruct virtio_net_data_ll *dev_ll;\n-\tint i, ret;\n-\n-\t/* Learn MAC address of guest device from packet */\n-\tpkt_hdr = (struct ether_hdr *)m->pkt.data;\n-\n-\tdev_ll = ll_root_used;\n-\n-\twhile (dev_ll != NULL) {\n-\t\tif (ether_addr_cmp(&(pkt_hdr->s_addr), &dev_ll->dev->mac_address)) {\n-\t\t\tRTE_LOG(INFO, VHOST_DATA, \"(%\"PRIu64\") WARNING: This device is using an existing MAC address and has not been registered.\\n\", dev->device_fh);\n-\t\t\treturn -1;\n-\t\t}\n-\t\tdev_ll = dev_ll->next;\n-\t}\n-\n-\tfor (i = 0; i < ETHER_ADDR_LEN; i++)\n-\t\tdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];\n-\n-\t/* vlan_tag currently uses the device_id. 
*/\n-\tdev->vlan_tag = vlan_tags[dev->device_fh];\n-\n-\t/* Print out VMDQ registration info. */\n-\tRTE_LOG(INFO, VHOST_DATA, \"(%\"PRIu64\") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\\n\",\n-\t\tdev->device_fh,\n-\t\tdev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],\n-\t\tdev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],\n-\t\tdev->mac_address.addr_bytes[4], dev->mac_address.addr_bytes[5],\n-\t\tdev->vlan_tag);\n-\n-\t/* Register the MAC address. */\n-\tret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);\n-\tif (ret)\n-\t\tRTE_LOG(ERR, VHOST_DATA, \"(%\"PRIu64\") Failed to add device MAC address to VMDQ\\n\",\n-\t\t\t\t\tdev->device_fh);\n-\n-\t/* Enable stripping of the vlan tag as we handle routing. */\n-\trte_eth_dev_set_vlan_strip_on_queue(ports[0], (uint16_t)dev->vmdq_rx_q, 1);\n-\n-\t/* Set device as ready for RX. */\n-\tdev->ready = DEVICE_RX;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX\n- * queue before disabling RX on the device.\n- */\n-static inline void\n-unlink_vmdq(struct virtio_net *dev)\n-{\n-\tunsigned i = 0;\n-\tunsigned rx_count;\n-\tstruct rte_mbuf *pkts_burst[MAX_PKT_BURST];\n-\n-\tif (dev->ready == DEVICE_RX) {\n-\t\t/*clear MAC and VLAN settings*/\n-\t\trte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);\n-\t\tfor (i = 0; i < 6; i++)\n-\t\t\tdev->mac_address.addr_bytes[i] = 0;\n-\n-\t\tdev->vlan_tag = 0;\n-\n-\t\t/*Clear out the receive buffers*/\n-\t\trx_count = rte_eth_rx_burst(ports[0],\n-\t\t\t\t\t(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);\n-\n-\t\twhile (rx_count) {\n-\t\t\tfor (i = 0; i < rx_count; i++)\n-\t\t\t\trte_pktmbuf_free(pkts_burst[i]);\n-\n-\t\t\trx_count = rte_eth_rx_burst(ports[0],\n-\t\t\t\t\t(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);\n-\t\t}\n-\n-\t\tdev->ready = DEVICE_MAC_LEARNING;\n-\t}\n-}\n-\n-/*\n- * Check if the packet destination MAC address is for a local device. If so then put\n- * the packet on that devices RX queue. If not then return.\n- */\n-static inline unsigned __attribute__((always_inline))\n-virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)\n-{\n-\tstruct virtio_net_data_ll *dev_ll;\n-\tstruct ether_hdr *pkt_hdr;\n-\tuint64_t ret = 0;\n-\n-\tpkt_hdr = (struct ether_hdr *)m->pkt.data;\n-\n-\t/*get the used devices list*/\n-\tdev_ll = ll_root_used;\n-\n-\twhile (dev_ll != NULL) {\n-\t\tif ((dev_ll->dev->ready == DEVICE_RX) && ether_addr_cmp(&(pkt_hdr->d_addr),\n-\t\t\t\t          &dev_ll->dev->mac_address)) {\n-\n-\t\t\t/* Drop the packet if the TX packet is destined for the TX device. */\n-\t\t\tif (dev_ll->dev->device_fh == dev->device_fh) {\n-\t\t\t\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") TX: Source and destination MAC addresses are the same. 
Dropping packet.\\n\",\n-\t\t\t\t\t\t\tdev_ll->dev->device_fh);\n-\t\t\t\treturn 0;\n-\t\t\t}\n-\n-\n-\t\t\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") TX: MAC address is local\\n\", dev_ll->dev->device_fh);\n-\n-\t\t\tif (dev_ll->dev->remove) {\n-\t\t\t\t/*drop the packet if the device is marked for removal*/\n-\t\t\t\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Device is marked for removal\\n\", dev_ll->dev->device_fh);\n-\t\t\t} else {\n-\t\t\t\tuint32_t mergeable =\n-\t\t\t\t\tdev_ll->dev->features &\n-\t\t\t\t\t(1 << VIRTIO_NET_F_MRG_RXBUF);\n-\n-\t\t\t\t/*send the packet to the local virtio device*/\n-\t\t\t\tif (likely(mergeable == 0))\n-\t\t\t\t\tret = virtio_dev_rx(dev_ll->dev, &m, 1);\n-\t\t\t\telse\n-\t\t\t\t\tret = virtio_dev_merge_rx(dev_ll->dev,\n-\t\t\t\t\t\t&m, 1);\n-\n-\t\t\t\tif (enable_stats) {\n-\t\t\t\t\trte_atomic64_add(\n-\t\t\t\t\t&dev_statistics[dev_ll->dev->device_fh].rx_total_atomic,\n-\t\t\t\t\t1);\n-\t\t\t\t\trte_atomic64_add(\n-\t\t\t\t\t&dev_statistics[dev_ll->dev->device_fh].rx_atomic,\n-\t\t\t\t\tret);\n-\t\t\t\t\tdev_statistics[dev->device_fh].tx_total++;\n-\t\t\t\t\tdev_statistics[dev->device_fh].tx += ret;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\treturn 0;\n-\t\t}\n-\t\tdev_ll = dev_ll->next;\n-\t}\n-\n-\treturn -1;\n-}\n-\n-/*\n- * This function routes the TX packet to the correct interface. This may be a local device\n- * or the physical port.\n- */\n-static inline void __attribute__((always_inline))\n-virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)\n-{\n-\tstruct mbuf_table *tx_q;\n-\tstruct vlan_ethhdr *vlan_hdr;\n-\tstruct rte_mbuf **m_table;\n-\tstruct rte_mbuf *mbuf, *prev;\n-\tunsigned len, ret, offset = 0;\n-\tconst uint16_t lcore_id = rte_lcore_id();\n-\tstruct virtio_net_data_ll *dev_ll = ll_root_used;\n-\tstruct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;\n-\n-\t/*check if destination is local VM*/\n-\tif ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(dev, m) == 0))\n-\t\treturn;\n-\n-\tif (vm2vm_mode == VM2VM_HARDWARE) {\n-\t\twhile (dev_ll != NULL) {\n-\t\t\tif ((dev_ll->dev->ready == DEVICE_RX)\n-\t\t\t\t&& ether_addr_cmp(&(pkt_hdr->d_addr),\n-\t\t\t\t&dev_ll->dev->mac_address)) {\n-\t\t\t\t/*\n-\t\t\t\t * Drop the packet if the TX packet is\n-\t\t\t\t * destined for the TX device.\n-\t\t\t\t */\n-\t\t\t\tif (dev_ll->dev->device_fh == dev->device_fh) {\n-\t\t\t\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\t\t\"(%\"PRIu64\") TX: Source and destination\"\n-\t\t\t\t\t\" MAC addresses are the same. Dropping \"\n-\t\t\t\t\t\"packet.\\n\",\n-\t\t\t\t\tdev_ll->dev->device_fh);\n-\t\t\t\t\treturn;\n-\t\t\t\t}\n-\t\t\t\toffset = 4;\n-\t\t\t\tvlan_tag =\n-\t\t\t\t(uint16_t)\n-\t\t\t\tvlan_tags[(uint16_t)dev_ll->dev->device_fh];\n-\n-\t\t\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\t\"(%\"PRIu64\") TX: pkt to local VM device id:\"\n-\t\t\t\t\"(%\"PRIu64\") vlan tag: %d.\\n\",\n-\t\t\t\tdev->device_fh, dev_ll->dev->device_fh,\n-\t\t\t\tvlan_tag);\n-\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t\tdev_ll = dev_ll->next;\n-\t\t}\n-\t}\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") TX: MAC address is external\\n\", dev->device_fh);\n-\n-\t/*Add packet to the port tx queue*/\n-\ttx_q = &lcore_tx_queue[lcore_id];\n-\tlen = tx_q->len;\n-\n-\t/* Allocate an mbuf and populate the structure. 
*/\n-\tmbuf = rte_pktmbuf_alloc(mbuf_pool);\n-\tif (unlikely(mbuf == NULL)) {\n-\t\tRTE_LOG(ERR, VHOST_DATA,\n-\t\t\t\"Failed to allocate memory for mbuf.\\n\");\n-\t\treturn;\n-\t}\n-\n-\tmbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN + offset;\n-\tmbuf->pkt.pkt_len = m->pkt.pkt_len + VLAN_HLEN + offset;\n-\tmbuf->pkt.nb_segs = m->pkt.nb_segs;\n-\n-\t/* Copy ethernet header to mbuf. */\n-\trte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);\n-\n-\n-\t/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/\n-\tvlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;\n-\tvlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;\n-\tvlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);\n-\tvlan_hdr->h_vlan_TCI = htons(vlan_tag);\n-\n-\t/* Copy the remaining packet contents to the mbuf. */\n-\trte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),\n-\t\t(const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));\n-\n-\t/* Copy the remaining segments for the whole packet. */\n-\tprev = mbuf;\n-\twhile (m->pkt.next) {\n-\t\t/* Allocate an mbuf and populate the structure. */\n-\t\tstruct rte_mbuf *next_mbuf = rte_pktmbuf_alloc(mbuf_pool);\n-\t\tif (unlikely(next_mbuf == NULL)) {\n-\t\t\trte_pktmbuf_free(mbuf);\n-\t\t\tRTE_LOG(ERR, VHOST_DATA,\n-\t\t\t\t\"Failed to allocate memory for mbuf.\\n\");\n-\t\t\treturn;\n-\t\t}\n-\n-\t\tm = m->pkt.next;\n-\t\tprev->pkt.next = next_mbuf;\n-\t\tprev = next_mbuf;\n-\t\tnext_mbuf->pkt.data_len = m->pkt.data_len;\n-\n-\t\t/* Copy data to next mbuf. */\n-\t\trte_memcpy(rte_pktmbuf_mtod(next_mbuf, void *),\n-\t\t\trte_pktmbuf_mtod(m, const void *), m->pkt.data_len);\n-\t}\n-\n-\ttx_q->m_table[len] = mbuf;\n-\tlen++;\n-\tif (enable_stats) {\n-\t\tdev_statistics[dev->device_fh].tx_total++;\n-\t\tdev_statistics[dev->device_fh].tx++;\n-\t}\n-\n-\tif (unlikely(len == MAX_PKT_BURST)) {\n-\t\tm_table = (struct rte_mbuf **)tx_q->m_table;\n-\t\tret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);\n-\t\t/* Free any buffers not handled by TX and update the port stats. */\n-\t\tif (unlikely(ret < len)) {\n-\t\t\tdo {\n-\t\t\t\trte_pktmbuf_free(m_table[ret]);\n-\t\t\t} while (++ret < len);\n-\t\t}\n-\n-\t\tlen = 0;\n-\t}\n-\n-\ttx_q->len = len;\n-\treturn;\n-}\n-\n-static inline void __attribute__((always_inline))\n-virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)\n-{\n-\tstruct rte_mbuf m;\n-\tstruct vhost_virtqueue *vq;\n-\tstruct vring_desc *desc;\n-\tuint64_t buff_addr = 0;\n-\tuint32_t head[MAX_PKT_BURST];\n-\tuint32_t used_idx;\n-\tuint32_t i;\n-\tuint16_t free_entries, packet_success = 0;\n-\tuint16_t avail_idx;\n-\n-\tvq = dev->virtqueue[VIRTIO_TXQ];\n-\tavail_idx =  *((volatile uint16_t *)&vq->avail->idx);\n-\n-\t/* If there are no available buffers then return. */\n-\tif (vq->last_used_idx == avail_idx)\n-\t\treturn;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") virtio_dev_tx()\\n\", dev->device_fh);\n-\n-\t/* Prefetch available ring to retrieve head indexes. */\n-\trte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);\n-\n-\t/*get the number of free entries in the ring*/\n-\tfree_entries = (avail_idx - vq->last_used_idx);\n-\n-\t/* Limit to MAX_PKT_BURST. */\n-\tif (free_entries > MAX_PKT_BURST)\n-\t\tfree_entries = MAX_PKT_BURST;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Buffers available %d\\n\", dev->device_fh, free_entries);\n-\t/* Retrieve all of the head indexes first to avoid caching issues. 
*/\n-\tfor (i = 0; i < free_entries; i++)\n-\t\thead[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];\n-\n-\t/* Prefetch descriptor index. */\n-\trte_prefetch0(&vq->desc[head[packet_success]]);\n-\trte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);\n-\n-\twhile (packet_success < free_entries) {\n-\t\tdesc = &vq->desc[head[packet_success]];\n-\n-\t\t/* Discard first buffer as it is the virtio header */\n-\t\tdesc = &vq->desc[desc->next];\n-\n-\t\t/* Buffer address translation. */\n-\t\tbuff_addr = gpa_to_vva(dev, desc->addr);\n-\t\t/* Prefetch buffer address. */\n-\t\trte_prefetch0((void*)(uintptr_t)buff_addr);\n-\n-\t\tused_idx = vq->last_used_idx & (vq->size - 1);\n-\n-\t\tif (packet_success < (free_entries - 1)) {\n-\t\t\t/* Prefetch descriptor index. */\n-\t\t\trte_prefetch0(&vq->desc[head[packet_success+1]]);\n-\t\t\trte_prefetch0(&vq->used->ring[(used_idx + 1) & (vq->size - 1)]);\n-\t\t}\n-\n-\t\t/* Update used index buffer information. */\n-\t\tvq->used->ring[used_idx].id = head[packet_success];\n-\t\tvq->used->ring[used_idx].len = 0;\n-\n-\t\t/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */\n-\t\tm.pkt.data_len = desc->len;\n-\t\tm.pkt.pkt_len = desc->len;\n-\t\tm.pkt.data = (void*)(uintptr_t)buff_addr;\n-\n-\t\tPRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);\n-\n-\t\t/* If this is the first received packet we need to learn the MAC and setup VMDQ */\n-\t\tif (dev->ready == DEVICE_MAC_LEARNING) {\n-\t\t\tif (dev->remove || (link_vmdq(dev, &m) == -1)) {\n-\t\t\t\t/*discard frame if device is scheduled for removal or a duplicate MAC address is found. */\n-\t\t\t\tpacket_success += free_entries;\n-\t\t\t\tvq->last_used_idx += packet_success;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\t\tvirtio_tx_route(dev, &m, mbuf_pool, (uint16_t)dev->device_fh);\n-\n-\t\tvq->last_used_idx++;\n-\t\tpacket_success++;\n-\t}\n-\n-\trte_compiler_barrier();\n-\tvq->used->idx += packet_success;\n-\t/* Kick guest if required. */\n-\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n-\t\teventfd_write((int)vq->kickfd, 1);\n-}\n-\n-/* This function works for TX packets with mergeable feature enabled. */\n-static inline void __attribute__((always_inline))\n-virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)\n-{\n-\tstruct rte_mbuf *m, *prev;\n-\tstruct vhost_virtqueue *vq;\n-\tstruct vring_desc *desc;\n-\tuint64_t vb_addr = 0;\n-\tuint32_t head[MAX_PKT_BURST];\n-\tuint32_t used_idx;\n-\tuint32_t i;\n-\tuint16_t free_entries, entry_success = 0;\n-\tuint16_t avail_idx;\n-\tuint32_t buf_size = MBUF_SIZE - (sizeof(struct rte_mbuf)\n-\t\t\t+ RTE_PKTMBUF_HEADROOM);\n-\n-\tvq = dev->virtqueue[VIRTIO_TXQ];\n-\tavail_idx =  *((volatile uint16_t *)&vq->avail->idx);\n-\n-\t/* If there are no available buffers then return. */\n-\tif (vq->last_used_idx == avail_idx)\n-\t\treturn;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") virtio_dev_merge_tx()\\n\",\n-\t\tdev->device_fh);\n-\n-\t/* Prefetch available ring to retrieve head indexes. */\n-\trte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);\n-\n-\t/*get the number of free entries in the ring*/\n-\tfree_entries = (avail_idx - vq->last_used_idx);\n-\n-\t/* Limit to MAX_PKT_BURST. */\n-\tfree_entries = RTE_MIN(free_entries, MAX_PKT_BURST);\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Buffers available %d\\n\",\n-\t\tdev->device_fh, free_entries);\n-\t/* Retrieve all of the head indexes first to avoid caching issues. 
*/\n-\tfor (i = 0; i < free_entries; i++)\n-\t\thead[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];\n-\n-\t/* Prefetch descriptor index. */\n-\trte_prefetch0(&vq->desc[head[entry_success]]);\n-\trte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);\n-\n-\twhile (entry_success < free_entries) {\n-\t\tuint32_t vb_avail, vb_offset;\n-\t\tuint32_t seg_avail, seg_offset;\n-\t\tuint32_t cpy_len;\n-\t\tuint32_t seg_num = 0;\n-\t\tstruct rte_mbuf *cur;\n-\t\tuint8_t alloc_err = 0;\n-\n-\t\tdesc = &vq->desc[head[entry_success]];\n-\n-\t\t/* Discard first buffer as it is the virtio header */\n-\t\tdesc = &vq->desc[desc->next];\n-\n-\t\t/* Buffer address translation. */\n-\t\tvb_addr = gpa_to_vva(dev, desc->addr);\n-\t\t/* Prefetch buffer address. */\n-\t\trte_prefetch0((void *)(uintptr_t)vb_addr);\n-\n-\t\tused_idx = vq->last_used_idx & (vq->size - 1);\n-\n-\t\tif (entry_success < (free_entries - 1)) {\n-\t\t\t/* Prefetch descriptor index. */\n-\t\t\trte_prefetch0(&vq->desc[head[entry_success+1]]);\n-\t\t\trte_prefetch0(&vq->used->ring[(used_idx + 1) & (vq->size - 1)]);\n-\t\t}\n-\n-\t\t/* Update used index buffer information. */\n-\t\tvq->used->ring[used_idx].id = head[entry_success];\n-\t\tvq->used->ring[used_idx].len = 0;\n-\n-\t\tvb_offset = 0;\n-\t\tvb_avail = desc->len;\n-\t\tseg_offset = 0;\n-\t\tseg_avail = buf_size;\n-\t\tcpy_len = RTE_MIN(vb_avail, seg_avail);\n-\n-\t\tPRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);\n-\n-\t\t/* Allocate an mbuf and populate the structure. */\n-\t\tm = rte_pktmbuf_alloc(mbuf_pool);\n-\t\tif (unlikely(m == NULL)) {\n-\t\t\tRTE_LOG(ERR, VHOST_DATA,\n-\t\t\t\t\"Failed to allocate memory for mbuf.\\n\");\n-\t\t\treturn;\n-\t\t}\n-\n-\t\tseg_num++;\n-\t\tcur = m;\n-\t\tprev = m;\n-\t\twhile (cpy_len != 0) {\n-\t\t\trte_memcpy((void *)(rte_pktmbuf_mtod(cur, char *) + seg_offset),\n-\t\t\t\t(void *)((uintptr_t)(vb_addr + vb_offset)),\n-\t\t\t\tcpy_len);\n-\n-\t\t\tseg_offset += cpy_len;\n-\t\t\tvb_offset += cpy_len;\n-\t\t\tvb_avail -= cpy_len;\n-\t\t\tseg_avail -= cpy_len;\n-\n-\t\t\tif (vb_avail != 0) {\n-\t\t\t\t/*\n-\t\t\t\t * The segment reachs to its end,\n-\t\t\t\t * while the virtio buffer in TX vring has\n-\t\t\t\t * more data to be copied.\n-\t\t\t\t */\n-\t\t\t\tcur->pkt.data_len = seg_offset;\n-\t\t\t\tm->pkt.pkt_len += seg_offset;\n-\t\t\t\t/* Allocate mbuf and populate the structure. 
*/\n-\t\t\t\tcur = rte_pktmbuf_alloc(mbuf_pool);\n-\t\t\t\tif (unlikely(cur == NULL)) {\n-\t\t\t\t\tRTE_LOG(ERR, VHOST_DATA, \"Failed to \"\n-\t\t\t\t\t\t\"allocate memory for mbuf.\\n\");\n-\t\t\t\t\trte_pktmbuf_free(m);\n-\t\t\t\t\talloc_err = 1;\n-\t\t\t\t\tbreak;\n-\t\t\t\t}\n-\n-\t\t\t\tseg_num++;\n-\t\t\t\tprev->pkt.next = cur;\n-\t\t\t\tprev = cur;\n-\t\t\t\tseg_offset = 0;\n-\t\t\t\tseg_avail = buf_size;\n-\t\t\t} else {\n-\t\t\t\tif (desc->flags & VRING_DESC_F_NEXT) {\n-\t\t\t\t\t/*\n-\t\t\t\t\t * There are more virtio buffers in\n-\t\t\t\t\t * same vring entry need to be copied.\n-\t\t\t\t\t */\n-\t\t\t\t\tif (seg_avail == 0) {\n-\t\t\t\t\t\t/*\n-\t\t\t\t\t\t * The current segment hasn't\n-\t\t\t\t\t\t * room to accomodate more\n-\t\t\t\t\t\t * data.\n-\t\t\t\t\t\t */\n-\t\t\t\t\t\tcur->pkt.data_len = seg_offset;\n-\t\t\t\t\t\tm->pkt.pkt_len += seg_offset;\n-\t\t\t\t\t\t/*\n-\t\t\t\t\t\t * Allocate an mbuf and\n-\t\t\t\t\t\t * populate the structure.\n-\t\t\t\t\t\t */\n-\t\t\t\t\t\tcur = rte_pktmbuf_alloc(mbuf_pool);\n-\t\t\t\t\t\tif (unlikely(cur == NULL)) {\n-\t\t\t\t\t\t\tRTE_LOG(ERR,\n-\t\t\t\t\t\t\t\tVHOST_DATA,\n-\t\t\t\t\t\t\t\t\"Failed to \"\n-\t\t\t\t\t\t\t\t\"allocate memory \"\n-\t\t\t\t\t\t\t\t\"for mbuf\\n\");\n-\t\t\t\t\t\t\trte_pktmbuf_free(m);\n-\t\t\t\t\t\t\talloc_err = 1;\n-\t\t\t\t\t\t\tbreak;\n-\t\t\t\t\t\t}\n-\t\t\t\t\t\tseg_num++;\n-\t\t\t\t\t\tprev->pkt.next = cur;\n-\t\t\t\t\t\tprev = cur;\n-\t\t\t\t\t\tseg_offset = 0;\n-\t\t\t\t\t\tseg_avail = buf_size;\n-\t\t\t\t\t}\n-\n-\t\t\t\t\tdesc = &vq->desc[desc->next];\n-\n-\t\t\t\t\t/* Buffer address translation. */\n-\t\t\t\t\tvb_addr = gpa_to_vva(dev, desc->addr);\n-\t\t\t\t\t/* Prefetch buffer address. */\n-\t\t\t\t\trte_prefetch0((void *)(uintptr_t)vb_addr);\n-\t\t\t\t\tvb_offset = 0;\n-\t\t\t\t\tvb_avail = desc->len;\n-\n-\t\t\t\t\tPRINT_PACKET(dev, (uintptr_t)vb_addr,\n-\t\t\t\t\t\tdesc->len, 0);\n-\t\t\t\t} else {\n-\t\t\t\t\t/* The whole packet completes. */\n-\t\t\t\t\tcur->pkt.data_len = seg_offset;\n-\t\t\t\t\tm->pkt.pkt_len += seg_offset;\n-\t\t\t\t\tvb_avail = 0;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\tcpy_len = RTE_MIN(vb_avail, seg_avail);\n-\t\t}\n-\n-\t\tif (unlikely(alloc_err == 1))\n-\t\t\tbreak;\n-\n-\t\tm->pkt.nb_segs = seg_num;\n-\n-\t\t/*\n-\t\t * If this is the first received packet we need to learn\n-\t\t * the MAC and setup VMDQ\n-\t\t */\n-\t\tif (dev->ready == DEVICE_MAC_LEARNING) {\n-\t\t\tif (dev->remove || (link_vmdq(dev, m) == -1)) {\n-\t\t\t\t/*\n-\t\t\t\t * Discard frame if device is scheduled for\n-\t\t\t\t * removal or a duplicate MAC address is found.\n-\t\t\t\t */\n-\t\t\t\tentry_success = free_entries;\n-\t\t\t\tvq->last_used_idx += entry_success;\n-\t\t\t\trte_pktmbuf_free(m);\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\n-\t\tvirtio_tx_route(dev, m, mbuf_pool, (uint16_t)dev->device_fh);\n-\t\tvq->last_used_idx++;\n-\t\tentry_success++;\n-\t\trte_pktmbuf_free(m);\n-\t}\n-\n-\trte_compiler_barrier();\n-\tvq->used->idx += entry_success;\n-\t/* Kick guest if required. */\n-\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n-\t\teventfd_write((int)vq->kickfd, 1);\n-\n-}\n-\n-/*\n- * This function is called by each data core. It handles all RX/TX registered with the\n- * core. For TX the specific lcore linked list is used. 
For RX, MAC addresses are compared\n- * with all devices in the main linked list.\n- */\n-static int\n-switch_worker(__attribute__((unused)) void *arg)\n-{\n-\tstruct rte_mempool *mbuf_pool = arg;\n-\tstruct virtio_net *dev = NULL;\n-\tstruct rte_mbuf *pkts_burst[MAX_PKT_BURST];\n-\tstruct virtio_net_data_ll *dev_ll;\n-\tstruct mbuf_table *tx_q;\n-\tvolatile struct lcore_ll_info *lcore_ll;\n-\tconst uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;\n-\tuint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;\n-\tunsigned ret, i;\n-\tconst uint16_t lcore_id = rte_lcore_id();\n-\tconst uint16_t num_cores = (uint16_t)rte_lcore_count();\n-\tuint16_t rx_count = 0;\n-\tuint32_t mergeable = 0;\n-\n-\tRTE_LOG(INFO, VHOST_DATA, \"Procesing on Core %u started\\n\", lcore_id);\n-\tlcore_ll = lcore_info[lcore_id].lcore_ll;\n-\tprev_tsc = 0;\n-\n-\ttx_q = &lcore_tx_queue[lcore_id];\n-\tfor (i = 0; i < num_cores; i ++) {\n-\t\tif (lcore_ids[i] == lcore_id) {\n-\t\t\ttx_q->txq_id = i;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\n-\twhile(1) {\n-\t\tcur_tsc = rte_rdtsc();\n-\t\t/*\n-\t\t * TX burst queue drain\n-\t\t */\n-\t\tdiff_tsc = cur_tsc - prev_tsc;\n-\t\tif (unlikely(diff_tsc > drain_tsc)) {\n-\n-\t\t\tif (tx_q->len) {\n-\t\t\t\tLOG_DEBUG(VHOST_DATA, \"TX queue drained after timeout with burst size %u \\n\", tx_q->len);\n-\n-\t\t\t\t/*Tx any packets in the queue*/\n-\t\t\t\tret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,\n-\t\t\t\t\t\t\t\t\t   (struct rte_mbuf **)tx_q->m_table,\n-\t\t\t\t\t\t\t\t\t   (uint16_t)tx_q->len);\n-\t\t\t\tif (unlikely(ret < tx_q->len)) {\n-\t\t\t\t\tdo {\n-\t\t\t\t\t\trte_pktmbuf_free(tx_q->m_table[ret]);\n-\t\t\t\t\t} while (++ret < tx_q->len);\n-\t\t\t\t}\n-\n-\t\t\t\ttx_q->len = 0;\n-\t\t\t}\n-\n-\t\t\tprev_tsc = cur_tsc;\n-\n-\t\t}\n-\n-\t\trte_prefetch0(lcore_ll->ll_root_used);\n-\t\t/*\n-\t\t * Inform the configuration core that we have exited the linked list and that no devices are\n-\t\t * in use if requested.\n-\t\t */\n-\t\tif (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)\n-\t\t\tlcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;\n-\n-\t\t/*\n-\t\t * Process devices\n-\t\t */\n-\t\tdev_ll = lcore_ll->ll_root_used;\n-\n-\t\twhile (dev_ll != NULL) {\n-\t\t\t/*get virtio device ID*/\n-\t\t\tdev = dev_ll->dev;\n-\t\t\tmergeable =\n-\t\t\t\tdev->features & (1 << VIRTIO_NET_F_MRG_RXBUF);\n-\n-\t\t\tif (dev->remove) {\n-\t\t\t\tdev_ll = dev_ll->next;\n-\t\t\t\tunlink_vmdq(dev);\n-\t\t\t\tdev->ready = DEVICE_SAFE_REMOVE;\n-\t\t\t\tcontinue;\n-\t\t\t}\n-\t\t\tif (likely(dev->ready == DEVICE_RX)) {\n-\t\t\t\t/*Handle guest RX*/\n-\t\t\t\trx_count = rte_eth_rx_burst(ports[0],\n-\t\t\t\t\t(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);\n-\n-\t\t\t\tif (rx_count) {\n-\t\t\t\t\tif (likely(mergeable == 0))\n-\t\t\t\t\t\tret_count =\n-\t\t\t\t\t\t\tvirtio_dev_rx(dev,\n-\t\t\t\t\t\t\tpkts_burst, rx_count);\n-\t\t\t\t\telse\n-\t\t\t\t\t\tret_count =\n-\t\t\t\t\t\t\tvirtio_dev_merge_rx(dev,\n-\t\t\t\t\t\t\tpkts_burst, rx_count);\n-\n-\t\t\t\t\tif (enable_stats) {\n-\t\t\t\t\t\trte_atomic64_add(\n-\t\t\t\t\t\t&dev_statistics[dev_ll->dev->device_fh].rx_total_atomic,\n-\t\t\t\t\t\trx_count);\n-\t\t\t\t\t\trte_atomic64_add(\n-\t\t\t\t\t\t&dev_statistics[dev_ll->dev->device_fh].rx_atomic, ret_count);\n-\t\t\t\t\t}\n-\t\t\t\t\twhile (likely(rx_count)) {\n-\t\t\t\t\t\trx_count--;\n-\t\t\t\t\t\trte_pktmbuf_free(pkts_burst[rx_count]);\n-\t\t\t\t\t}\n-\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\tif (!dev->remove) {\n-\t\t\t\t/*Handle guest TX*/\n-\t\t\t\tif 
(likely(mergeable == 0))\n-\t\t\t\t\tvirtio_dev_tx(dev, mbuf_pool);\n-\t\t\t\telse\n-\t\t\t\t\tvirtio_dev_merge_tx(dev, mbuf_pool);\n-\t\t\t}\n-\n-\t\t\t/*move to the next device in the list*/\n-\t\t\tdev_ll = dev_ll->next;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/*\n- * This function gets available ring number for zero copy rx.\n- * Only one thread will call this funciton for a paticular virtio device,\n- * so, it is designed as non-thread-safe function.\n- */\n-static inline uint32_t __attribute__((always_inline))\n-get_available_ring_num_zcp(struct virtio_net *dev)\n-{\n-\tstruct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];\n-\tuint16_t avail_idx;\n-\n-\tavail_idx = *((volatile uint16_t *)&vq->avail->idx);\n-\treturn (uint32_t)(avail_idx - vq->last_used_idx_res);\n-}\n-\n-/*\n- * This function gets available ring index for zero copy rx,\n- * it will retry 'burst_rx_retry_num' times till it get enough ring index.\n- * Only one thread will call this funciton for a paticular virtio device,\n- * so, it is designed as non-thread-safe function.\n- */\n-static inline uint32_t __attribute__((always_inline))\n-get_available_ring_index_zcp(struct virtio_net *dev,\n-\tuint16_t *res_base_idx, uint32_t count)\n-{\n-\tstruct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];\n-\tuint16_t avail_idx;\n-\tuint32_t retry = 0;\n-\tuint16_t free_entries;\n-\n-\t*res_base_idx = vq->last_used_idx_res;\n-\tavail_idx = *((volatile uint16_t *)&vq->avail->idx);\n-\tfree_entries = (avail_idx - *res_base_idx);\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") in get_available_ring_index_zcp: \"\n-\t\t\t\"avail idx: %d, \"\n-\t\t\t\"res base idx:%d, free entries:%d\\n\",\n-\t\t\tdev->device_fh, avail_idx, *res_base_idx,\n-\t\t\tfree_entries);\n-\n-\t/*\n-\t * If retry is enabled and the queue is full then we wait\n-\t * and retry to avoid packet loss.\n-\t */\n-\tif (enable_retry && unlikely(count > free_entries)) {\n-\t\tfor (retry = 0; retry < burst_rx_retry_num; retry++) {\n-\t\t\trte_delay_us(burst_rx_delay_time);\n-\t\t\tavail_idx = *((volatile uint16_t *)&vq->avail->idx);\n-\t\t\tfree_entries = (avail_idx - *res_base_idx);\n-\t\t\tif (count <= free_entries)\n-\t\t\t\tbreak;\n-\t\t}\n-\t}\n-\n-\t/*check that we have enough buffers*/\n-\tif (unlikely(count > free_entries))\n-\t\tcount = free_entries;\n-\n-\tif (unlikely(count == 0)) {\n-\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\"(%\"PRIu64\") Fail in get_available_ring_index_zcp: \"\n-\t\t\t\"avail idx: %d, res base idx:%d, free entries:%d\\n\",\n-\t\t\tdev->device_fh, avail_idx,\n-\t\t\t*res_base_idx, free_entries);\n-\t\treturn 0;\n-\t}\n-\n-\tvq->last_used_idx_res = *res_base_idx + count;\n-\n-\treturn count;\n-}\n-\n-/*\n- * This function put descriptor back to used list.\n- */\n-static inline void __attribute__((always_inline))\n-put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx)\n-{\n-\tuint16_t res_cur_idx = vq->last_used_idx;\n-\tvq->used->ring[res_cur_idx & (vq->size - 1)].id = (uint32_t)desc_idx;\n-\tvq->used->ring[res_cur_idx & (vq->size - 1)].len = 0;\n-\trte_compiler_barrier();\n-\t*(volatile uint16_t *)&vq->used->idx += 1;\n-\tvq->last_used_idx += 1;\n-\n-\t/* Kick the guest if necessary. */\n-\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n-\t\teventfd_write((int)vq->kickfd, 1);\n-}\n-\n-/*\n- * This function get available descriptor from vitio vring and un-attached mbuf\n- * from vpool->ring, and then attach them together. 
It needs adjust the offset\n- * for buff_addr and phys_addr accroding to PMD implementation, otherwise the\n- * frame data may be put to wrong location in mbuf.\n- */\n-static inline void __attribute__((always_inline))\n-attach_rxmbuf_zcp(struct virtio_net *dev)\n-{\n-\tuint16_t res_base_idx, desc_idx;\n-\tuint64_t buff_addr, phys_addr;\n-\tstruct vhost_virtqueue *vq;\n-\tstruct vring_desc *desc;\n-\tstruct rte_mbuf *mbuf = NULL;\n-\tstruct vpool *vpool;\n-\thpa_type addr_type;\n-\n-\tvpool = &vpool_array[dev->vmdq_rx_q];\n-\tvq = dev->virtqueue[VIRTIO_RXQ];\n-\n-\tdo {\n-\t\tif (unlikely(get_available_ring_index_zcp(dev, &res_base_idx,\n-\t\t\t\t1) != 1))\n-\t\t\treturn;\n-\t\tdesc_idx = vq->avail->ring[(res_base_idx) & (vq->size - 1)];\n-\n-\t\tdesc = &vq->desc[desc_idx];\n-\t\tif (desc->flags & VRING_DESC_F_NEXT) {\n-\t\t\tdesc = &vq->desc[desc->next];\n-\t\t\tbuff_addr = gpa_to_vva(dev, desc->addr);\n-\t\t\tphys_addr = gpa_to_hpa(dev, desc->addr, desc->len,\n-\t\t\t\t\t&addr_type);\n-\t\t} else {\n-\t\t\tbuff_addr = gpa_to_vva(dev,\n-\t\t\t\t\tdesc->addr + vq->vhost_hlen);\n-\t\t\tphys_addr = gpa_to_hpa(dev,\n-\t\t\t\t\tdesc->addr + vq->vhost_hlen,\n-\t\t\t\t\tdesc->len, &addr_type);\n-\t\t}\n-\n-\t\tif (unlikely(addr_type == PHYS_ADDR_INVALID)) {\n-\t\t\tRTE_LOG(ERR, VHOST_DATA, \"(%\"PRIu64\") Invalid frame buffer\"\n-\t\t\t\t\" address found when attaching RX frame buffer\"\n-\t\t\t\t\" address!\\n\", dev->device_fh);\n-\t\t\tput_desc_to_used_list_zcp(vq, desc_idx);\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\t/*\n-\t\t * Check if the frame buffer address from guest crosses\n-\t\t * sub-region or not.\n-\t\t */\n-\t\tif (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {\n-\t\t\tRTE_LOG(ERR, VHOST_DATA,\n-\t\t\t\t\"(%\"PRIu64\") Frame buffer address cross \"\n-\t\t\t\t\"sub-regioin found when attaching RX frame \"\n-\t\t\t\t\"buffer address!\\n\",\n-\t\t\t\tdev->device_fh);\n-\t\t\tput_desc_to_used_list_zcp(vq, desc_idx);\n-\t\t\tcontinue;\n-\t\t}\n-\t} while (unlikely(phys_addr == 0));\n-\n-\trte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);\n-\tif (unlikely(mbuf == NULL)) {\n-\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\"(%\"PRIu64\") in attach_rxmbuf_zcp: \"\n-\t\t\t\"ring_sc_dequeue fail.\\n\",\n-\t\t\tdev->device_fh);\n-\t\tput_desc_to_used_list_zcp(vq, desc_idx);\n-\t\treturn;\n-\t}\n-\n-\tif (unlikely(vpool->buf_size > desc->len)) {\n-\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\"(%\"PRIu64\") in attach_rxmbuf_zcp: frame buffer \"\n-\t\t\t\"length(%d) of descriptor idx: %d less than room \"\n-\t\t\t\"size required: %d\\n\",\n-\t\t\tdev->device_fh, desc->len, desc_idx, vpool->buf_size);\n-\t\tput_desc_to_used_list_zcp(vq, desc_idx);\n-\t\trte_ring_sp_enqueue(vpool->ring, (void *)mbuf);\n-\t\treturn;\n-\t}\n-\n-\tmbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);\n-\tmbuf->pkt.data = (void *)(uintptr_t)(buff_addr);\n-\tmbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;\n-\tmbuf->pkt.data_len = desc->len;\n-\tMBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;\n-\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in attach_rxmbuf_zcp: res base idx:%d, \"\n-\t\t\"descriptor idx:%d\\n\",\n-\t\tdev->device_fh, res_base_idx, desc_idx);\n-\n-\t__rte_mbuf_raw_free(mbuf);\n-\n-\treturn;\n-}\n-\n-/*\n- * Detach an attched packet mbuf -\n- *  - restore original mbuf address and length values.\n- *  - reset pktmbuf data and data_len to their default values.\n- *  All other fields of the given packet mbuf will be left intact.\n- *\n- * @param m\n- *   The attached packet mbuf.\n- */\n-static inline 
void pktmbuf_detach_zcp(struct rte_mbuf *m)\n-{\n-\tconst struct rte_mempool *mp = m->pool;\n-\tvoid *buf = RTE_MBUF_TO_BADDR(m);\n-\tuint32_t buf_ofs;\n-\tuint32_t buf_len = mp->elt_size - sizeof(*m);\n-\tm->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(*m);\n-\n-\tm->buf_addr = buf;\n-\tm->buf_len = (uint16_t)buf_len;\n-\n-\tbuf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?\n-\t\t\tRTE_PKTMBUF_HEADROOM : m->buf_len;\n-\tm->pkt.data = (char *) m->buf_addr + buf_ofs;\n-\n-\tm->pkt.data_len = 0;\n-}\n-\n-/*\n- * This function is called after packets have been transimited. It fetchs mbuf\n- * from vpool->pool, detached it and put into vpool->ring. It also update the\n- * used index and kick the guest if necessary.\n- */\n-static inline uint32_t __attribute__((always_inline))\n-txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)\n-{\n-\tstruct rte_mbuf *mbuf;\n-\tstruct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];\n-\tuint32_t used_idx = vq->last_used_idx & (vq->size - 1);\n-\tuint32_t index = 0;\n-\tuint32_t mbuf_count = rte_mempool_count(vpool->pool);\n-\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in txmbuf_clean_zcp: mbuf count in mempool before \"\n-\t\t\"clean is: %d\\n\",\n-\t\tdev->device_fh, mbuf_count);\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in txmbuf_clean_zcp: mbuf count in  ring before \"\n-\t\t\"clean  is : %d\\n\",\n-\t\tdev->device_fh, rte_ring_count(vpool->ring));\n-\n-\tfor (index = 0; index < mbuf_count; index++) {\n-\t\tmbuf = __rte_mbuf_raw_alloc(vpool->pool);\n-\t\tif (likely(RTE_MBUF_INDIRECT(mbuf)))\n-\t\t\tpktmbuf_detach_zcp(mbuf);\n-\t\trte_ring_sp_enqueue(vpool->ring, mbuf);\n-\n-\t\t/* Update used index buffer information. */\n-\t\tvq->used->ring[used_idx].id = MBUF_HEADROOM_UINT32(mbuf);\n-\t\tvq->used->ring[used_idx].len = 0;\n-\n-\t\tused_idx = (used_idx + 1) & (vq->size - 1);\n-\t}\n-\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in txmbuf_clean_zcp: mbuf count in mempool after \"\n-\t\t\"clean is: %d\\n\",\n-\t\tdev->device_fh, rte_mempool_count(vpool->pool));\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in txmbuf_clean_zcp: mbuf count in  ring after \"\n-\t\t\"clean  is : %d\\n\",\n-\t\tdev->device_fh, rte_ring_count(vpool->ring));\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in txmbuf_clean_zcp: before updated \"\n-\t\t\"vq->last_used_idx:%d\\n\",\n-\t\tdev->device_fh, vq->last_used_idx);\n-\n-\tvq->last_used_idx += mbuf_count;\n-\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in txmbuf_clean_zcp: after updated \"\n-\t\t\"vq->last_used_idx:%d\\n\",\n-\t\tdev->device_fh, vq->last_used_idx);\n-\n-\trte_compiler_barrier();\n-\n-\t*(volatile uint16_t *)&vq->used->idx += mbuf_count;\n-\n-\t/* Kick guest if required. 
*/\n-\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n-\t\teventfd_write((int)vq->kickfd, 1);\n-\n-\treturn 0;\n-}\n-\n-/*\n- * This function is called when a virtio device is destroy.\n- * It fetchs mbuf from vpool->pool, and detached it, and put into vpool->ring.\n- */\n-static void mbuf_destroy_zcp(struct vpool *vpool)\n-{\n-\tstruct rte_mbuf *mbuf = NULL;\n-\tuint32_t index, mbuf_count = rte_mempool_count(vpool->pool);\n-\n-\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\"in mbuf_destroy_zcp: mbuf count in mempool before \"\n-\t\t\"mbuf_destroy_zcp is: %d\\n\",\n-\t\tmbuf_count);\n-\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\"in mbuf_destroy_zcp: mbuf count in  ring before \"\n-\t\t\"mbuf_destroy_zcp  is : %d\\n\",\n-\t\trte_ring_count(vpool->ring));\n-\n-\tfor (index = 0; index < mbuf_count; index++) {\n-\t\tmbuf = __rte_mbuf_raw_alloc(vpool->pool);\n-\t\tif (likely(mbuf != NULL)) {\n-\t\t\tif (likely(RTE_MBUF_INDIRECT(mbuf)))\n-\t\t\t\tpktmbuf_detach_zcp(mbuf);\n-\t\t\trte_ring_sp_enqueue(vpool->ring, (void *)mbuf);\n-\t\t}\n-\t}\n-\n-\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\"in mbuf_destroy_zcp: mbuf count in mempool after \"\n-\t\t\"mbuf_destroy_zcp is: %d\\n\",\n-\t\trte_mempool_count(vpool->pool));\n-\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\"in mbuf_destroy_zcp: mbuf count in ring after \"\n-\t\t\"mbuf_destroy_zcp is : %d\\n\",\n-\t\trte_ring_count(vpool->ring));\n-}\n-\n-/*\n- * This function update the use flag and counter.\n- */\n-static inline uint32_t __attribute__((always_inline))\n-virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,\n-\tuint32_t count)\n-{\n-\tstruct vhost_virtqueue *vq;\n-\tstruct vring_desc *desc;\n-\tstruct rte_mbuf *buff;\n-\t/* The virtio_hdr is initialised to 0. */\n-\tstruct virtio_net_hdr_mrg_rxbuf virtio_hdr\n-\t\t= {{0, 0, 0, 0, 0, 0}, 0};\n-\tuint64_t buff_hdr_addr = 0;\n-\tuint32_t head[MAX_PKT_BURST], packet_len = 0;\n-\tuint32_t head_idx, packet_success = 0;\n-\tuint16_t res_cur_idx;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") virtio_dev_rx()\\n\", dev->device_fh);\n-\n-\tif (count == 0)\n-\t\treturn 0;\n-\n-\tvq = dev->virtqueue[VIRTIO_RXQ];\n-\tcount = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;\n-\n-\tres_cur_idx = vq->last_used_idx;\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Current Index %d| End Index %d\\n\",\n-\t\tdev->device_fh, res_cur_idx, res_cur_idx + count);\n-\n-\t/* Retrieve all of the head indexes first to avoid caching issues. */\n-\tfor (head_idx = 0; head_idx < count; head_idx++)\n-\t\thead[head_idx] = MBUF_HEADROOM_UINT32(pkts[head_idx]);\n-\n-\t/*Prefetch descriptor index. */\n-\trte_prefetch0(&vq->desc[head[packet_success]]);\n-\n-\twhile (packet_success != count) {\n-\t\t/* Get descriptor from available ring */\n-\t\tdesc = &vq->desc[head[packet_success]];\n-\n-\t\tbuff = pkts[packet_success];\n-\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\"(%\"PRIu64\") in dev_rx_zcp: update the used idx for \"\n-\t\t\t\"pkt[%d] descriptor idx: %d\\n\",\n-\t\t\tdev->device_fh, packet_success,\n-\t\t\tMBUF_HEADROOM_UINT32(buff));\n-\n-\t\tPRINT_PACKET(dev,\n-\t\t\t(uintptr_t)(((uint64_t)(uintptr_t)buff->buf_addr)\n-\t\t\t+ RTE_PKTMBUF_HEADROOM),\n-\t\t\trte_pktmbuf_data_len(buff), 0);\n-\n-\t\t/* Buffer address translation for virtio header. 
*/\n-\t\tbuff_hdr_addr = gpa_to_vva(dev, desc->addr);\n-\t\tpacket_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;\n-\n-\t\t/*\n-\t\t * If the descriptors are chained the header and data are\n-\t\t * placed in separate buffers.\n-\t\t */\n-\t\tif (desc->flags & VRING_DESC_F_NEXT) {\n-\t\t\tdesc->len = vq->vhost_hlen;\n-\t\t\tdesc = &vq->desc[desc->next];\n-\t\t\tdesc->len = rte_pktmbuf_data_len(buff);\n-\t\t} else {\n-\t\t\tdesc->len = packet_len;\n-\t\t}\n-\n-\t\t/* Update used ring with desc information */\n-\t\tvq->used->ring[res_cur_idx & (vq->size - 1)].id\n-\t\t\t= head[packet_success];\n-\t\tvq->used->ring[res_cur_idx & (vq->size - 1)].len\n-\t\t\t= packet_len;\n-\t\tres_cur_idx++;\n-\t\tpacket_success++;\n-\n-\t\t/* A header is required per buffer. */\n-\t\trte_memcpy((void *)(uintptr_t)buff_hdr_addr,\n-\t\t\t(const void *)&virtio_hdr, vq->vhost_hlen);\n-\n-\t\tPRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);\n-\n-\t\tif (likely(packet_success < count)) {\n-\t\t\t/* Prefetch descriptor index. */\n-\t\t\trte_prefetch0(&vq->desc[head[packet_success]]);\n-\t\t}\n-\t}\n-\n-\trte_compiler_barrier();\n-\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in dev_rx_zcp: before update used idx: \"\n-\t\t\"vq.last_used_idx: %d, vq->used->idx: %d\\n\",\n-\t\tdev->device_fh, vq->last_used_idx, vq->used->idx);\n-\n-\t*(volatile uint16_t *)&vq->used->idx += count;\n-\tvq->last_used_idx += count;\n-\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in dev_rx_zcp: after update used idx: \"\n-\t\t\"vq.last_used_idx: %d, vq->used->idx: %d\\n\",\n-\t\tdev->device_fh, vq->last_used_idx, vq->used->idx);\n-\n-\t/* Kick the guest if necessary. */\n-\tif (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))\n-\t\teventfd_write((int)vq->kickfd, 1);\n-\n-\treturn count;\n-}\n-\n-/*\n- * This function routes the TX packet to the correct interface.\n- * This may be a local device or the physical port.\n- */\n-static inline void __attribute__((always_inline))\n-virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,\n-\tuint32_t desc_idx, uint8_t need_copy)\n-{\n-\tstruct mbuf_table *tx_q;\n-\tstruct rte_mbuf **m_table;\n-\tstruct rte_mbuf *mbuf = NULL;\n-\tunsigned len, ret, offset = 0;\n-\tstruct vpool *vpool;\n-\tstruct virtio_net_data_ll *dev_ll = ll_root_used;\n-\tstruct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;\n-\tuint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];\n-\n-\t/* Add packet to the port tx queue. */\n-\ttx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q];\n-\tlen = tx_q->len;\n-\n-\t/* Allocate an mbuf and populate the structure. */\n-\tvpool = &vpool_array[MAX_QUEUES + (uint16_t)dev->vmdq_rx_q];\n-\trte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);\n-\tif (unlikely(mbuf == NULL)) {\n-\t\tstruct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];\n-\t\tRTE_LOG(ERR, VHOST_DATA,\n-\t\t\t\"(%\"PRIu64\") Failed to allocate memory for mbuf.\\n\",\n-\t\t\tdev->device_fh);\n-\t\tput_desc_to_used_list_zcp(vq, desc_idx);\n-\t\treturn;\n-\t}\n-\n-\tif (vm2vm_mode == VM2VM_HARDWARE) {\n-\t\t/* Avoid using a vlan tag from any vm for external pkt, such as\n-\t\t * vlan_tags[dev->device_fh]; otherwise it conflicts during pool\n-\t\t * selection: the MAC address identifies it as an external pkt\n-\t\t * which should go to the network, while the vlan tag identifies\n-\t\t * it as a vm2vm pkt that should be forwarded to another vm. 
The\n-\t\t * hardware cannot resolve such an ambiguous situation, so the\n-\t\t * pkt will be lost.\n-\t\t */\n-\t\tvlan_tag = external_pkt_default_vlan_tag;\n-\t\twhile (dev_ll != NULL) {\n-\t\t\tif (likely(dev_ll->dev->ready == DEVICE_RX) &&\n-\t\t\t\tether_addr_cmp(&(pkt_hdr->d_addr),\n-\t\t\t\t&dev_ll->dev->mac_address)) {\n-\n-\t\t\t\t/*\n-\t\t\t\t * Drop the packet if the TX packet is destined\n-\t\t\t\t * for the TX device.\n-\t\t\t\t */\n-\t\t\t\tif (unlikely(dev_ll->dev->device_fh\n-\t\t\t\t\t== dev->device_fh)) {\n-\t\t\t\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\t\t\"(%\"PRIu64\") TX: Source and destination \"\n-\t\t\t\t\t\"MAC addresses are the same. Dropping \"\n-\t\t\t\t\t\"packet.\\n\",\n-\t\t\t\t\tdev_ll->dev->device_fh);\n-\t\t\t\t\tMBUF_HEADROOM_UINT32(mbuf)\n-\t\t\t\t\t\t= (uint32_t)desc_idx;\n-\t\t\t\t\t__rte_mbuf_raw_free(mbuf);\n-\t\t\t\t\treturn;\n-\t\t\t\t}\n-\n-\t\t\t\t/*\n-\t\t\t\t * Offset the packet length by 4 bytes to\n-\t\t\t\t * account for the HW vlan strip when the\n-\t\t\t\t * L2 switch loops the packet back.\n-\t\t\t\t */\n-\t\t\t\toffset = 4;\n-\t\t\t\tvlan_tag =\n-\t\t\t\t(uint16_t)\n-\t\t\t\tvlan_tags[(uint16_t)dev_ll->dev->device_fh];\n-\n-\t\t\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\t\"(%\"PRIu64\") TX: pkt to local VM device id:\"\n-\t\t\t\t\"(%\"PRIu64\") vlan tag: %d.\\n\",\n-\t\t\t\tdev->device_fh, dev_ll->dev->device_fh,\n-\t\t\t\tvlan_tag);\n-\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t\tdev_ll = dev_ll->next;\n-\t\t}\n-\t}\n-\n-\tmbuf->pkt.nb_segs = m->pkt.nb_segs;\n-\tmbuf->pkt.next = m->pkt.next;\n-\tmbuf->pkt.data_len = m->pkt.data_len + offset;\n-\tmbuf->pkt.pkt_len = mbuf->pkt.data_len;\n-\tif (unlikely(need_copy)) {\n-\t\t/* Copy the packet contents to the mbuf. */\n-\t\trte_memcpy((void *)((uint8_t *)mbuf->pkt.data),\n-\t\t\t(const void *) ((uint8_t *)m->pkt.data),\n-\t\t\tm->pkt.data_len);\n-\t} else {\n-\t\tmbuf->pkt.data = m->pkt.data;\n-\t\tmbuf->buf_physaddr = m->buf_physaddr;\n-\t\tmbuf->buf_addr = m->buf_addr;\n-\t}\n-\tmbuf->ol_flags = PKT_TX_VLAN_PKT;\n-\tmbuf->pkt.vlan_macip.f.vlan_tci = vlan_tag;\n-\tmbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);\n-\tmbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);\n-\tMBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;\n-\n-\ttx_q->m_table[len] = mbuf;\n-\tlen++;\n-\n-\tLOG_DEBUG(VHOST_DATA,\n-\t\t\"(%\"PRIu64\") in tx_route_zcp: pkt: nb_seg: %d, next:%s\\n\",\n-\t\tdev->device_fh,\n-\t\tmbuf->pkt.nb_segs,\n-\t\t(mbuf->pkt.next == NULL) ? \"null\" : \"non-null\");\n-\n-\tif (enable_stats) {\n-\t\tdev_statistics[dev->device_fh].tx_total++;\n-\t\tdev_statistics[dev->device_fh].tx++;\n-\t}\n-\n-\tif (unlikely(len == MAX_PKT_BURST)) {\n-\t\tm_table = (struct rte_mbuf **)tx_q->m_table;\n-\t\tret = rte_eth_tx_burst(ports[0],\n-\t\t\t(uint16_t)tx_q->txq_id, m_table, (uint16_t) len);\n-\n-\t\t/*\n-\t\t * Free any buffers not handled by TX and update\n-\t\t * the port stats.\n-\t\t */\n-\t\tif (unlikely(ret < len)) {\n-\t\t\tdo {\n-\t\t\t\trte_pktmbuf_free(m_table[ret]);\n-\t\t\t} while (++ret < len);\n-\t\t}\n-\n-\t\tlen = 0;\n-\t\ttxmbuf_clean_zcp(dev, vpool);\n-\t}\n-\n-\ttx_q->len = len;\n-\n-\treturn;\n-}\n-\n-/*\n- * This function transmits all available packets in the virtio TX queue for\n- * one virtio-net device. 
If it is the first packet, it learns the MAC address and\n- * sets up VMDQ.\n- */\n-static inline void __attribute__((always_inline))\n-virtio_dev_tx_zcp(struct virtio_net *dev)\n-{\n-\tstruct rte_mbuf m;\n-\tstruct vhost_virtqueue *vq;\n-\tstruct vring_desc *desc;\n-\tuint64_t buff_addr = 0, phys_addr;\n-\tuint32_t head[MAX_PKT_BURST];\n-\tuint32_t i;\n-\tuint16_t free_entries, packet_success = 0;\n-\tuint16_t avail_idx;\n-\tuint8_t need_copy = 0;\n-\thpa_type addr_type;\n-\n-\tvq = dev->virtqueue[VIRTIO_TXQ];\n-\tavail_idx = *((volatile uint16_t *)&vq->avail->idx);\n-\n-\t/* If there are no available buffers then return. */\n-\tif (vq->last_used_idx_res == avail_idx)\n-\t\treturn;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") virtio_dev_tx()\\n\", dev->device_fh);\n-\n-\t/* Prefetch available ring to retrieve head indexes. */\n-\trte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]);\n-\n-\t/* Get the number of free entries in the ring */\n-\tfree_entries = (avail_idx - vq->last_used_idx_res);\n-\n-\t/* Limit to MAX_PKT_BURST. */\n-\tfree_entries\n-\t\t= (free_entries > MAX_PKT_BURST) ? MAX_PKT_BURST : free_entries;\n-\n-\tLOG_DEBUG(VHOST_DATA, \"(%\"PRIu64\") Buffers available %d\\n\",\n-\t\tdev->device_fh, free_entries);\n-\n-\t/* Retrieve all of the head indexes first to avoid caching issues. */\n-\tfor (i = 0; i < free_entries; i++)\n-\t\thead[i]\n-\t\t\t= vq->avail->ring[(vq->last_used_idx_res + i)\n-\t\t\t& (vq->size - 1)];\n-\n-\tvq->last_used_idx_res += free_entries;\n-\n-\t/* Prefetch descriptor index. */\n-\trte_prefetch0(&vq->desc[head[packet_success]]);\n-\trte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);\n-\n-\twhile (packet_success < free_entries) {\n-\t\tdesc = &vq->desc[head[packet_success]];\n-\n-\t\t/* Discard first buffer as it is the virtio header */\n-\t\tdesc = &vq->desc[desc->next];\n-\n-\t\t/* Buffer address translation. */\n-\t\tbuff_addr = gpa_to_vva(dev, desc->addr);\n-\t\tphys_addr = gpa_to_hpa(dev, desc->addr, desc->len, &addr_type);\n-\n-\t\tif (likely(packet_success < (free_entries - 1)))\n-\t\t\t/* Prefetch descriptor index. */\n-\t\t\trte_prefetch0(&vq->desc[head[packet_success + 1]]);\n-\n-\t\tif (unlikely(addr_type == PHYS_ADDR_INVALID)) {\n-\t\t\tRTE_LOG(ERR, VHOST_DATA,\n-\t\t\t\t\"(%\"PRIu64\") Invalid frame buffer address found \"\n-\t\t\t\t\"when transmitting packets!\\n\",\n-\t\t\t\tdev->device_fh);\n-\t\t\tpacket_success++;\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\t/* Prefetch buffer address. */\n-\t\trte_prefetch0((void *)(uintptr_t)buff_addr);\n-\n-\t\t/*\n-\t\t * Set up a dummy mbuf. 
This is copied to a real mbuf if it is\n-\t\t * transmitted out of the physical port.\n-\t\t */\n-\t\tm.pkt.data_len = desc->len;\n-\t\tm.pkt.nb_segs = 1;\n-\t\tm.pkt.next = NULL;\n-\t\tm.pkt.data = (void *)(uintptr_t)buff_addr;\n-\t\tm.buf_addr = m.pkt.data;\n-\t\tm.buf_physaddr = phys_addr;\n-\n-\t\t/*\n-\t\t * Check whether the frame buffer address from the guest\n-\t\t * crosses a sub-region boundary.\n-\t\t */\n-\t\tif (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {\n-\t\t\tRTE_LOG(ERR, VHOST_DATA,\n-\t\t\t\t\"(%\"PRIu64\") Frame buffer address crossing a \"\n-\t\t\t\t\"sub-region found when attaching TX frame \"\n-\t\t\t\t\"buffer address!\\n\",\n-\t\t\t\tdev->device_fh);\n-\t\t\tneed_copy = 1;\n-\t\t} else\n-\t\t\tneed_copy = 0;\n-\n-\t\tPRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);\n-\n-\t\t/*\n-\t\t * If this is the first received packet we need to learn\n-\t\t * the MAC address and set up VMDQ.\n-\t\t */\n-\t\tif (unlikely(dev->ready == DEVICE_MAC_LEARNING)) {\n-\t\t\tif (dev->remove || (link_vmdq(dev, &m) == -1)) {\n-\t\t\t\t/*\n-\t\t\t\t * Discard frame if device is scheduled for\n-\t\t\t\t * removal or a duplicate MAC address is found.\n-\t\t\t\t */\n-\t\t\t\tpacket_success += free_entries;\n-\t\t\t\tvq->last_used_idx += packet_success;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\n-\t\tvirtio_tx_route_zcp(dev, &m, head[packet_success], need_copy);\n-\t\tpacket_success++;\n-\t}\n-}\n-\n-/*\n- * This function is called by each data core. It handles all RX/TX registered\n- * with the core. For TX the specific lcore linked list is used. For RX, MAC\n- * addresses are compared with all devices in the main linked list.\n- */\n-static int\n-switch_worker_zcp(__attribute__((unused)) void *arg)\n-{\n-\tstruct virtio_net *dev = NULL;\n-\tstruct rte_mbuf *pkts_burst[MAX_PKT_BURST];\n-\tstruct virtio_net_data_ll *dev_ll;\n-\tstruct mbuf_table *tx_q;\n-\tvolatile struct lcore_ll_info *lcore_ll;\n-\tconst uint64_t drain_tsc\n-\t\t= (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S\n-\t\t* BURST_TX_DRAIN_US;\n-\tuint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;\n-\tunsigned ret;\n-\tconst uint16_t lcore_id = rte_lcore_id();\n-\tuint16_t count_in_ring, rx_count = 0;\n-\n-\tRTE_LOG(INFO, VHOST_DATA, \"Processing on Core %u started\\n\", lcore_id);\n-\n-\tlcore_ll = lcore_info[lcore_id].lcore_ll;\n-\tprev_tsc = 0;\n-\n-\twhile (1) {\n-\t\tcur_tsc = rte_rdtsc();\n-\n-\t\t/* TX burst queue drain */\n-\t\tdiff_tsc = cur_tsc - prev_tsc;\n-\t\tif (unlikely(diff_tsc > drain_tsc)) {\n-\t\t\t/*\n-\t\t\t * Get mbuf from vpool.pool and detach mbuf and\n-\t\t\t * put back into vpool.ring.\n-\t\t\t */\n-\t\t\tdev_ll = lcore_ll->ll_root_used;\n-\t\t\twhile ((dev_ll != NULL) && (dev_ll->dev != NULL)) {\n-\t\t\t\t/* Get virtio device ID */\n-\t\t\t\tdev = dev_ll->dev;\n-\n-\t\t\t\tif (likely(!dev->remove)) {\n-\t\t\t\t\ttx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q];\n-\t\t\t\t\tif (tx_q->len) {\n-\t\t\t\t\t\tLOG_DEBUG(VHOST_DATA,\n-\t\t\t\t\t\t\"TX queue drained after timeout\"\n-\t\t\t\t\t\t\" with burst size %u\\n\",\n-\t\t\t\t\t\ttx_q->len);\n-\n-\t\t\t\t\t\t/*\n-\t\t\t\t\t\t * Tx any packets in the queue\n-\t\t\t\t\t\t */\n-\t\t\t\t\t\tret = rte_eth_tx_burst(\n-\t\t\t\t\t\t\tports[0],\n-\t\t\t\t\t\t\t(uint16_t)tx_q->txq_id,\n-\t\t\t\t\t\t\t(struct rte_mbuf **)\n-\t\t\t\t\t\t\ttx_q->m_table,\n-\t\t\t\t\t\t\t(uint16_t)tx_q->len);\n-\t\t\t\t\t\tif (unlikely(ret < tx_q->len)) {\n-\t\t\t\t\t\t\tdo {\n-\t\t\t\t\t\t\t\trte_pktmbuf_free(\n-\t\t\t\t\t\t\t\t\ttx_q->m_table[ret]);\n-\t\t\t\t\t\t\t} while (++ret < 
tx_q->len);\n-\t\t\t\t\t\t}\n-\t\t\t\t\t\ttx_q->len = 0;\n-\n-\t\t\t\t\t\ttxmbuf_clean_zcp(dev,\n-\t\t\t\t\t\t\t&vpool_array[MAX_QUEUES+dev->vmdq_rx_q]);\n-\t\t\t\t\t}\n-\t\t\t\t}\n-\t\t\t\tdev_ll = dev_ll->next;\n-\t\t\t}\n-\t\t\tprev_tsc = cur_tsc;\n-\t\t}\n-\n-\t\trte_prefetch0(lcore_ll->ll_root_used);\n-\n-\t\t/*\n-\t\t * Inform the configuration core that we have exited the linked\n-\t\t * list and that no devices are in use if requested.\n-\t\t */\n-\t\tif (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)\n-\t\t\tlcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;\n-\n-\t\t/* Process devices */\n-\t\tdev_ll = lcore_ll->ll_root_used;\n-\n-\t\twhile ((dev_ll != NULL) && (dev_ll->dev != NULL)) {\n-\t\t\tdev = dev_ll->dev;\n-\t\t\tif (unlikely(dev->remove)) {\n-\t\t\t\tdev_ll = dev_ll->next;\n-\t\t\t\tunlink_vmdq(dev);\n-\t\t\t\tdev->ready = DEVICE_SAFE_REMOVE;\n-\t\t\t\tcontinue;\n-\t\t\t}\n-\n-\t\t\tif (likely(dev->ready == DEVICE_RX)) {\n-\t\t\t\tuint32_t index = dev->vmdq_rx_q;\n-\t\t\t\tuint16_t i;\n-\t\t\t\tcount_in_ring\n-\t\t\t\t= rte_ring_count(vpool_array[index].ring);\n-\t\t\t\tuint16_t free_entries\n-\t\t\t\t= (uint16_t)get_available_ring_num_zcp(dev);\n-\n-\t\t\t\t/*\n-\t\t\t\t * Attach all mbufs in vpool.ring and put back\n-\t\t\t\t * into vpool.pool.\n-\t\t\t\t */\n-\t\t\t\tfor (i = 0;\n-\t\t\t\ti < RTE_MIN(free_entries,\n-\t\t\t\tRTE_MIN(count_in_ring, MAX_PKT_BURST));\n-\t\t\t\ti++)\n-\t\t\t\t\tattach_rxmbuf_zcp(dev);\n-\n-\t\t\t\t/* Handle guest RX */\n-\t\t\t\trx_count = rte_eth_rx_burst(ports[0],\n-\t\t\t\t\t(uint16_t)dev->vmdq_rx_q, pkts_burst,\n-\t\t\t\t\tMAX_PKT_BURST);\n-\n-\t\t\t\tif (rx_count) {\n-\t\t\t\t\tret_count = virtio_dev_rx_zcp(dev,\n-\t\t\t\t\t\t\tpkts_burst, rx_count);\n-\t\t\t\t\tif (enable_stats) {\n-\t\t\t\t\t\tdev_statistics[dev->device_fh].rx_total\n-\t\t\t\t\t\t\t+= rx_count;\n-\t\t\t\t\t\tdev_statistics[dev->device_fh].rx\n-\t\t\t\t\t\t\t+= ret_count;\n-\t\t\t\t\t}\n-\t\t\t\t\twhile (likely(rx_count)) {\n-\t\t\t\t\t\trx_count--;\n-\t\t\t\t\t\tpktmbuf_detach_zcp(\n-\t\t\t\t\t\t\tpkts_burst[rx_count]);\n-\t\t\t\t\t\trte_ring_sp_enqueue(\n-\t\t\t\t\t\t\tvpool_array[index].ring,\n-\t\t\t\t\t\t\t(void *)pkts_burst[rx_count]);\n-\t\t\t\t\t}\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\tif (likely(!dev->remove))\n-\t\t\t\t/* Handle guest TX */\n-\t\t\t\tvirtio_dev_tx_zcp(dev);\n-\n-\t\t\t/* Move to the next device in the list */\n-\t\t\tdev_ll = dev_ll->next;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-\n-/*\n- * Add an entry to a used linked list. A free entry must first be found\n- * in the free linked list using get_data_ll_free_entry();\n- */\n-static void\n-add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,\n-\tstruct virtio_net_data_ll *ll_dev)\n-{\n-\tstruct virtio_net_data_ll *ll = *ll_root_addr;\n-\n-\t/* Set next as NULL and use a compiler barrier to avoid reordering. */\n-\tll_dev->next = NULL;\n-\trte_compiler_barrier();\n-\n-\t/* If ll == NULL then this is the first device. */\n-\tif (ll) {\n-\t\t/* Increment to the tail of the linked list. */\n-\t\twhile ((ll->next != NULL) )\n-\t\t\tll = ll->next;\n-\n-\t\tll->next = ll_dev;\n-\t} else {\n-\t\t*ll_root_addr = ll_dev;\n-\t}\n-}\n-\n-/*\n- * Remove an entry from a used linked list. 
The entry must then be added to\n- * the free linked list using put_data_ll_free_entry().\n- */\n-static void\n-rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,\n-\tstruct virtio_net_data_ll *ll_dev,\n-\tstruct virtio_net_data_ll *ll_dev_last)\n-{\n-\tstruct virtio_net_data_ll *ll = *ll_root_addr;\n-\n-\tif (unlikely((ll == NULL) || (ll_dev == NULL)))\n-\t\treturn;\n-\n-\tif (ll_dev == ll)\n-\t\t*ll_root_addr = ll_dev->next;\n-\telse\n-\t\tif (likely(ll_dev_last != NULL))\n-\t\t\tll_dev_last->next = ll_dev->next;\n-\t\telse\n-\t\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Remove entry from ll failed.\\n\");\n-}\n-\n-/*\n- * Find and return an entry from the free linked list.\n- */\n-static struct virtio_net_data_ll *\n-get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)\n-{\n-\tstruct virtio_net_data_ll *ll_free = *ll_root_addr;\n-\tstruct virtio_net_data_ll *ll_dev;\n-\n-\tif (ll_free == NULL)\n-\t\treturn NULL;\n-\n-\tll_dev = ll_free;\n-\t*ll_root_addr = ll_free->next;\n-\n-\treturn ll_dev;\n-}\n-\n-/*\n- * Place an entry back on to the free linked list.\n- */\n-static void\n-put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,\n-\tstruct virtio_net_data_ll *ll_dev)\n-{\n-\tstruct virtio_net_data_ll *ll_free = *ll_root_addr;\n-\n-\tif (ll_dev == NULL)\n-\t\treturn;\n-\n-\tll_dev->next = ll_free;\n-\t*ll_root_addr = ll_dev;\n-}\n-\n-/*\n- * Creates a linked list of a given size.\n- */\n-static struct virtio_net_data_ll *\n-alloc_data_ll(uint32_t size)\n-{\n-\tstruct virtio_net_data_ll *ll_new;\n-\tuint32_t i;\n-\n-\t/* Malloc and then chain the linked list. */\n-\tll_new = malloc(size * sizeof(struct virtio_net_data_ll));\n-\tif (ll_new == NULL) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Failed to allocate memory for ll_new.\\n\");\n-\t\treturn NULL;\n-\t}\n-\n-\tfor (i = 0; i < size - 1; i++) {\n-\t\tll_new[i].dev = NULL;\n-\t\tll_new[i].next = &ll_new[i+1];\n-\t}\n-\tll_new[i].next = NULL;\n-\n-\treturn ll_new;\n-}\n-\n-/*\n- * Create the main linked list along with each individual core's linked list. A used and a free list\n- * are created to manage entries.\n- */\n-static int\n-init_data_ll(void)\n-{\n-\tint lcore;\n-\n-\tRTE_LCORE_FOREACH_SLAVE(lcore) {\n-\t\tlcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));\n-\t\tif (lcore_info[lcore].lcore_ll == NULL) {\n-\t\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Failed to allocate memory for lcore_ll.\\n\");\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tlcore_info[lcore].lcore_ll->device_num = 0;\n-\t\tlcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;\n-\t\tlcore_info[lcore].lcore_ll->ll_root_used = NULL;\n-\t\tif (num_devices % num_switching_cores)\n-\t\t\tlcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);\n-\t\telse\n-\t\t\tlcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);\n-\t}\n-\n-\t/* Allocate devices up to a maximum of MAX_DEVICES. */\n-\tll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Set virtqueue flags so that we do not receive interrupts.\n- */\n-static void\n-set_irq_status(struct virtio_net *dev)\n-{\n-\tdev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;\n-\tdev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;\n-}\n-\n-/*\n- * Remove a device from the specific data core linked list and from the main linked list. Synchronization\n- * occurs through the use of the lcore dev_removal_flag. 
Device is made volatile here to avoid re-ordering\n- * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.\n- */\n-static void\n-destroy_device(volatile struct virtio_net *dev)\n-{\n-\tstruct virtio_net_data_ll *ll_lcore_dev_cur;\n-\tstruct virtio_net_data_ll *ll_main_dev_cur;\n-\tstruct virtio_net_data_ll *ll_lcore_dev_last = NULL;\n-\tstruct virtio_net_data_ll *ll_main_dev_last = NULL;\n-\tint lcore;\n-\n-\tdev->flags &= ~VIRTIO_DEV_RUNNING;\n-\n-\t/* Set the remove flag. */\n-\tdev->remove = 1;\n-\n-\twhile (dev->ready != DEVICE_SAFE_REMOVE) {\n-\t\trte_pause();\n-\t}\n-\n-\t/* Search for entry to be removed from lcore ll */\n-\tll_lcore_dev_cur = lcore_info[dev->coreid].lcore_ll->ll_root_used;\n-\twhile (ll_lcore_dev_cur != NULL) {\n-\t\tif (ll_lcore_dev_cur->dev == dev) {\n-\t\t\tbreak;\n-\t\t} else {\n-\t\t\tll_lcore_dev_last = ll_lcore_dev_cur;\n-\t\t\tll_lcore_dev_cur = ll_lcore_dev_cur->next;\n-\t\t}\n-\t}\n-\n-\tif (ll_lcore_dev_cur == NULL) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG,\n-\t\t\t\"(%\"PRIu64\") Failed to find the dev to be destroyed.\\n\",\n-\t\t\tdev->device_fh);\n-\t\treturn;\n-\t}\n-\n-\t/* Search for entry to be removed from main ll */\n-\tll_main_dev_cur = ll_root_used;\n-\tll_main_dev_last = NULL;\n-\twhile (ll_main_dev_cur != NULL) {\n-\t\tif (ll_main_dev_cur->dev == dev) {\n-\t\t\tbreak;\n-\t\t} else {\n-\t\t\tll_main_dev_last = ll_main_dev_cur;\n-\t\t\tll_main_dev_cur = ll_main_dev_cur->next;\n-\t\t}\n-\t}\n-\n-\t/* Remove entries from the lcore and main ll. */\n-\trm_data_ll_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);\n-\trm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);\n-\n-\t/* Set the dev_removal_flag on each lcore. */\n-\tRTE_LCORE_FOREACH_SLAVE(lcore) {\n-\t\tlcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;\n-\t}\n-\n-\t/*\n-\t * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that\n-\t * they can no longer access the device removed from the linked lists and that the devices\n-\t * are no longer in use.\n-\t */\n-\tRTE_LCORE_FOREACH_SLAVE(lcore) {\n-\t\twhile (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {\n-\t\t\trte_pause();\n-\t\t}\n-\t}\n-\n-\t/* Add the entries back to the lcore and main free ll. */\n-\tput_data_ll_free_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);\n-\tput_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);\n-\n-\t/* Decrement number of devices on the lcore. */\n-\tlcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;\n-\n-\tRTE_LOG(INFO, VHOST_DATA, \"(%\"PRIu64\") Device has been removed from data core\\n\", dev->device_fh);\n-\n-\tif (zero_copy) {\n-\t\tstruct vpool *vpool = &vpool_array[dev->vmdq_rx_q];\n-\n-\t\t/* Stop the RX queue. */\n-\t\tif (rte_eth_dev_rx_queue_stop(ports[0], dev->vmdq_rx_q) != 0) {\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"(%\"PRIu64\") In destroy_device: Failed to stop \"\n-\t\t\t\t\"rx queue:%d\\n\",\n-\t\t\t\tdev->device_fh,\n-\t\t\t\tdev->vmdq_rx_q);\n-\t\t}\n-\n-\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\"(%\"PRIu64\") in destroy_device: Start putting mbufs in \"\n-\t\t\t\"mempool back to ring for RX queue: %d\\n\",\n-\t\t\tdev->device_fh, dev->vmdq_rx_q);\n-\n-\t\tmbuf_destroy_zcp(vpool);\n-\n-\t\t/* Stop the TX queue. 
*/\n-\t\tif (rte_eth_dev_tx_queue_stop(ports[0], dev->vmdq_rx_q) != 0) {\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"(%\"PRIu64\") In destroy_device: Failed to \"\n-\t\t\t\t\"stop tx queue:%d\\n\",\n-\t\t\t\tdev->device_fh, dev->vmdq_rx_q);\n-\t\t}\n-\n-\t\tvpool = &vpool_array[dev->vmdq_rx_q + MAX_QUEUES];\n-\n-\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\"(%\"PRIu64\") destroy_device: Start putting mbufs in mempool \"\n-\t\t\t\"back to ring for TX queue: %d, dev:(%\"PRIu64\")\\n\",\n-\t\t\tdev->device_fh, (dev->vmdq_rx_q + MAX_QUEUES),\n-\t\t\tdev->device_fh);\n-\n-\t\tmbuf_destroy_zcp(vpool);\n-\t}\n-\n-}\n-\n-/*\n- * A new device is added to a data core. First the device is added to the main linked list\n- * and then allocated to a specific data core.\n- */\n-static int\n-new_device(struct virtio_net *dev)\n-{\n-\tstruct virtio_net_data_ll *ll_dev;\n-\tint lcore, core_add = 0;\n-\tuint32_t device_num_min = num_devices;\n-\n-\t/* Add device to main ll */\n-\tll_dev = get_data_ll_free_entry(&ll_root_free);\n-\tif (ll_dev == NULL) {\n-\t\tRTE_LOG(INFO, VHOST_DATA, \"(%\"PRIu64\") No free entry found in linked list. Device limit \"\n-\t\t\t\"of %d devices per core has been reached\\n\",\n-\t\t\tdev->device_fh, num_devices);\n-\t\treturn -1;\n-\t}\n-\tll_dev->dev = dev;\n-\tadd_data_ll_entry(&ll_root_used, ll_dev);\n-\tll_dev->dev->vmdq_rx_q\n-\t\t= ll_dev->dev->device_fh * (num_queues / num_devices);\n-\n-\tif (zero_copy) {\n-\t\tuint32_t index = ll_dev->dev->vmdq_rx_q;\n-\t\tuint32_t count_in_ring, i;\n-\t\tstruct mbuf_table *tx_q;\n-\n-\t\tcount_in_ring = rte_ring_count(vpool_array[index].ring);\n-\n-\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\"(%\"PRIu64\") in new_device: mbuf count in mempool \"\n-\t\t\t\"before attach is: %d\\n\",\n-\t\t\tdev->device_fh,\n-\t\t\trte_mempool_count(vpool_array[index].pool));\n-\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\"(%\"PRIu64\") in new_device: mbuf count in ring \"\n-\t\t\t\"before attach is: %d\\n\",\n-\t\t\tdev->device_fh, count_in_ring);\n-\n-\t\t/*\n-\t\t * Attach all mbufs in vpool.ring and put back into vpool.pool.\n-\t\t */\n-\t\tfor (i = 0; i < count_in_ring; i++)\n-\t\t\tattach_rxmbuf_zcp(dev);\n-\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") in new_device: mbuf count in \"\n-\t\t\t\"mempool after attach is: %d\\n\",\n-\t\t\tdev->device_fh,\n-\t\t\trte_mempool_count(vpool_array[index].pool));\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") in new_device: mbuf count in \"\n-\t\t\t\"ring after attach is: %d\\n\",\n-\t\t\tdev->device_fh,\n-\t\t\trte_ring_count(vpool_array[index].ring));\n-\n-\t\ttx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q];\n-\t\ttx_q->txq_id = dev->vmdq_rx_q;\n-\n-\t\tif (rte_eth_dev_tx_queue_start(ports[0], dev->vmdq_rx_q) != 0) {\n-\t\t\tstruct vpool *vpool = &vpool_array[dev->vmdq_rx_q];\n-\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"(%\"PRIu64\") In new_device: Failed to start \"\n-\t\t\t\t\"tx queue:%d\\n\",\n-\t\t\t\tdev->device_fh, dev->vmdq_rx_q);\n-\n-\t\t\tmbuf_destroy_zcp(vpool);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tif (rte_eth_dev_rx_queue_start(ports[0], dev->vmdq_rx_q) != 0) {\n-\t\t\tstruct vpool *vpool = &vpool_array[dev->vmdq_rx_q];\n-\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"(%\"PRIu64\") In new_device: Failed to start \"\n-\t\t\t\t\"rx queue:%d\\n\",\n-\t\t\t\tdev->device_fh, dev->vmdq_rx_q);\n-\n-\t\t\t/* Stop the TX queue. 
*/\n-\t\t\tif (rte_eth_dev_tx_queue_stop(ports[0],\n-\t\t\t\tdev->vmdq_rx_q) != 0) {\n-\t\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\t\"(%\"PRIu64\") In new_device: Failed to \"\n-\t\t\t\t\t\"stop tx queue:%d\\n\",\n-\t\t\t\t\tdev->device_fh, dev->vmdq_rx_q);\n-\t\t\t}\n-\n-\t\t\tmbuf_destroy_zcp(vpool);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t}\n-\n-\t/* Reset ready flag. */\n-\tdev->ready = DEVICE_MAC_LEARNING;\n-\tdev->remove = 0;\n-\n-\t/* Find a suitable lcore to add the device. */\n-\tRTE_LCORE_FOREACH_SLAVE(lcore) {\n-\t\tif (lcore_info[lcore].lcore_ll->device_num < device_num_min) {\n-\t\t\tdevice_num_min = lcore_info[lcore].lcore_ll->device_num;\n-\t\t\tcore_add = lcore;\n-\t\t}\n-\t}\n-\t/* Add device to lcore ll */\n-\tll_dev->dev->coreid = core_add;\n-\tll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);\n-\tif (ll_dev == NULL) {\n-\t\tRTE_LOG(INFO, VHOST_DATA, \"(%\"PRIu64\") Failed to add device to data core\\n\", dev->device_fh);\n-\t\tdev->ready = DEVICE_SAFE_REMOVE;\n-\t\tdestroy_device(dev);\n-\t\treturn -1;\n-\t}\n-\tll_dev->dev = dev;\n-\tadd_data_ll_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_used, ll_dev);\n-\n-\t/* Initialize device stats */\n-\tmemset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));\n-\n-\t/* Disable notifications. */\n-\tset_irq_status(dev);\n-\tlcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;\n-\tdev->flags |= VIRTIO_DEV_RUNNING;\n-\n-\tRTE_LOG(INFO, VHOST_DATA, \"(%\"PRIu64\") Device has been added to data core %d\\n\", dev->device_fh, dev->coreid);\n-\n-\treturn 0;\n-}\n-\n-/*\n- * These callbacks allow devices to be added to the data core when configuration\n- * has been fully completed.\n- */\n-static const struct virtio_net_device_ops virtio_net_device_ops =\n-{\n-\t.new_device = new_device,\n-\t.destroy_device = destroy_device,\n-};\n-\n-/*\n- * This thread wakes up after a period to print stats if the user has\n- * enabled them.\n- */\n-static void\n-print_stats(void)\n-{\n-\tstruct virtio_net_data_ll *dev_ll;\n-\tuint64_t tx_dropped, rx_dropped;\n-\tuint64_t tx, tx_total, rx, rx_total;\n-\tuint32_t device_fh;\n-\tconst char clr[] = { 27, '[', '2', 'J', '\\0' };\n-\tconst char top_left[] = { 27, '[', '1', ';', '1', 'H', '\\0' };\n-\n-\twhile (1) {\n-\t\tsleep(enable_stats);\n-\n-\t\t/* Clear screen and move to top left */\n-\t\tprintf(\"%s%s\", clr, top_left);\n-\n-\t\tprintf(\"\\nDevice statistics ====================================\");\n-\n-\t\tdev_ll = ll_root_used;\n-\t\twhile (dev_ll != NULL) {\n-\t\t\tdevice_fh = (uint32_t)dev_ll->dev->device_fh;\n-\t\t\ttx_total = dev_statistics[device_fh].tx_total;\n-\t\t\ttx = dev_statistics[device_fh].tx;\n-\t\t\ttx_dropped = tx_total - tx;\n-\t\t\tif (zero_copy == 0) {\n-\t\t\t\trx_total = rte_atomic64_read(\n-\t\t\t\t\t&dev_statistics[device_fh].rx_total_atomic);\n-\t\t\t\trx = rte_atomic64_read(\n-\t\t\t\t\t&dev_statistics[device_fh].rx_atomic);\n-\t\t\t} else {\n-\t\t\t\trx_total = dev_statistics[device_fh].rx_total;\n-\t\t\t\trx = dev_statistics[device_fh].rx;\n-\t\t\t}\n-\t\t\trx_dropped = rx_total - rx;\n-\n-\t\t\tprintf(\"\\nStatistics for device %\"PRIu32\" ------------------------------\"\n-\t\t\t\t\t\"\\nTX total: \t\t%\"PRIu64\"\"\n-\t\t\t\t\t\"\\nTX dropped: \t\t%\"PRIu64\"\"\n-\t\t\t\t\t\"\\nTX successful: \t\t%\"PRIu64\"\"\n-\t\t\t\t\t\"\\nRX total: \t\t%\"PRIu64\"\"\n-\t\t\t\t\t\"\\nRX dropped: \t\t%\"PRIu64\"\"\n-\t\t\t\t\t\"\\nRX successful: 
\t\t%\"PRIu64\"\",\n-\t\t\t\t\tdevice_fh,\n-\t\t\t\t\ttx_total,\n-\t\t\t\t\ttx_dropped,\n-\t\t\t\t\ttx,\n-\t\t\t\t\trx_total,\n-\t\t\t\t\trx_dropped,\n-\t\t\t\t\trx);\n-\n-\t\t\tdev_ll = dev_ll->next;\n-\t\t}\n-\t\tprintf(\"\\n======================================================\\n\");\n-\t}\n-}\n-\n-static void\n-setup_mempool_tbl(int socket, uint32_t index, char *pool_name,\n-\tchar *ring_name, uint32_t nb_mbuf)\n-{\n-\tuint16_t roomsize = VIRTIO_DESCRIPTOR_LEN_ZCP + RTE_PKTMBUF_HEADROOM;\n-\tvpool_array[index].pool\n-\t\t= rte_mempool_create(pool_name, nb_mbuf, MBUF_SIZE_ZCP,\n-\t\tMBUF_CACHE_SIZE_ZCP, sizeof(struct rte_pktmbuf_pool_private),\n-\t\trte_pktmbuf_pool_init, (void *)(uintptr_t)roomsize,\n-\t\trte_pktmbuf_init, NULL, socket, 0);\n-\tif (vpool_array[index].pool != NULL) {\n-\t\tvpool_array[index].ring\n-\t\t\t= rte_ring_create(ring_name,\n-\t\t\t\trte_align32pow2(nb_mbuf + 1),\n-\t\t\t\tsocket, RING_F_SP_ENQ | RING_F_SC_DEQ);\n-\t\tif (likely(vpool_array[index].ring != NULL)) {\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"in setup_mempool_tbl: mbuf count in \"\n-\t\t\t\t\"mempool is: %d\\n\",\n-\t\t\t\trte_mempool_count(vpool_array[index].pool));\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"in setup_mempool_tbl: mbuf count in \"\n-\t\t\t\t\"ring   is: %d\\n\",\n-\t\t\t\trte_ring_count(vpool_array[index].ring));\n-\t\t} else {\n-\t\t\trte_exit(EXIT_FAILURE, \"ring_create(%s) failed\",\n-\t\t\t\tring_name);\n-\t\t}\n-\n-\t\t/* Need consider head room. */\n-\t\tvpool_array[index].buf_size = roomsize - RTE_PKTMBUF_HEADROOM;\n-\t} else {\n-\t\trte_exit(EXIT_FAILURE, \"mempool_create(%s) failed\", pool_name);\n-\t}\n-}\n-\n-\n-/*\n- * Main function, does initialisation and calls the per-lcore functions. The CUSE\n- * device is also registered here to handle the IOCTLs.\n- */\n-int\n-MAIN(int argc, char *argv[])\n-{\n-\tstruct rte_mempool *mbuf_pool = NULL;\n-\tunsigned lcore_id, core_id = 0;\n-\tunsigned nb_ports, valid_num_ports;\n-\tint ret;\n-\tuint8_t portid, queue_id = 0;\n-\tstatic pthread_t tid;\n-\n-\t/* init EAL */\n-\tret = rte_eal_init(argc, argv);\n-\tif (ret < 0)\n-\t\trte_exit(EXIT_FAILURE, \"Error with EAL initialization\\n\");\n-\targc -= ret;\n-\targv += ret;\n-\n-\t/* parse app arguments */\n-\tret = us_vhost_parse_args(argc, argv);\n-\tif (ret < 0)\n-\t\trte_exit(EXIT_FAILURE, \"Invalid argument\\n\");\n-\n-\tif (rte_eal_pci_probe() != 0)\n-\t\trte_exit(EXIT_FAILURE, \"Error with NIC driver initialization\\n\");\n-\n-\tfor (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)\n-\t\tif (rte_lcore_is_enabled(lcore_id))\n-\t\t\tlcore_ids[core_id ++] = lcore_id;\n-\n-\tif (rte_lcore_count() > RTE_MAX_LCORE)\n-\t\trte_exit(EXIT_FAILURE,\"Not enough cores\\n\");\n-\n-\t/*set the number of swithcing cores available*/\n-\tnum_switching_cores = rte_lcore_count()-1;\n-\n-\t/* Get the number of physical ports. */\n-\tnb_ports = rte_eth_dev_count();\n-\tif (nb_ports > RTE_MAX_ETHPORTS)\n-\t\tnb_ports = RTE_MAX_ETHPORTS;\n-\n-\t/*\n-\t * Update the global var NUM_PORTS and global array PORTS\n-\t * and get value of var VALID_NUM_PORTS according to system ports number\n-\t */\n-\tvalid_num_ports = check_ports_num(nb_ports);\n-\n-\tif ((valid_num_ports ==  0) || (valid_num_ports > MAX_SUP_PORTS)) {\n-\t\tRTE_LOG(INFO, VHOST_PORT, \"Current enabled port number is %u,\"\n-\t\t\t\"but only %u port can be enabled\\n\",num_ports, MAX_SUP_PORTS);\n-\t\treturn -1;\n-\t}\n-\n-\tif (zero_copy == 0) {\n-\t\t/* Create the mbuf pool. 
*/\n-\t\tmbuf_pool = rte_mempool_create(\n-\t\t\t\t\"MBUF_POOL\",\n-\t\t\t\tNUM_MBUFS_PER_PORT\n-\t\t\t\t* valid_num_ports,\n-\t\t\t\tMBUF_SIZE, MBUF_CACHE_SIZE,\n-\t\t\t\tsizeof(struct rte_pktmbuf_pool_private),\n-\t\t\t\trte_pktmbuf_pool_init, NULL,\n-\t\t\t\trte_pktmbuf_init, NULL,\n-\t\t\t\trte_socket_id(), 0);\n-\t\tif (mbuf_pool == NULL)\n-\t\t\trte_exit(EXIT_FAILURE, \"Cannot create mbuf pool\\n\");\n-\n-\t\tfor (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)\n-\t\t\tvpool_array[queue_id].pool = mbuf_pool;\n-\n-\t\tif (vm2vm_mode == VM2VM_HARDWARE) {\n-\t\t\t/* Enable VT loopback so the L2 switch does the vm2vm forwarding. */\n-\t\t\tvmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"Enable loop back for L2 switch in vmdq.\\n\");\n-\t\t}\n-\t} else {\n-\t\tuint32_t nb_mbuf;\n-\t\tchar pool_name[RTE_MEMPOOL_NAMESIZE];\n-\t\tchar ring_name[RTE_MEMPOOL_NAMESIZE];\n-\n-\t\trx_conf_default.start_rx_per_q = (uint8_t)zero_copy;\n-\t\trx_conf_default.rx_drop_en = 0;\n-\t\ttx_conf_default.start_tx_per_q = (uint8_t)zero_copy;\n-\t\tnb_mbuf = num_rx_descriptor\n-\t\t\t+ num_switching_cores * MBUF_CACHE_SIZE_ZCP\n-\t\t\t+ num_switching_cores * MAX_PKT_BURST;\n-\n-\t\tfor (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {\n-\t\t\tsnprintf(pool_name, sizeof(pool_name),\n-\t\t\t\t\"rxmbuf_pool_%u\", queue_id);\n-\t\t\tsnprintf(ring_name, sizeof(ring_name),\n-\t\t\t\t\"rxmbuf_ring_%u\", queue_id);\n-\t\t\tsetup_mempool_tbl(rte_socket_id(), queue_id,\n-\t\t\t\tpool_name, ring_name, nb_mbuf);\n-\t\t}\n-\n-\t\tnb_mbuf = num_tx_descriptor\n-\t\t\t\t+ num_switching_cores * MBUF_CACHE_SIZE_ZCP\n-\t\t\t\t+ num_switching_cores * MAX_PKT_BURST;\n-\n-\t\tfor (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {\n-\t\t\tsnprintf(pool_name, sizeof(pool_name),\n-\t\t\t\t\"txmbuf_pool_%u\", queue_id);\n-\t\t\tsnprintf(ring_name, sizeof(ring_name),\n-\t\t\t\t\"txmbuf_ring_%u\", queue_id);\n-\t\t\tsetup_mempool_tbl(rte_socket_id(),\n-\t\t\t\t(queue_id + MAX_QUEUES),\n-\t\t\t\tpool_name, ring_name, nb_mbuf);\n-\t\t}\n-\n-\t\tif (vm2vm_mode == VM2VM_HARDWARE) {\n-\t\t\t/* Enable VT loopback so the L2 switch does the vm2vm forwarding. */\n-\t\t\tvmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"Enable loop back for L2 switch in vmdq.\\n\");\n-\t\t}\n-\t}\n-\t/* Set log level. */\n-\trte_set_log_level(LOG_LEVEL);\n-\n-\t/* initialize all ports */\n-\tfor (portid = 0; portid < nb_ports; portid++) {\n-\t\t/* skip ports that are not enabled */\n-\t\tif ((enabled_port_mask & (1 << portid)) == 0) {\n-\t\t\tRTE_LOG(INFO, VHOST_PORT,\n-\t\t\t\t\"Skipping disabled port %d\\n\", portid);\n-\t\t\tcontinue;\n-\t\t}\n-\t\tif (port_init(portid) != 0)\n-\t\t\trte_exit(EXIT_FAILURE,\n-\t\t\t\t\"Cannot initialize network ports\\n\");\n-\t}\n-\n-\t/* Initialise all linked lists. */\n-\tif (init_data_ll() == -1)\n-\t\trte_exit(EXIT_FAILURE, \"Failed to initialize linked list\\n\");\n-\n-\t/* Initialize device stats */\n-\tmemset(&dev_statistics, 0, sizeof(dev_statistics));\n-\n-\t/* Enable stats if the user option is set. */\n-\tif (enable_stats)\n-\t\tpthread_create(&tid, NULL, (void *)print_stats, NULL);\n-\n-\t/* Launch all data cores. */\n-\tif (zero_copy == 0) {\n-\t\tRTE_LCORE_FOREACH_SLAVE(lcore_id) {\n-\t\t\trte_eal_remote_launch(switch_worker,\n-\t\t\t\tmbuf_pool, lcore_id);\n-\t\t}\n-\t} else {\n-\t\tuint32_t count_in_mempool, index, i;\n-\t\tfor (index = 0; index < 2*MAX_QUEUES; index++) {\n-\t\t\t/* For all RX and TX queues. 
*/\n-\t\t\tcount_in_mempool\n-\t\t\t\t= rte_mempool_count(vpool_array[index].pool);\n-\n-\t\t\t/*\n-\t\t\t * Transfer all un-attached mbufs from vpool.pool\n-\t\t\t * to vpool.ring.\n-\t\t\t */\n-\t\t\tfor (i = 0; i < count_in_mempool; i++) {\n-\t\t\t\tstruct rte_mbuf *mbuf\n-\t\t\t\t\t= __rte_mbuf_raw_alloc(\n-\t\t\t\t\t\tvpool_array[index].pool);\n-\t\t\t\trte_ring_sp_enqueue(vpool_array[index].ring,\n-\t\t\t\t\t\t(void *)mbuf);\n-\t\t\t}\n-\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"in MAIN: mbuf count in mempool at initial \"\n-\t\t\t\t\"is: %d\\n\", count_in_mempool);\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"in MAIN: mbuf count in ring at initial is: \"\n-\t\t\t\t\"%d\\n\",\n-\t\t\t\trte_ring_count(vpool_array[index].ring));\n-\t\t}\n-\n-\t\tRTE_LCORE_FOREACH_SLAVE(lcore_id)\n-\t\t\trte_eal_remote_launch(switch_worker_zcp, NULL,\n-\t\t\t\tlcore_id);\n-\t}\n-\n-\t/* Register CUSE device to handle IOCTLs. */\n-\tret = register_cuse_device((char*)&dev_basename, dev_index, get_virtio_net_callbacks());\n-\tif (ret != 0)\n-\t\trte_exit(EXIT_FAILURE, \"CUSE device setup failure.\\n\");\n-\n-\tinit_virtio_net(&virtio_net_device_ops);\n-\n-\t/* Start CUSE session. */\n-\tstart_cuse_session_loop();\n-\treturn 0;\n-\n-}\n-\ndiff --git a/examples/vhost/main.h b/examples/vhost/main.h\ndeleted file mode 100644\nindex c15d938..0000000\n--- a/examples/vhost/main.h\n+++ /dev/null\n@@ -1,86 +0,0 @@\n-/*-\n- *   BSD LICENSE\n- *\n- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n- *   All rights reserved.\n- *\n- *   Redistribution and use in source and binary forms, with or without\n- *   modification, are permitted provided that the following conditions\n- *   are met:\n- *\n- *     * Redistributions of source code must retain the above copyright\n- *       notice, this list of conditions and the following disclaimer.\n- *     * Redistributions in binary form must reproduce the above copyright\n- *       notice, this list of conditions and the following disclaimer in\n- *       the documentation and/or other materials provided with the\n- *       distribution.\n- *     * Neither the name of Intel Corporation nor the names of its\n- *       contributors may be used to endorse or promote products derived\n- *       from this software without specific prior written permission.\n- *\n- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- */\n-\n-#ifndef _MAIN_H_\n-#define _MAIN_H_\n-\n-#ifdef RTE_EXEC_ENV_BAREMETAL\n-#define MAIN _main\n-#else\n-#define MAIN main\n-#endif\n-\n-//#define DEBUG\n-\n-#ifdef DEBUG\n-#define LOG_LEVEL RTE_LOG_DEBUG\n-#define LOG_DEBUG(log_type, fmt, args...) 
do {\t\\\n-\tRTE_LOG(DEBUG, log_type, fmt, ##args);\t\t\\\n-} while (0)\n-#else\n-#define LOG_LEVEL RTE_LOG_INFO\n-#define LOG_DEBUG(log_type, fmt, args...) do{} while(0)\n-#endif\n-\n-/* Macros for printing using RTE_LOG */\n-#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1\n-#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER2\n-#define RTE_LOGTYPE_VHOST_PORT   RTE_LOGTYPE_USER3\n-\n-/*\n- * Device linked list structure for data path.\n- */\n-struct virtio_net_data_ll\n-{\n-\tstruct virtio_net\t\t\t*dev;\t/* Pointer to device created by configuration core. */\n-\tstruct virtio_net_data_ll\t*next;  /* Pointer to next device in linked list. */\n-};\n-\n-/*\n- * Structure containing data core specific information.\n- */\n-struct lcore_ll_info\n-{\n-\tstruct virtio_net_data_ll\t*ll_root_free; \t\t/* Pointer to head in free linked list. */\n-\tstruct virtio_net_data_ll\t*ll_root_used;\t\t/* Pointer to head of used linked list. */\n-\tuint32_t \t\t\t\t\tdevice_num;\t\t\t/* Number of devices on lcore. */\n-\tvolatile uint8_t\t\t\tdev_removal_flag;\t/* Flag to synchronize device removal. */\n-};\n-\n-struct lcore_info\n-{\n-\tstruct lcore_ll_info\t*lcore_ll;\t/* Pointer to data core specific lcore_ll_info struct */\n-};\n-\n-int MAIN(int argc, char **argv);\n-#endif /* _MAIN_H_ */\ndiff --git a/examples/vhost/vhost-net-cdev.c b/examples/vhost/vhost-net-cdev.c\ndeleted file mode 100644\nindex 8cf806a..0000000\n--- a/examples/vhost/vhost-net-cdev.c\n+++ /dev/null\n@@ -1,367 +0,0 @@\n-/*-\n- *   BSD LICENSE\n- *\n- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n- *   All rights reserved.\n- *\n- *   Redistribution and use in source and binary forms, with or without\n- *   modification, are permitted provided that the following conditions\n- *   are met:\n- *\n- *     * Redistributions of source code must retain the above copyright\n- *       notice, this list of conditions and the following disclaimer.\n- *     * Redistributions in binary form must reproduce the above copyright\n- *       notice, this list of conditions and the following disclaimer in\n- *       the documentation and/or other materials provided with the\n- *       distribution.\n- *     * Neither the name of Intel Corporation nor the names of its\n- *       contributors may be used to endorse or promote products derived\n- *       from this software without specific prior written permission.\n- *\n- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- */\n-\n-#include <errno.h>\n-#include <fuse/cuse_lowlevel.h>\n-#include <linux/limits.h>\n-#include <linux/vhost.h>\n-#include <stdint.h>\n-#include <string.h>\n-#include <unistd.h>\n-\n-#include <rte_ethdev.h>\n-#include <rte_log.h>\n-#include <rte_string_fns.h>\n-\n-#include \"main.h\"\n-#include \"vhost-net-cdev.h\"\n-\n-#define FUSE_OPT_DUMMY \t\t\"\\0\\0\"\n-#define FUSE_OPT_FORE \t\t\"-f\\0\\0\"\n-#define FUSE_OPT_NOMULTI \t\"-s\\0\\0\"\n-\n-const uint32_t\tdefault_major = 231;\n-const uint32_t\tdefault_minor = 1;\n-const char\t\tcuse_device_name[]\t= \"/dev/cuse\";\n-const char\t\tdefault_cdev[] = \"vhost-net\";\n-\n-static struct fuse_session\t\t\t*session;\n-static struct vhost_net_device_ops\tconst *ops;\n-\n-/*\n- * Returns vhost_device_ctx from given fuse_req_t. The index is populated later when\n- * the device is added to the device linked list.\n- */\n-static struct vhost_device_ctx\n-fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)\n-{\n-\tstruct vhost_device_ctx ctx;\n-\tstruct fuse_ctx const *const req_ctx = fuse_req_ctx(req);\n-\n-\tctx.pid = req_ctx->pid;\n-\tctx.fh = fi->fh;\n-\n-\treturn ctx;\n-}\n-\n-/*\n- * When the device is created in QEMU it gets initialised here and added to the device linked list.\n- */\n-static void\n-vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)\n-{\n-\tstruct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);\n-\tint err = 0;\n-\n-\terr = ops->new_device(ctx);\n-\tif (err == -1) {\n-\t\tfuse_reply_err(req, EPERM);\n-\t\treturn;\n-\t}\n-\n-\tfi->fh = err;\n-\n-\tRTE_LOG(INFO, VHOST_CONFIG, \"(%\"PRIu64\") Device configuration started\\n\", fi->fh);\n-\tfuse_reply_open(req, fi);\n-}\n-\n-/*\n- * When QEMU is shutdown or killed the device gets released.\n- */\n-static void\n-vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)\n-{\n-\tint err = 0;\n-\tstruct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);\n-\n-\tops->destroy_device(ctx);\n-\tRTE_LOG(INFO, VHOST_CONFIG, \"(%\"PRIu64\") Device released\\n\", ctx.fh);\n-\tfuse_reply_err(req, err);\n-}\n-\n-/*\n- * Boilerplate code for CUSE IOCTL\n- * Implicit arguments: ctx, req, result.\n- */\n-#define VHOST_IOCTL(func) do {\t\t\t\t\t\t\t\t\\\n-\tresult = (func)(ctx);\t\t\t\t\t\t\t\t\t\\\n-\tfuse_reply_ioctl(req, result, NULL, 0);\t\t\t\t\t\\\n-} while(0)\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\n-/*\n- * Boilerplate IOCTL RETRY\n- * Implicit arguments: req.\n- */\n-#define VHOST_IOCTL_RETRY(size_r, size_w) do {\t\t\t\t\t\t\t\t\t\\\n-\tstruct iovec iov_r = { arg, (size_r) };\t\t\t\t\t\t\t\t\t\t\\\n-\tstruct iovec iov_w = { arg, (size_w) };\t\t\t\t\t\t\t\t\t\t\\\n-\tfuse_reply_ioctl_retry(req, &iov_r, (size_r)?1:0, &iov_w, (size_w)?1:0);\t\\\n-} while(0)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\n-/*\n- * Boilerplate code for CUSE Read IOCTL\n- * Implicit arguments: ctx, req, result, in_bufsz, in_buf.\n- */\n-#define VHOST_IOCTL_R(type, var, func) do {\t\t\t\t\\\n-\tif (!in_bufsz) 
{\t\t\t\t\t\t\t\t\t\\\n-\t\tVHOST_IOCTL_RETRY(sizeof(type), 0);\t\t\t\t\\\n-\t} else {\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\t(var) = *(const type * ) in_buf;\t\t\t\t\\\n-\t\tresult = func(ctx, &(var));\t\t\t\t\t\t\\\n-\t\tfuse_reply_ioctl(req, result, NULL, 0);\t\t\t\\\n-\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-} while(0)\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\n-/*\n- *\tBoilerplate code for CUSE Write IOCTL\n- * Implicit arguments: ctx, req, result, out_bufsz.\n- */\n-#define\tVHOST_IOCTL_W(type, var, func) do {\t\t\t\t\t\t\\\n-\tif (!out_bufsz) {\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\tVHOST_IOCTL_RETRY(0, sizeof(type));\t\t\t\t\t\t\\\n-\t} else {\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\tresult = (func)(ctx, &(var));\t\t\t\t\t\t\t\\\n-\t\tfuse_reply_ioctl(req, result, &(var), sizeof(type));\t\\\n-\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-} while(0)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\n-/*\n- * Boilerplate code for CUSE Read/Write IOCTL\n- * Implicit arguments: ctx, req, result, in_bufsz, in_buf.\n- */\n-#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {\t\t\t\\\n-\tif (!in_bufsz) {\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\tVHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));\t\t\t\\\n-\t} else {\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\t\t(var1) = *(const type1* ) (in_buf);\t\t\t\t\t\t\t\\\n-\t\tresult = (func)(ctx, (var1), &(var2));\t\t\t\t\t\t\\\n-\t\tfuse_reply_ioctl(req, result, &(var2), sizeof(type2));\t\t\\\n-\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-} while(0)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n-\n-/*\n- * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on\n- * the type of IOCTL a buffer is requested to read or to write. This\n- * request is handled by FUSE and the buffer is then given to CUSE.\n- */\n-static void\n-vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,\n-\t\tstruct fuse_file_info *fi, __rte_unused unsigned flags,\n-\t\tconst void *in_buf, size_t in_bufsz, size_t out_bufsz)\n-{\n-\tstruct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);\n-\tstruct vhost_vring_file file;\n-\tstruct vhost_vring_state state;\n-\tstruct vhost_vring_addr addr;\n-\tuint64_t features;\n-\tuint32_t index;\n-\tint result = 0;\n-\n-\tswitch(cmd)\n-\t{\n-\t\tcase VHOST_NET_SET_BACKEND:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_NET_SET_BACKEND\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_GET_FEATURES:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_GET_FEATURES\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_W(uint64_t, features, ops->get_features);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_SET_FEATURES:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_FEATURES\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_R(uint64_t, features, ops->set_features);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_RESET_OWNER:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_RESET_OWNER\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL(ops->reset_owner);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_SET_OWNER:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_OWNER\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL(ops->set_owner);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_SET_MEM_TABLE:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_MEM_TABLE\\n\", ctx.fh);\n-\t\t\tstatic struct vhost_memory mem_temp;\n-\n-\t\t\tswitch(in_bufsz){\n-\t\t\t\tcase 0:\n-\t\t\t\t\tVHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);\n-\t\t\t\t\tbreak;\n-\n-\t\t\t\tcase sizeof(struct vhost_memory):\n-\t\t\t\t\tmem_temp = *(const struct vhost_memory *) in_buf;\n-\n-\t\t\t\t\tif (mem_temp.nregions > 0) 
{\n-\t\t\t\t\t\tVHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);\n-\t\t\t\t\t} else {\n-\t\t\t\t\t\tresult = -1;\n-\t\t\t\t\t\tfuse_reply_ioctl(req, result, NULL, 0);\n-\t\t\t\t\t}\n-\t\t\t\t\tbreak;\n-\n-\t\t\t\tdefault:\n-\t\t\t\t\tresult = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);\n-\t\t\t\t\tif (result)\n-\t\t\t\t\t\tfuse_reply_err(req, EINVAL);\n-\t\t\t\t\telse\n-\t\t\t\t\t\tfuse_reply_ioctl(req, result, NULL, 0);\n-\n-\t\t\t}\n-\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_SET_VRING_NUM:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_NUM\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_SET_VRING_BASE:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_BASE\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_GET_VRING_BASE:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_GET_VRING_BASE\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_SET_VRING_ADDR:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_ADDR\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_SET_VRING_KICK:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_KICK\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);\n-\t\t\tbreak;\n-\n-\t\tcase VHOST_SET_VRING_CALL:\n-\t\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: VHOST_SET_VRING_CALL\\n\", ctx.fh);\n-\t\t\tVHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);\n-\t\t\tbreak;\n-\n-\t\tdefault:\n-\t\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: DOES NOT EXIST\\n\", ctx.fh);\n-\t\t\tresult = -1;\n-\t\t\tfuse_reply_ioctl(req, result, NULL, 0);\n-\t}\n-\n-\tif (result < 0) {\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: FAIL\\n\", ctx.fh);\n-\t} else {\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") IOCTL: SUCCESS\\n\", ctx.fh);\n-\t}\n-}\n-\n-/*\n- * The structure holding the open, release and ioctl function pointers is populated here.\n- */\n-static const struct cuse_lowlevel_ops vhost_net_ops = {\n-\t.open\t\t= vhost_net_open,\n-\t.release\t= vhost_net_release,\n-\t.ioctl\t\t= vhost_net_ioctl,\n-};\n-\n-/*\n- * cuse_info is populated and used to register the cuse device. vhost_net_device_ops are\n- * also passed when the device is registered in main.c.\n- */\n-int\n-register_cuse_device(const char *base_name, int index, struct vhost_net_device_ops const * const pops)\n-{\n-\tstruct cuse_info cuse_info;\n-\tchar device_name[PATH_MAX] = \"\";\n-\tchar char_device_name[PATH_MAX] = \"\";\n-\tconst char *device_argv[] = { device_name };\n-\n-\tchar fuse_opt_dummy[] = FUSE_OPT_DUMMY;\n-\tchar fuse_opt_fore[] = FUSE_OPT_FORE;\n-\tchar fuse_opt_nomulti[] = FUSE_OPT_NOMULTI;\n-\tchar *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};\n-\n-\tif (access(cuse_device_name, R_OK | W_OK) < 0) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Character device %s can't be accessed; it may not exist\\n\", cuse_device_name);\n-\t\treturn -1;\n-\t}\n-\n-\t/*\n-\t * The device name is created. This is passed to QEMU so that it can register\n-\t * the device with our application. 
The index allows us to have multiple instances\n-\t * of userspace vhost which we can then add devices to separately.\n-\t */\n-\tif (strncmp(base_name, default_cdev, PATH_MAX)!=0) {\n-\t\tsnprintf(device_name, PATH_MAX, \"DEVNAME=%s-%d\", base_name, index);\n-\t\tsnprintf(char_device_name, PATH_MAX, \"/dev/%s-%d\", base_name, index);\n-\t} else {\n-\t\tsnprintf(device_name, PATH_MAX, \"DEVNAME=%s\", base_name);\n-\t\tsnprintf(char_device_name, PATH_MAX, \"/dev/%s\", base_name);\n-\t}\n-\n-\t/* Check if device already exists. */\n-\tif (access(char_device_name, F_OK) != -1) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Character device %s already exists\\n\", char_device_name);\n-\t\treturn -1;\n-\t}\n-\n-\tmemset(&cuse_info, 0, sizeof(cuse_info));\n-\tcuse_info.dev_major = default_major;\n-\tcuse_info.dev_minor = default_minor + index;\n-\tcuse_info.dev_info_argc = 1;\n-\tcuse_info.dev_info_argv = device_argv;\n-\tcuse_info.flags = CUSE_UNRESTRICTED_IOCTL;\n-\n-\tops = pops;\n-\n-\tsession = cuse_lowlevel_setup(3, fuse_argv,\n-\t\t\t\t&cuse_info, &vhost_net_ops, 0, NULL);\n-\tif (session == NULL)\n-\t\treturn -1;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * The CUSE session is launched allowing the application to receive open, release and ioctl calls.\n- */\n-int\n-start_cuse_session_loop(void)\n-{\n-\tfuse_session_loop(session);\n-\n-\treturn 0;\n-}\ndiff --git a/examples/vhost/vhost-net-cdev.h b/examples/vhost/vhost-net-cdev.h\ndeleted file mode 100644\nindex 575daa9..0000000\n--- a/examples/vhost/vhost-net-cdev.h\n+++ /dev/null\n@@ -1,83 +0,0 @@\n-/*-\n- *   BSD LICENSE\n- *\n- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n- *   All rights reserved.\n- *\n- *   Redistribution and use in source and binary forms, with or without\n- *   modification, are permitted provided that the following conditions\n- *   are met:\n- *\n- *     * Redistributions of source code must retain the above copyright\n- *       notice, this list of conditions and the following disclaimer.\n- *     * Redistributions in binary form must reproduce the above copyright\n- *       notice, this list of conditions and the following disclaimer in\n- *       the documentation and/or other materials provided with the\n- *       distribution.\n- *     * Neither the name of Intel Corporation nor the names of its\n- *       contributors may be used to endorse or promote products derived\n- *       from this software without specific prior written permission.\n- *\n- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- */\n-\n-#ifndef _VHOST_NET_CDEV_H_\n-#define _VHOST_NET_CDEV_H_\n-\n-#include <linux/vhost.h>\n-\n-struct vhost_memory;\n-struct vhost_vring_state;\n-struct vhost_vring_addr;\n-struct vhost_vring_file;\n-\n-/*\n- * Structure used to identify device context.\n- */\n-struct vhost_device_ctx\n-{\n-\tpid_t\t\tpid;\t/* PID of process calling the IOCTL. */\n-\tuint64_t \tfh;\t\t/* Populated with fi->fh to track the device index. */\n-};\n-\n-/*\n- * Structure contains function pointers to be defined in virtio-net.c. These\n- * functions are called in CUSE context and are used to configure devices.\n- */\n-struct vhost_net_device_ops {\n-\tint (* new_device) \t\t(struct vhost_device_ctx);\n-\tvoid (* destroy_device) (struct vhost_device_ctx);\n-\n-\tint (* get_features) \t(struct vhost_device_ctx, uint64_t *);\n-\tint (* set_features) \t(struct vhost_device_ctx, uint64_t *);\n-\n-\tint (* set_mem_table) \t(struct vhost_device_ctx, const void *, uint32_t);\n-\n-\tint (* set_vring_num) \t(struct vhost_device_ctx, struct vhost_vring_state *);\n-\tint (* set_vring_addr) \t(struct vhost_device_ctx, struct vhost_vring_addr *);\n-\tint (* set_vring_base) \t(struct vhost_device_ctx, struct vhost_vring_state *);\n-\tint (* get_vring_base) \t(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);\n-\n-\tint (* set_vring_kick) \t(struct vhost_device_ctx, struct vhost_vring_file *);\n-\tint (* set_vring_call) \t(struct vhost_device_ctx, struct vhost_vring_file *);\n-\n-\tint (* set_backend) \t(struct vhost_device_ctx, struct vhost_vring_file *);\n-\n-\tint (* set_owner) \t\t(struct vhost_device_ctx);\n-\tint (* reset_owner) \t(struct vhost_device_ctx);\n-};\n-\n-int register_cuse_device(const char *base_name, int index, struct vhost_net_device_ops const * const);\n-int start_cuse_session_loop(void);\n-\n-#endif /* _VHOST_NET_CDEV_H_ */\ndiff --git a/examples/vhost/virtio-net.c b/examples/vhost/virtio-net.c\ndeleted file mode 100644\nindex 5e659c7..0000000\n--- a/examples/vhost/virtio-net.c\n+++ /dev/null\n@@ -1,1165 +0,0 @@\n-/*-\n- *   BSD LICENSE\n- *\n- *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n- *   All rights reserved.\n- *\n- *   Redistribution and use in source and binary forms, with or without\n- *   modification, are permitted provided that the following conditions\n- *   are met:\n- *\n- *     * Redistributions of source code must retain the above copyright\n- *       notice, this list of conditions and the following disclaimer.\n- *     * Redistributions in binary form must reproduce the above copyright\n- *       notice, this list of conditions and the following disclaimer in\n- *       the documentation and/or other materials provided with the\n- *       distribution.\n- *     * Neither the name of Intel Corporation nor the names of its\n- *       contributors may be used to endorse or promote products derived\n- *       from this software without specific prior written permission.\n- *\n- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- */\n-\n-#include <dirent.h>\n-#include <fuse/cuse_lowlevel.h>\n-#include <linux/vhost.h>\n-#include <linux/virtio_net.h>\n-#include <stddef.h>\n-#include <stdint.h>\n-#include <stdlib.h>\n-#include <sys/eventfd.h>\n-#include <sys/ioctl.h>\n-#include <sys/mman.h>\n-#include <unistd.h>\n-\n-#include <rte_ethdev.h>\n-#include <rte_log.h>\n-#include <rte_string_fns.h>\n-#include <rte_memory.h>\n-\n-#include \"main.h\"\n-#include \"virtio-net.h\"\n-#include \"vhost-net-cdev.h\"\n-#include \"eventfd_link/eventfd_link.h\"\n-\n-const char eventfd_cdev[] = \"/dev/eventfd-link\";\n-\n-extern uint32_t num_devices;\n-static uint32_t num_cur_devices = 0;\n-\n-/* device ops to add/remove device to data core. */\n-static struct virtio_net_device_ops const * notify_ops;\n-/* Root address of the linked list in the configuration core. */\n-static struct virtio_net_config_ll\t\t\t*ll_root = NULL;\n-\n-/* Features supported by this application. RX merge buffers are disabled by default. */\n-uint64_t VHOST_FEATURES = (0ULL << VIRTIO_NET_F_MRG_RXBUF);\n-\n-/* Line size for reading maps file. */\n-const uint32_t BUFSIZE = PATH_MAX;\n-\n-/* Size of prot char array in procmap. */\n-#define PROT_SZ 5\n-\n-/* Number of elements in procmap struct. */\n-#define PROCMAP_SZ 8\n-\n-/* Structure containing information gathered from maps file. */\n-struct procmap\n-{\n-\tuint64_t\tva_start;\t\t\t/* Start virtual address in file. */\n-\tuint64_t\tlen;\t\t\t\t/* Size of file. */\n-\tuint64_t\tpgoff;\t\t\t\t/* Not used. */\n-\tuint32_t\tmaj;\t\t\t\t/* Not used. */\n-\tuint32_t\tmin;\t\t\t\t/* Not used. */\n-\tuint32_t\tino;\t\t\t\t/* Not used. */\n-\tchar\t\tprot[PROT_SZ];\t\t/* Not used. */\n-\tchar\t\tfname[PATH_MAX];\t/* File name. */\n-};\n-\n-/*\n- * Converts QEMU virtual address to Vhost virtual address. 
This function is used\n- * to convert the ring addresses to our address space.\n- */\n-static uint64_t\n-qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)\n-{\n-\tstruct virtio_memory_regions *region;\n-\tuint64_t vhost_va = 0;\n-\tuint32_t regionidx = 0;\n-\n-\t/* Find the region where the address lives. */\n-\tfor (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {\n-\t\tregion = &dev->mem->regions[regionidx];\n-\t\tif ((qemu_va >= region->userspace_address) &&\n-\t\t\t\t(qemu_va <= region->userspace_address +\n-\t\t\t \tregion->memory_size)) {\n-\t\t\tvhost_va = dev->mem->mapped_address + qemu_va - dev->mem->base_address;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\treturn vhost_va;\n-}\n-\n-/*\n- * Locate the file containing QEMU's memory space and map it to our address space.\n- */\n-static int\n-host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, uint64_t addr)\n-{\n-\tstruct dirent *dptr = NULL;\n-\tstruct procmap procmap;\n-\tDIR *dp = NULL;\n-\tint fd;\n-\tint i;\n-\tchar memfile[PATH_MAX];\n-\tchar mapfile[PATH_MAX];\n-\tchar procdir[PATH_MAX];\n-\tchar resolved_path[PATH_MAX];\n-\tFILE\t\t*fmap;\n-\tvoid\t\t*map;\n-\tuint8_t \tfound = 0;\n-\tchar \t\tline[BUFSIZE];\n-\tchar dlm[] = \"-   :   \";\n-\tchar *str, *sp, *in[PROCMAP_SZ];\n-\tchar *end = NULL;\n-\n-\t/* Path where mem files are located. */\n-\tsnprintf (procdir, PATH_MAX, \"/proc/%u/fd/\", pid);\n-\t/* Maps file used to locate mem file. */\n-\tsnprintf (mapfile, PATH_MAX, \"/proc/%u/maps\", pid);\n-\n-\tfmap = fopen(mapfile, \"r\");\n-\tif (fmap == NULL) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to open maps file for pid %d\\n\", dev->device_fh, pid);\n-\t\treturn -1;\n-\t}\n-\n-\t/* Read through maps file until we find out base_address. */\n-\twhile (fgets(line, BUFSIZE, fmap) != 0) {\n-\t\tstr = line;\n-\t\terrno = 0;\n-\t\t/* Split line in to fields. */\n-\t\tfor (i = 0; i < PROCMAP_SZ; i++) {\n-\t\t\tif (((in[i] = strtok_r(str, &dlm[i], &sp)) == NULL) || (errno != 0)) {\n-\t\t\t\tfclose(fmap);\n-\t\t\t\treturn -1;\n-\t\t\t}\n-\t\t\tstr = NULL;\n-\t\t}\n-\n-\t\t/* Convert/Copy each field as needed. 
*/\n-\t\tprocmap.va_start = strtoull(in[0], &end, 16);\n-\t\tif ((in[0] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n-\t\t\tfclose(fmap);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tprocmap.len = strtoull(in[1], &end, 16);\n-\t\tif ((in[1] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n-\t\t\tfclose(fmap);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tprocmap.pgoff = strtoull(in[3], &end, 16);\n-\t\tif ((in[3] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n-\t\t\tfclose(fmap);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tprocmap.maj = strtoul(in[4], &end, 16);\n-\t\tif ((in[4] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n-\t\t\tfclose(fmap);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tprocmap.min = strtoul(in[5], &end, 16);\n-\t\tif ((in[5] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n-\t\t\tfclose(fmap);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tprocmap.ino = strtoul(in[6], &end, 16);\n-\t\tif ((in[6] == '\\0') || (end == NULL) || (*end != '\\0') || (errno != 0)) {\n-\t\t\tfclose(fmap);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tmemcpy(&procmap.prot, in[2], PROT_SZ);\n-\t\tmemcpy(&procmap.fname, in[7], PATH_MAX);\n-\n-\t\tif (procmap.va_start == addr) {\n-\t\t\tprocmap.len = procmap.len - procmap.va_start;\n-\t\t\tfound = 1;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\tfclose(fmap);\n-\n-\tif (!found) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find memory file in pid %d maps file\\n\", dev->device_fh, pid);\n-\t\treturn -1;\n-\t}\n-\n-\t/* Find the guest memory file among the process fds. */\n-\tdp = opendir(procdir);\n-\tif (dp == NULL) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Cannot open pid %d process directory \\n\", dev->device_fh, pid);\n-\t\treturn -1;\n-\n-\t}\n-\n-\tfound = 0;\n-\n-\t/* Read the fd directory contents. */\n-\twhile (NULL != (dptr = readdir(dp))) {\n-\t\tsnprintf (memfile, PATH_MAX, \"/proc/%u/fd/%s\", pid, dptr->d_name);\n-\t    realpath(memfile, resolved_path);\n-\t\tif (resolved_path == NULL) {\n-\t\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to resolve fd directory\\n\", dev->device_fh);\n-\t\t\tclosedir(dp);\n-\t\t\treturn -1;\n-\t\t}\n-\t\tif (strncmp(resolved_path, procmap.fname,\n-\t\t\tstrnlen(procmap.fname, PATH_MAX)) == 0) {\n-\t\t\tfound = 1;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\n-\tclosedir(dp);\n-\n-\tif (found == 0) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find memory file for pid %d\\n\", dev->device_fh, pid);\n-\t\treturn -1;\n-\t}\n-\t/* Open the shared memory file and map the memory into this process. 
*/\n-\tfd = open(memfile, O_RDWR);\n-\n-\tif (fd == -1) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to open %s for pid %d\\n\", dev->device_fh, memfile, pid);\n-\t\treturn -1;\n-\t}\n-\n-\tmap = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE , MAP_POPULATE|MAP_SHARED, fd, 0);\n-\tclose (fd);\n-\n-\tif (map == MAP_FAILED) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Error mapping the file %s for pid %d\\n\",  dev->device_fh, memfile, pid);\n-\t\treturn -1;\n-\t}\n-\n-\t/* Store the memory address and size in the device data structure */\n-\tmem->mapped_address = (uint64_t)(uintptr_t)map;\n-\tmem->mapped_size = procmap.len;\n-\n-\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") Mem File: %s->%s - Size: %llu - VA: %p\\n\", dev->device_fh,\n-\t\tmemfile, resolved_path, (long long unsigned)mem->mapped_size, map);\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Retrieves an entry from the devices configuration linked list.\n- */\n-static struct virtio_net_config_ll *\n-get_config_ll_entry(struct vhost_device_ctx ctx)\n-{\n-\tstruct virtio_net_config_ll *ll_dev = ll_root;\n-\n-\t/* Loop through linked list until the device_fh is found. */\n-\twhile (ll_dev != NULL) {\n-\t\tif (ll_dev->dev.device_fh == ctx.fh)\n-\t\t\treturn ll_dev;\n-\t\tll_dev = ll_dev->next;\n-\t}\n-\n-\treturn NULL;\n-}\n-\n-/*\n- * Searches the configuration core linked list and retrieves the device if it exists.\n- */\n-static struct virtio_net *\n-get_device(struct vhost_device_ctx ctx)\n-{\n-\tstruct virtio_net_config_ll *ll_dev;\n-\n-\tll_dev = get_config_ll_entry(ctx);\n-\n-\t/* If a matching entry is found in the linked list, return the device in that entry. */\n-\tif (ll_dev) {\n-\t\treturn &ll_dev->dev;\n-\t}\n-\n-\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Device not found in linked list.\\n\", ctx.fh);\n-\treturn NULL;\n-}\n-\n-/*\n- * Add entry containing a device to the device configuration linked list.\n- */\n-static void\n-add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)\n-{\n-\tstruct virtio_net_config_ll *ll_dev = ll_root;\n-\n-\t/* If ll_dev == NULL then this is the first device so go to else */\n-\tif (ll_dev) {\n-\t\t/* If the 1st device_fh != 0 then we insert our device here. */\n-\t\tif (ll_dev->dev.device_fh != 0)\t{\n-\t\t\tnew_ll_dev->dev.device_fh = 0;\n-\t\t\tnew_ll_dev->next = ll_dev;\n-\t\t\tll_root = new_ll_dev;\n-\t\t} else {\n-\t\t\t/* Increment through the ll until we find un unused device_fh. Insert the device at that entry*/\n-\t\t\twhile ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))\n-\t\t\t\tll_dev = ll_dev->next;\n-\n-\t\t\tnew_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;\n-\t\t\tnew_ll_dev->next = ll_dev->next;\n-\t\t\tll_dev->next = new_ll_dev;\n-\t\t}\n-\t} else {\n-\t\tll_root = new_ll_dev;\n-\t\tll_root->dev.device_fh = 0;\n-\t}\n-\n-}\n-\n-/*\n- * Unmap any memory, close any file descriptors and free any memory owned by a device.\n- */\n-static void\n-cleanup_device(struct virtio_net *dev)\n-{\n-\t/* Unmap QEMU memory file if mapped. */\n-\tif (dev->mem) {\n-\t\tmunmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);\n-\t\tif (dev->mem->regions_hpa)\n-\t\t\tfree(dev->mem->regions_hpa);\n-\t\tfree(dev->mem);\n-\t}\n-\n-\t/* Close any event notifiers opened by device. 
*/\n-\tif (dev->virtqueue[VIRTIO_RXQ]->callfd)\n-\t\tclose((int)dev->virtqueue[VIRTIO_RXQ]->callfd);\n-\tif (dev->virtqueue[VIRTIO_RXQ]->kickfd)\n-\t\tclose((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);\n-\tif (dev->virtqueue[VIRTIO_TXQ]->callfd)\n-\t\tclose((int)dev->virtqueue[VIRTIO_TXQ]->callfd);\n-\tif (dev->virtqueue[VIRTIO_TXQ]->kickfd)\n-\t\tclose((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);\n-}\n-\n-/*\n- * Release virtqueues and device memory.\n- */\n-static void\n-free_device(struct virtio_net_config_ll *ll_dev)\n-{\n-\t/* Free any malloc'd memory */\n-\tfree(ll_dev->dev.virtqueue[VIRTIO_RXQ]);\n-\tfree(ll_dev->dev.virtqueue[VIRTIO_TXQ]);\n-\tfree(ll_dev);\n-}\n-/*\n- * Remove an entry from the device configuration linked list.\n- */\n-static struct virtio_net_config_ll *\n-rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config_ll *ll_dev_last)\n-{\n-\t/* First remove the device and then clean it up. */\n-\tif (ll_dev == ll_root) {\n-\t\tll_root = ll_dev->next;\n-\t\tcleanup_device(&ll_dev->dev);\n-\t\tfree_device(ll_dev);\n-\t\treturn ll_root;\n-\t} else {\n-\t\tif (likely(ll_dev_last != NULL)) {\n-\t\t\tll_dev_last->next = ll_dev->next;\n-\t\t\tcleanup_device(&ll_dev->dev);\n-\t\t\tfree_device(ll_dev);\n-\t\t\treturn ll_dev_last->next;\n-\t\t} else {\n-\t\t\tcleanup_device(&ll_dev->dev);\n-\t\t\tfree_device(ll_dev);\n-\t\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Remove entry from config_ll failed\\n\");\n-\t\t\treturn NULL;\n-\t\t}\n-\t}\n-}\n-\n-/*\n- *  Initialise all variables in device structure.\n- */\n-static void\n-init_device(struct virtio_net *dev)\n-{\n-\tuint64_t vq_offset;\n-\n-\t/* Virtqueues have already been malloced so we don't want to set them to NULL. */\n-\tvq_offset = offsetof(struct virtio_net, mem);\n-\n-\t/* Set everything to 0. */\n-\tmemset((void*)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,\n-\t\t(sizeof(struct virtio_net) - (size_t)vq_offset));\n-\tmemset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));\n-\tmemset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));\n-\n-\t/* Backends are set to -1 indicating an inactive device. */\n-\tdev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;\n-\tdev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;\n-}\n-\n-/*\n- * Function is called from the CUSE open function. The device structure is\n- * initialised and a new entry is added to the device configuration linked\n- * list.\n- */\n-static int\n-new_device(struct vhost_device_ctx ctx)\n-{\n-\tstruct virtio_net_config_ll *new_ll_dev;\n-\tstruct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;\n-\n-\t/*check the number of devices in the system*/\n-\tif (num_cur_devices == num_devices) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"() Max num devices (%u) exceeded\\n\", num_devices);\n-\t\treturn -1;\n-\t}\n-\n-\t/* Setup device and virtqueues. 
*/\n-\tnew_ll_dev = malloc(sizeof(struct virtio_net_config_ll));\n-\tif (new_ll_dev == NULL) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to allocate memory for dev.\\n\", ctx.fh);\n-\t\treturn -1;\n-\t}\n-\n-\tvirtqueue_rx = malloc(sizeof(struct vhost_virtqueue));\n-\tif (virtqueue_rx == NULL) {\n-\t\tfree(new_ll_dev);\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to allocate memory for virtqueue_rx.\\n\", ctx.fh);\n-\t\treturn -1;\n-\t}\n-\n-\tvirtqueue_tx = malloc(sizeof(struct vhost_virtqueue));\n-\tif (virtqueue_tx == NULL) {\n-\t\tfree(virtqueue_rx);\n-\t\tfree(new_ll_dev);\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to allocate memory for virtqueue_tx.\\n\", ctx.fh);\n-\t\treturn -1;\n-\t}\n-\n-\tnew_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;\n-\tnew_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;\n-\n-\t/* Initialise device and virtqueues. */\n-\tinit_device(&new_ll_dev->dev);\n-\n-\tnew_ll_dev->next = NULL;\n-\n-\t/* Add entry to device configuration linked list. */\n-\tadd_config_ll_entry(new_ll_dev);\n-\n-\t/*increment the number of devices in the system*/\n-\tnum_cur_devices++;\n-\n-\treturn new_ll_dev->dev.device_fh;\n-}\n-\n-/*\n- * Function is called from the CUSE release function. This function will cleanup\n- * the device and remove it from device configuration linked list.\n- */\n-static void\n-destroy_device(struct vhost_device_ctx ctx)\n-{\n-\tstruct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;\n-\tstruct virtio_net_config_ll *ll_dev_cur = ll_root;\n-\n-\t/* Find the linked list entry for the device to be removed. */\n-\tll_dev_cur_ctx = get_config_ll_entry(ctx);\n-\twhile (ll_dev_cur != NULL) {\n-\t\t/* If the device is found or a device that doesn't exist is found then it is removed. */\n-\t\tif (ll_dev_cur == ll_dev_cur_ctx) {\n-\t\t\t/*\n-\t\t\t * If the device is running on a data core then call the function to remove it from\n-\t\t\t * the data core.\n-\t\t\t */\n-\t\t\tif ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))\n-\t\t\t\tnotify_ops->destroy_device(&(ll_dev_cur->dev));\n-\t\t\tll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);\n-\t\t} else {\n-\t\t\tll_dev_last = ll_dev_cur;\n-\t\t\tll_dev_cur = ll_dev_cur->next;\n-\t\t}\n-\t}\n-\n-\t/*decrement the number of devices in the system*/\n-\tnum_cur_devices--;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_SET_OWNER\n- * This function just returns success at the moment unless the device hasn't been initialised.\n- */\n-static int\n-set_owner(struct vhost_device_ctx ctx)\n-{\n-\tstruct virtio_net *dev;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_RESET_OWNER\n- */\n-static int\n-reset_owner(struct vhost_device_ctx ctx)\n-{\n-\tstruct virtio_net_config_ll *ll_dev;\n-\n-\tll_dev = get_config_ll_entry(ctx);\n-\n-\tcleanup_device(&ll_dev->dev);\n-\tinit_device(&ll_dev->dev);\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_GET_FEATURES\n- * The features that we support are requested.\n- */\n-static int\n-get_features(struct vhost_device_ctx ctx, uint64_t *pu)\n-{\n-\tstruct virtio_net *dev;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\t/* Send our supported features. 
*/\n-\t*pu = VHOST_FEATURES;\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_SET_FEATURES\n- * We receive the negotiated set of features supported by us and the virtio device.\n- */\n-static int\n-set_features(struct vhost_device_ctx ctx, uint64_t *pu)\n-{\n-\tstruct virtio_net *dev;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\tif (*pu & ~VHOST_FEATURES)\n-\t\treturn -1;\n-\n-\t/* Store the negotiated feature list for the device. */\n-\tdev->features = *pu;\n-\n-\t/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */\n-\tif (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") Mergeable RX buffers enabled\\n\", dev->device_fh);\n-\t\tdev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n-\t\tdev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n-\t} else {\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") Mergeable RX buffers disabled\\n\", dev->device_fh);\n-\t\tdev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);\n-\t\tdev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);\n-\t}\n-\treturn 0;\n-}\n-\n-/*\n- * Calculate the region count of physical continous regions for one particular\n- * region of whose vhost virtual address is continous. The particular region\n- * start from vva_start, with size of 'size' in argument.\n- */\n-static uint32_t check_hpa_regions(uint64_t vva_start, uint64_t size)\n-{\n-\tuint32_t i, nregions = 0, page_size = PAGE_SIZE;\n-\tuint64_t cur_phys_addr = 0, next_phys_addr = 0;\n-\tif (vva_start % page_size) {\n-\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\"in check_countinous: vva start(%p) mod page_size(%d) \"\n-\t\t\t\"has remainder\\n\",\n-\t\t\t(void *)(uintptr_t)vva_start, page_size);\n-\t\treturn 0;\n-\t}\n-\tif (size % page_size) {\n-\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\"in check_countinous: \"\n-\t\t\t\"size((%\"PRIu64\")) mod page_size(%d) has remainder\\n\",\n-\t\t\tsize, page_size);\n-\t\treturn 0;\n-\t}\n-\tfor (i = 0; i < size - page_size; i = i + page_size) {\n-\t\tcur_phys_addr\n-\t\t\t= rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));\n-\t\tnext_phys_addr = rte_mem_virt2phy(\n-\t\t\t(void *)(uintptr_t)(vva_start + i + page_size));\n-\t\tif ((cur_phys_addr + page_size) != next_phys_addr) {\n-\t\t\t++nregions;\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"in check_continuous: hva addr:(%p) is not \"\n-\t\t\t\t\"continuous with hva addr:(%p), diff:%d\\n\",\n-\t\t\t\t(void *)(uintptr_t)(vva_start + (uint64_t)i),\n-\t\t\t\t(void *)(uintptr_t)(vva_start + (uint64_t)i\n-\t\t\t\t+ page_size), page_size);\n-\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\"in check_continuous: hpa addr:(%p) is not \"\n-\t\t\t\t\"continuous with hpa addr:(%p), \"\n-\t\t\t\t\"diff:(%\"PRIu64\")\\n\",\n-\t\t\t\t(void *)(uintptr_t)cur_phys_addr,\n-\t\t\t\t(void *)(uintptr_t)next_phys_addr,\n-\t\t\t\t(next_phys_addr-cur_phys_addr));\n-\t\t}\n-\t}\n-\treturn nregions;\n-}\n-\n-/*\n- * Divide each region whose vhost virtual address is continous into a few\n- * sub-regions, make sure the physical address within each sub-region are\n- * continous. And fill offset(to GPA) and size etc. 
information of each\n- * sub-region into regions_hpa.\n- */\n-static uint32_t fill_hpa_memory_regions(void *memory)\n-{\n-\tuint32_t regionidx, regionidx_hpa = 0, i, k, page_size = PAGE_SIZE;\n-\tuint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;\n-\tstruct virtio_memory *virtio_memory = (struct virtio_memory *)memory;\n-\tstruct virtio_memory_regions_hpa *mem_region_hpa\n-\t\t= virtio_memory->regions_hpa;\n-\n-\tif (mem_region_hpa == NULL)\n-\t\treturn 0;\n-\n-\tfor (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {\n-\t\tvva_start = virtio_memory->regions[regionidx].guest_phys_address\n-\t\t\t+ virtio_memory->regions[regionidx].address_offset;\n-\t\tmem_region_hpa[regionidx_hpa].guest_phys_address\n-\t\t\t= virtio_memory->regions[regionidx].guest_phys_address;\n-\t\tmem_region_hpa[regionidx_hpa].host_phys_addr_offset =\n-\t\t\trte_mem_virt2phy((void *)(uintptr_t)(vva_start))\n-\t\t\t- mem_region_hpa[regionidx_hpa].guest_phys_address;\n-\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\"in fill_hpa_regions: guest phys addr start[%d]:(%p)\\n\",\n-\t\t\tregionidx_hpa,\n-\t\t\t(void *)(uintptr_t)\n-\t\t\t(mem_region_hpa[regionidx_hpa].guest_phys_address));\n-\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\"in fill_hpa_regions: host  phys addr start[%d]:(%p)\\n\",\n-\t\t\tregionidx_hpa,\n-\t\t\t(void *)(uintptr_t)\n-\t\t\t(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));\n-\t\tfor (i = 0, k = 0;\n-\t\t\ti < virtio_memory->regions[regionidx].memory_size\n-\t\t\t\t- page_size;\n-\t\t\ti += page_size) {\n-\t\t\tcur_phys_addr = rte_mem_virt2phy(\n-\t\t\t\t\t(void *)(uintptr_t)(vva_start + i));\n-\t\t\tnext_phys_addr = rte_mem_virt2phy(\n-\t\t\t\t\t(void *)(uintptr_t)(vva_start\n-\t\t\t\t\t+ i + page_size));\n-\t\t\tif ((cur_phys_addr + page_size) != next_phys_addr) {\n-\t\t\t\tmem_region_hpa[regionidx_hpa].guest_phys_address_end =\n-\t\t\t\t\tmem_region_hpa[regionidx_hpa].guest_phys_address\n-\t\t\t\t\t+ k + page_size;\n-\t\t\t\tmem_region_hpa[regionidx_hpa].memory_size\n-\t\t\t\t\t= k + page_size;\n-\t\t\t\tLOG_DEBUG(VHOST_CONFIG, \"in fill_hpa_regions: guest \"\n-\t\t\t\t\t\"phys addr end  [%d]:(%p)\\n\",\n-\t\t\t\t\tregionidx_hpa,\n-\t\t\t\t\t(void *)(uintptr_t)\n-\t\t\t\t\t(mem_region_hpa[regionidx_hpa].guest_phys_address_end));\n-\t\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\t\"in fill_hpa_regions: guest phys addr \"\n-\t\t\t\t\t\"size [%d]:(%p)\\n\",\n-\t\t\t\t\tregionidx_hpa,\n-\t\t\t\t\t(void *)(uintptr_t)\n-\t\t\t\t\t(mem_region_hpa[regionidx_hpa].memory_size));\n-\t\t\t\tmem_region_hpa[regionidx_hpa + 1].guest_phys_address\n-\t\t\t\t\t= mem_region_hpa[regionidx_hpa].guest_phys_address_end;\n-\t\t\t\t++regionidx_hpa;\n-\t\t\t\tmem_region_hpa[regionidx_hpa].host_phys_addr_offset =\n-\t\t\t\t\tnext_phys_addr\n-\t\t\t\t\t- mem_region_hpa[regionidx_hpa].guest_phys_address;\n-\t\t\t\tLOG_DEBUG(VHOST_CONFIG, \"in fill_hpa_regions: guest\"\n-\t\t\t\t\t\" phys addr start[%d]:(%p)\\n\",\n-\t\t\t\t\tregionidx_hpa,\n-\t\t\t\t\t(void *)(uintptr_t)\n-\t\t\t\t\t(mem_region_hpa[regionidx_hpa].guest_phys_address));\n-\t\t\t\tLOG_DEBUG(VHOST_CONFIG,\n-\t\t\t\t\t\"in fill_hpa_regions: host  phys addr \"\n-\t\t\t\t\t\"start[%d]:(%p)\\n\",\n-\t\t\t\t\tregionidx_hpa,\n-\t\t\t\t\t(void *)(uintptr_t)\n-\t\t\t\t\t(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));\n-\t\t\t\tk = 0;\n-\t\t\t} else {\n-\t\t\t\tk += page_size;\n-\t\t\t}\n-\t\t}\n-\t\tmem_region_hpa[regionidx_hpa].guest_phys_address_end\n-\t\t\t= mem_region_hpa[regionidx_hpa].guest_phys_address\n-\t\t\t+ k + 
page_size;\n-\t\tmem_region_hpa[regionidx_hpa].memory_size = k + page_size;\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"in fill_hpa_regions: guest phys addr end  \"\n-\t\t\t\"[%d]:(%p)\\n\", regionidx_hpa,\n-\t\t\t(void *)(uintptr_t)\n-\t\t\t(mem_region_hpa[regionidx_hpa].guest_phys_address_end));\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"in fill_hpa_regions: guest phys addr size \"\n-\t\t\t\"[%d]:(%p)\\n\", regionidx_hpa,\n-\t\t\t(void *)(uintptr_t)\n-\t\t\t(mem_region_hpa[regionidx_hpa].memory_size));\n-\t\t++regionidx_hpa;\n-\t}\n-\treturn regionidx_hpa;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE\n- * This function creates and populates the memory structure for the device. This includes\n- * storing offsets used to translate buffer addresses.\n- */\n-static int\n-set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_t nregions)\n-{\n-\tstruct virtio_net *dev;\n-\tstruct vhost_memory_region *mem_regions;\n-\tstruct virtio_memory *mem;\n-\tuint64_t size = offsetof(struct vhost_memory, regions);\n-\tuint32_t regionidx, valid_regions;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\tif (dev->mem) {\n-\t\tmunmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);\n-\t\tfree(dev->mem);\n-\t}\n-\n-\t/* Malloc the memory structure depending on the number of regions. */\n-\tmem = calloc(1, sizeof(struct virtio_memory) + (sizeof(struct virtio_memory_regions) * nregions));\n-\tif (mem == NULL) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to allocate memory for dev->mem.\\n\", dev->device_fh);\n-\t\treturn -1;\n-\t}\n-\n-\tmem->nregions = nregions;\n-\n-\tmem_regions = (void*)(uintptr_t)((uint64_t)(uintptr_t)mem_regions_addr + size);\n-\n-\tfor (regionidx = 0; regionidx < mem->nregions; regionidx++) {\n-\t\t/* Populate the region structure for each region. */\n-\t\tmem->regions[regionidx].guest_phys_address = mem_regions[regionidx].guest_phys_addr;\n-\t\tmem->regions[regionidx].guest_phys_address_end = mem->regions[regionidx].guest_phys_address +\n-\t\t\tmem_regions[regionidx].memory_size;\n-\t\tmem->regions[regionidx].memory_size = mem_regions[regionidx].memory_size;\n-\t\tmem->regions[regionidx].userspace_address = mem_regions[regionidx].userspace_addr;\n-\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%\"PRIu64\")\\n\", dev->device_fh,\n-\t\t\t\tregionidx, (void*)(uintptr_t)mem->regions[regionidx].guest_phys_address,\n-\t\t\t\t(void*)(uintptr_t)mem->regions[regionidx].userspace_address,\n-\t\t\t\tmem->regions[regionidx].memory_size);\n-\n-\t\t/*set the base address mapping*/\n-\t\tif (mem->regions[regionidx].guest_phys_address == 0x0) {\n-\t\t\tmem->base_address = mem->regions[regionidx].userspace_address;\n-\t\t\t/* Map VM memory file */\n-\t\t\tif (host_memory_map(dev, mem, ctx.pid, mem->base_address) != 0) {\n-\t\t\t\tfree(mem);\n-\t\t\t\treturn -1;\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\t/* Check that we have a valid base address. */\n-\tif (mem->base_address == 0) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find base address of qemu memory file.\\n\", dev->device_fh);\n-\t\tfree(mem);\n-\t\treturn -1;\n-\t}\n-\n-\t/* Check if all of our regions have valid mappings. Usually one does not exist in the QEMU memory file. 
*/\n-\tvalid_regions = mem->nregions;\n-\tfor (regionidx = 0; regionidx < mem->nregions; regionidx++) {\n-\t\tif ((mem->regions[regionidx].userspace_address < mem->base_address) ||\n-\t\t\t(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size)))\n-\t\t\t\tvalid_regions--;\n-\t}\n-\n-\t/* If a region does not have a valid mapping we rebuild our memory struct to contain only valid entries. */\n-\tif (valid_regions != mem->nregions) {\n-\t\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\\n\",\n-\t\t\tdev->device_fh);\n-\n-\t\t/* Re-populate the memory structure with only valid regions. Invalid regions are over-written with memmove. */\n-\t\tvalid_regions = 0;\n-\n-\t\tfor (regionidx = mem->nregions; 0 != regionidx--;) {\n-\t\t\tif ((mem->regions[regionidx].userspace_address < mem->base_address) ||\n-\t\t\t\t\t(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size))) {\n-\t\t\t\tmemmove(&mem->regions[regionidx], &mem->regions[regionidx + 1],\n-\t\t\t\t\tsizeof(struct virtio_memory_regions) * valid_regions);\n-\t\t\t} else {\n-\t\t\t\tvalid_regions++;\n-\t\t\t}\n-\t\t}\n-\t}\n-\tmem->nregions = valid_regions;\n-\tmem->nregions_hpa = mem->nregions;\n-\tdev->mem = mem;\n-\n-\t/*\n-\t * Calculate the address offset for each region. This offset is used to identify the vhost virtual address\n-\t * corresponding to a QEMU guest physical address.\n-\t */\n-\tfor (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {\n-\t\tdev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address\n-\t\t\t+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;\n-\n-\t\tdev->mem->nregions_hpa\n-\t\t\t+= check_hpa_regions(\n-\t\t\t\tdev->mem->regions[regionidx].guest_phys_address\n-\t\t\t\t+ dev->mem->regions[regionidx].address_offset,\n-\t\t\t\tdev->mem->regions[regionidx].memory_size);\n-\t}\n-\tif (dev->mem->regions_hpa != NULL) {\n-\t\tfree(dev->mem->regions_hpa);\n-\t\tdev->mem->regions_hpa = NULL;\n-\t}\n-\n-\tdev->mem->regions_hpa = (struct virtio_memory_regions_hpa *) calloc(1,\n-\t\t(sizeof(struct virtio_memory_regions_hpa)\n-\t\t* dev->mem->nregions_hpa));\n-\tif (dev->mem->regions_hpa == NULL) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG,\n-\t\t\t\"(%\"PRIu64\") Failed to allocate memory for \"\n-\t\t\t\"dev->mem->regions_hpa.\\n\", dev->device_fh);\n-\t\treturn -1;\n-\t}\n-\tif (fill_hpa_memory_regions(\n-\t\t(void *)dev->mem) != dev->mem->nregions_hpa) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG,\n-\t\t\t\"in set_mem_table: hpa memory regions number mismatch: \"\n-\t\t\t\"[%d]\\n\", dev->mem->nregions_hpa);\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_SET_VRING_NUM\n- * The virtio device sends us the size of the descriptor ring.\n- */\n-static int\n-set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)\n-{\n-\tstruct virtio_net *dev;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\t/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n-\tdev->virtqueue[state->index]->size = state->num;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR\n- * The virtio device sends us the desc, used and avail ring addresses. 
This function\n- * then converts these to our address space.\n- */\n-static int\n-set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)\n-{\n-\tstruct virtio_net *dev;\n-\tstruct vhost_virtqueue *vq;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\t/* addr->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n-\tvq = dev->virtqueue[addr->index];\n-\n-\t/* The addresses are converted from QEMU virtual to Vhost virtual. */\n-\tvq->desc = (struct vring_desc*)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);\n-\tif (vq->desc == 0) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find descriptor ring address.\\n\", dev->device_fh);\n-\t\treturn -1;\n-\t}\n-\n-\tvq->avail = (struct vring_avail*)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);\n-\tif (vq->avail == 0) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find available ring address.\\n\", dev->device_fh);\n-\t\treturn -1;\n-\t}\n-\n-\tvq->used = (struct vring_used*)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);\n-\tif (vq->used == 0) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") Failed to find used ring address.\\n\", dev->device_fh);\n-\t\treturn -1;\n-\t}\n-\n-\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") mapped address desc: %p\\n\", dev->device_fh, vq->desc);\n-\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") mapped address avail: %p\\n\", dev->device_fh, vq->avail);\n-\tLOG_DEBUG(VHOST_CONFIG, \"(%\"PRIu64\") mapped address used: %p\\n\", dev->device_fh, vq->used);\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_SET_VRING_BASE\n- * The virtio device sends us the available ring last used index.\n- */\n-static int\n-set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)\n-{\n-\tstruct virtio_net *dev;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\t/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n-\tdev->virtqueue[state->index]->last_used_idx = state->num;\n-\tdev->virtqueue[state->index]->last_used_idx_res = state->num;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_GET_VRING_BASE\n- * We send the virtio device our available ring last used index.\n- */\n-static int\n-get_vring_base(struct vhost_device_ctx ctx, uint32_t index, struct vhost_vring_state *state)\n-{\n-\tstruct virtio_net *dev;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\tstate->index = index;\n-\t/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n-\tstate->num = dev->virtqueue[state->index]->last_used_idx;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * This function uses the eventfd_link kernel module to copy an eventfd file descriptor\n- * provided by QEMU in to our process space.\n- */\n-static int\n-eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)\n-{\n-\tint eventfd_link, ret;\n-\n-\t/* Open the character device to the kernel module. */\n-\teventfd_link = open(eventfd_cdev, O_RDWR);\n-\tif (eventfd_link < 0) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") eventfd_link module is not loaded\\n\",  dev->device_fh);\n-\t\treturn -1;\n-\t}\n-\n-\t/* Call the IOCTL to copy the eventfd. 
*/\n-\tret = ioctl(eventfd_link, EVENTFD_COPY, eventfd_copy);\n-\tclose(eventfd_link);\n-\n-\tif (ret < 0) {\n-\t\tRTE_LOG(ERR, VHOST_CONFIG, \"(%\"PRIu64\") EVENTFD_COPY ioctl failed\\n\",  dev->device_fh);\n-\t\treturn -1;\n-\t}\n-\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_SET_VRING_CALL\n- * The virtio device sends an eventfd to interrupt the guest. This fd gets copied in\n- * to our process space.\n- */\n-static int\n-set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)\n-{\n-\tstruct virtio_net *dev;\n-\tstruct eventfd_copy\teventfd_kick;\n-\tstruct vhost_virtqueue *vq;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\t/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n-\tvq = dev->virtqueue[file->index];\n-\n-\tif (vq->kickfd)\n-\t\tclose((int)vq->kickfd);\n-\n-\t/* Populate the eventfd_copy structure and call eventfd_copy. */\n-\tvq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);\n-\teventfd_kick.source_fd = vq->kickfd;\n-\teventfd_kick.target_fd = file->fd;\n-\teventfd_kick.target_pid = ctx.pid;\n-\n-\tif (eventfd_copy(dev, &eventfd_kick))\n-\t\treturn -1;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_SET_VRING_KICK\n- * The virtio device sends an eventfd that it can use to notify us. This fd gets copied in\n- * to our process space.\n- */\n-static int\n-set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)\n-{\n-\tstruct virtio_net *dev;\n-\tstruct eventfd_copy eventfd_call;\n-\tstruct vhost_virtqueue *vq;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL)\n-\t\treturn -1;\n-\n-\t/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n-\tvq = dev->virtqueue[file->index];\n-\n-\tif (vq->callfd)\n-\t\tclose((int)vq->callfd);\n-\n-\t/* Populate the eventfd_copy structure and call eventfd_copy. */\n-\tvq->callfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);\n-\teventfd_call.source_fd = vq->callfd;\n-\teventfd_call.target_fd = file->fd;\n-\teventfd_call.target_pid = ctx.pid;\n-\n-\tif (eventfd_copy(dev, &eventfd_call))\n-        return -1;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND\n- * To complete device initialisation when the virtio driver is loaded we are provided with a\n- * valid fd for a tap device (not used by us). If this happens then we can add the device to a\n- * data core. When the virtio driver is removed we get fd=-1. At that point we remove the device\n- * from the data core. The device will still exist in the device configuration linked list.\n- */\n-static int\n-set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)\n-{\n-\tstruct virtio_net *dev;\n-\n-\tdev = get_device(ctx);\n-\tif (dev == NULL) {\n-\t\treturn -1;\n-\t}\n-\n-\t/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */\n-\tdev->virtqueue[file->index]->backend = file->fd;\n-\n-\t/* If the device isn't already running and both backend fds are set we add the device. */\n-\tif (!(dev->flags & VIRTIO_DEV_RUNNING)) {\n-\t\tif (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&\n-\t\t\t((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))\n-\t\t\treturn notify_ops->new_device(dev);\n-\t/* Otherwise we remove it. 
*/\n-\t} else\n-\t\tif (file->fd == VIRTIO_DEV_STOPPED) {\n-\t\t\tnotify_ops->destroy_device(dev);\n-\t\t}\n-\treturn 0;\n-}\n-\n-/*\n- * Function pointers are set for the device operations to allow CUSE to call functions\n- * when an IOCTL, device_add or device_release is received.\n- */\n-static const struct vhost_net_device_ops vhost_device_ops =\n-{\n-\t.new_device = new_device,\n-\t.destroy_device = destroy_device,\n-\n-\t.get_features = get_features,\n-\t.set_features = set_features,\n-\n-\t.set_mem_table = set_mem_table,\n-\n-\t.set_vring_num = set_vring_num,\n-\t.set_vring_addr = set_vring_addr,\n-\t.set_vring_base = set_vring_base,\n-\t.get_vring_base = get_vring_base,\n-\n-\t.set_vring_kick = set_vring_kick,\n-\t.set_vring_call = set_vring_call,\n-\n-\t.set_backend = set_backend,\n-\n-\t.set_owner = set_owner,\n-\t.reset_owner = reset_owner,\n-};\n-\n-/*\n- * Called by main to setup callbacks when registering CUSE device.\n- */\n-struct vhost_net_device_ops const *\n-get_virtio_net_callbacks(void)\n-{\n-\treturn &vhost_device_ops;\n-}\n-\n-/*\n- * Register ops so that we can add/remove device to data core.\n- */\n-int\n-init_virtio_net(struct virtio_net_device_ops const * const ops)\n-{\n-\tnotify_ops = ops;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * Currently not used as we Ctrl+c to exit application.\n- */\n-int\n-deinit_virtio_net(void)\n-{\n-\treturn 0;\n-}\ndiff --git a/examples/vhost/virtio-net.h b/examples/vhost/virtio-net.h\ndeleted file mode 100644\nindex 1a2f0dc..0000000\n--- a/examples/vhost/virtio-net.h\n+++ /dev/null\n@@ -1,161 +0,0 @@\n-/*-\n- *   BSD LICENSE\n- *\n- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n- *   All rights reserved.\n- *\n- *   Redistribution and use in source and binary forms, with or without\n- *   modification, are permitted provided that the following conditions\n- *   are met:\n- *\n- *     * Redistributions of source code must retain the above copyright\n- *       notice, this list of conditions and the following disclaimer.\n- *     * Redistributions in binary form must reproduce the above copyright\n- *       notice, this list of conditions and the following disclaimer in\n- *       the documentation and/or other materials provided with the\n- *       distribution.\n- *     * Neither the name of Intel Corporation nor the names of its\n- *       contributors may be used to endorse or promote products derived\n- *       from this software without specific prior written permission.\n- *\n- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- */\n-\n-#ifndef _VIRTIO_NET_H_\n-#define _VIRTIO_NET_H_\n-\n-/* Used to indicate that the device is running on a data core */\n-#define VIRTIO_DEV_RUNNING 1\n-\n-/* Backend value set by guest. 
*/\n-#define VIRTIO_DEV_STOPPED -1\n-\n-#define PAGE_SIZE   4096\n-\n-/* Enum for virtqueue management. */\n-enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};\n-\n-#define BUF_VECTOR_MAX 256\n-\n-/*\n- * Structure contains buffer address, length and descriptor index\n- * from vring to do scatter RX.\n-*/\n-struct buf_vector {\n-uint64_t buf_addr;\n-uint32_t buf_len;\n-uint32_t desc_idx;\n-};\n-\n-/*\n- * Structure contains variables relevant to TX/RX virtqueues.\n- */\n-struct vhost_virtqueue\n-{\n-\tstruct vring_desc\t*desc;\t\t\t\t/* Virtqueue descriptor ring. */\n-\tstruct vring_avail\t*avail;\t\t\t\t/* Virtqueue available ring. */\n-\tstruct vring_used\t*used;\t\t\t\t/* Virtqueue used ring. */\n-\tuint32_t\t\t\tsize;\t\t\t\t/* Size of descriptor ring. */\n-\tuint32_t\t\t\tbackend;\t\t\t/* Backend value to determine if device should started/stopped. */\n-\tuint16_t\t\t\tvhost_hlen;\t\t\t/* Vhost header length (varies depending on RX merge buffers. */\n-\tvolatile uint16_t\tlast_used_idx;\t\t/* Last index used on the available ring */\n-\tvolatile uint16_t\tlast_used_idx_res;\t/* Used for multiple devices reserving buffers. */\n-\teventfd_t\t\t\tcallfd;\t\t\t\t/* Currently unused as polling mode is enabled. */\n-\teventfd_t\t\t\tkickfd;\t\t\t\t/* Used to notify the guest (trigger interrupt). */\n-\t/* Used for scatter RX. */\n-\tstruct buf_vector\tbuf_vec[BUF_VECTOR_MAX];\n-} __rte_cache_aligned;\n-\n-/*\n- * Device structure contains all configuration information relating to the device.\n- */\n-struct virtio_net\n-{\n-\tstruct vhost_virtqueue\t*virtqueue[VIRTIO_QNUM];\t/* Contains all virtqueue information. */\n-\tstruct virtio_memory \t*mem;\t\t\t\t\t\t/* QEMU memory and memory region information. */\n-\tstruct ether_addr \t\tmac_address;\t\t\t\t/* Device MAC address (Obtained on first TX packet). */\n-\tuint64_t \t\t\t\tfeatures;\t\t\t\t\t/* Negotiated feature set. */\n-\tuint64_t \t\t\t\tdevice_fh;\t\t\t\t\t/* device identifier. */\n-\tuint32_t \t\t\t\tvmdq_rx_q;\t\t\t\t\t/* RX VMDQ queue number. */\n-\tuint32_t \t\t\t\tflags;\t\t\t\t\t\t/* Device flags. Only used to check if device is running on data core. */\n-\tuint32_t \t\t\t\tvlan_tag;\t\t\t\t\t/* Vlan tag for device. Currently set to device_id (0-63). */\n-\tuint16_t \t\t\t\tcoreid;\t\t\t\t\t\t/* Data core that the device is added to. */\n-\tvolatile uint8_t \t\tready;\t\t\t\t\t\t/* A device is set as ready if the MAC address has been set. */\n-\tvolatile uint8_t\t\tremove;\t\t\t\t\t\t/* Device is marked for removal from the data core. */\n-} __rte_cache_aligned;\n-\n-/*\n- * Device linked list structure for configuration.\n- */\n-struct virtio_net_config_ll\n-{\n-\tstruct virtio_net\t\tdev;\t/* Virtio device. */\n-\tstruct virtio_net_config_ll\t*next; /* Next entry on linked list. */\n-};\n-\n-/*\n- * Information relating to memory regions including offsets to addresses in QEMUs memory file.\n- */\n-struct virtio_memory_regions {\n-\tuint64_t\tguest_phys_address;\t\t/* Base guest physical address of region. */\n-\tuint64_t\tguest_phys_address_end;\t/* End guest physical address of region. */\n-\tuint64_t\tmemory_size;\t\t\t/* Size of region. */\n-\tuint64_t\tuserspace_address;\t\t/* Base userspace address of region. */\n-\tuint64_t\taddress_offset;\t\t\t/* Offset of region for address translation. */\n-};\n-\n-/*\n- * Information relating to memory regions including offsets to\n- * addresses in host physical space.\n- */\n-struct virtio_memory_regions_hpa {\n-\t/* Base guest physical address of region. 
*/\n-\tuint64_t\tguest_phys_address;\n-\t/* End guest physical address of region. */\n-\tuint64_t\tguest_phys_address_end;\n-\t/* Size of region. */\n-\tuint64_t\tmemory_size;\n-\t/* Offset of region for gpa to hpa translation. */\n-\tuint64_t\thost_phys_addr_offset;\n-};\n-\n-/*\n- * Memory structure includes region and mapping information.\n- */\n-struct virtio_memory {\n-\tuint64_t\t\t\tbase_address;\t\t\t/* Base QEMU userspace address of the memory file. */\n-\tuint64_t\t\t\tmapped_address;\t\t\t/* Mapped address of memory file base in our applications memory space. */\n-\tuint64_t\t\t\tmapped_size;\t\t\t/* Total size of memory file. */\n-\tuint32_t\t\t\tnregions;\t\t\t\t/* Number of memory regions. */\n-\t /* Number of memory regions for gpa to hpa translation. */\n-\tuint32_t\t\t\tnregions_hpa;\n-\t/* Memory region information for gpa to hpa translation. */\n-\tstruct virtio_memory_regions_hpa  *regions_hpa;\n-\t/* Memory region information. */\n-\tstruct virtio_memory_regions      regions[0];\n-};\n-\n-/*\n- * Device operations to add/remove device.\n- */\n-struct virtio_net_device_ops {\n-\tint (* new_device) \t\t(struct virtio_net *);\t/* Add device. */\n-\tvoid (* destroy_device)\t(volatile struct virtio_net *);\t/* Remove device. */\n-};\n-\n-int init_virtio_net(struct virtio_net_device_ops const * const);\n-int deinit_virtio_net(void);\n-\n-struct vhost_net_device_ops const * get_virtio_net_callbacks(void);\n-\n-#endif /* _VIRTIO_NET_H_ */\n",
    "prefixes": [
        "dpdk-dev",
        "1/3"
    ]
}