get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (a full update of the writable fields).

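For reference, a resource like the one shown in the transcript below can be fetched with any HTTP client. A minimal sketch in Python using the requests library, assuming the usual DRF content negotiation (the endpoint URL and field names are taken from the response below; everything else is illustrative):

    import requests

    # The transcript below was captured with ?format=api (the browsable
    # renderer); asking for JSON explicitly returns the same data as a
    # machine-readable document.
    resp = requests.get(
        "http://patches.dpdk.org/api/patches/9084/",
        headers={"Accept": "application/json"},
    )
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])    # "[dpdk-dev,v5,2/3] vhost: Add VHOST PMD"
    print(patch["state"])   # "changes-requested"
    print(patch["mbox"])    # URL of the raw mbox for this patch
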
GET /api/patches/9084/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 9084,
    "url": "http://patches.dpdk.org/api/patches/9084/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1448355603-21275-3-git-send-email-mukawa@igel.co.jp/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1448355603-21275-3-git-send-email-mukawa@igel.co.jp>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1448355603-21275-3-git-send-email-mukawa@igel.co.jp",
    "date": "2015-11-24T09:00:02",
    "name": "[dpdk-dev,v5,2/3] vhost: Add VHOST PMD",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "bb023a54d0e132e11b8221347745285fbad08eec",
    "submitter": {
        "id": 64,
        "url": "http://patches.dpdk.org/api/people/64/?format=api",
        "name": "Tetsuya Mukawa",
        "email": "mukawa@igel.co.jp"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1448355603-21275-3-git-send-email-mukawa@igel.co.jp/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/9084/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/9084/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 4A5AF8F9C;\n\tTue, 24 Nov 2015 10:00:33 +0100 (CET)",
            "from mail-pa0-f51.google.com (mail-pa0-f51.google.com\n\t[209.85.220.51]) by dpdk.org (Postfix) with ESMTP id 662C58E9D\n\tfor <dev@dpdk.org>; Tue, 24 Nov 2015 10:00:31 +0100 (CET)",
            "by padhx2 with SMTP id hx2so16121108pad.1\n\tfor <dev@dpdk.org>; Tue, 24 Nov 2015 01:00:30 -0800 (PST)",
            "from localhost.localdomain (napt.igel.co.jp. [219.106.231.132])\n\tby smtp.gmail.com with ESMTPSA id\n\tpy5sm13430738pbc.8.2015.11.24.01.00.27\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tTue, 24 Nov 2015 01:00:29 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=igel-co-jp.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=bo3yIvDTPPf5g2INdb//B5yPmq0wx6ccae6i+TIJ5jo=;\n\tb=h9rLjBjYMGhpbQJN9QZWtw5gdzLUg4H+TcAhTsJbhV8C4z6hIL2xu0Qlc18B9xG+mv\n\t6k9WnqufK2T+IU5qrh+vlx0IRJLPL6llRS/QoUy3Tv66TTaCs9Upl0Kg4EU5i042VHxn\n\t04q5Wo13kaKaDxqo8fPxb0J8AAsZqfADv2A1g+10ykB8zDFwqnxAzhgC2GyWoa73R9ZW\n\tRVqxwCvGM0R0RNPYtG3ftXk171olf019e4pVz0LfHT4HF/+StPS26t6mnD8ey89WihfM\n\t5k535VgFPhec9SE04538aglJgAP4YSq1b91N4AcqSEYwJhOYzM8dvmPVPjDhPEUtKf88\n\t7+4Q==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20130820;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=bo3yIvDTPPf5g2INdb//B5yPmq0wx6ccae6i+TIJ5jo=;\n\tb=LzbH88RDcnKEVgbrF+hbbNt9mNUc3co0fbNNSjFWPQIpCH2B4SMPA+zi/3IwxkHZsu\n\tuh9RnVcKHA36Mhk2ps4i5M1ikB21LpXLg3bc+WieOdr4UUwp+52sTOA5b8lhAUmyc3x7\n\t+Ds4emWOXLZTQ3P9i+Otbkm9D9zGsQVcD6tMhZYier7FNljRCvl8aVDv3Guuv2Pb4cWi\n\ti2BiZBV5l5N5sRu1F5/lmNg/CKBPW/0vWltEpP9TKFalja/0OJg0YvzH2U9vcNk+tcXT\n\t6Lc368OJ0KGPTFY23oZmfbsKpJFKlZ3e0J9TLauzWNSkqWtSekkD+TUBNExaMMf4nkS5\n\tK9Kg==",
        "X-Gm-Message-State": "ALoCoQliDNOEnw+ZG+52Xso6ywvfm0/HHvfA1eo/fduUdPi4B/tS/apWh8f20J2gEgRCBwrXhDjC",
        "X-Received": "by 10.66.119.237 with SMTP id\n\tkx13mr41381955pab.158.1448355630801; \n\tTue, 24 Nov 2015 01:00:30 -0800 (PST)",
        "From": "Tetsuya Mukawa <mukawa@igel.co.jp>",
        "To": "dev@dpdk.org",
        "Date": "Tue, 24 Nov 2015 18:00:02 +0900",
        "Message-Id": "<1448355603-21275-3-git-send-email-mukawa@igel.co.jp>",
        "X-Mailer": "git-send-email 2.1.4",
        "In-Reply-To": "<1448355603-21275-1-git-send-email-mukawa@igel.co.jp>",
        "References": "<1447392031-24970-3-git-send-email-mukawa@igel.co.jp>\n\t<1448355603-21275-1-git-send-email-mukawa@igel.co.jp>",
        "Cc": "yuanhan.liu@intel.com, ann.zhuangyanying@huawei.com",
        "Subject": "[dpdk-dev] [PATCH v5 2/3] vhost: Add VHOST PMD",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The patch introduces a new PMD. This PMD is implemented as thin wrapper\nof librte_vhost. It means librte_vhost is also needed to compile the PMD.\nThe vhost messages will be handled only when a port is started. So start\na port first, then invoke QEMU.\n\nThe PMD has 2 parameters.\n - iface:  The parameter is used to specify a path to connect to a\n           virtio-net device.\n - queues: The parameter is used to specify the number of the queues\n           virtio-net device has.\n           (Default: 1)\n\nHere is an example.\n$ ./testpmd -c f -n 4 --vdev 'eth_vhost0,iface=/tmp/sock0,queues=1' -- -i\n\nTo connect above testpmd, here is qemu command example.\n\n$ qemu-system-x86_64 \\\n        <snip>\n        -chardev socket,id=chr0,path=/tmp/sock0 \\\n        -netdev vhost-user,id=net0,chardev=chr0,vhostforce,queues=1 \\\n        -device virtio-net-pci,netdev=net0\n\nSigned-off-by: Tetsuya Mukawa <mukawa@igel.co.jp>\n---\n config/common_linuxapp                      |   6 +\n doc/guides/nics/index.rst                   |   1 +\n doc/guides/rel_notes/release_2_2.rst        |   2 +\n drivers/net/Makefile                        |   4 +\n drivers/net/vhost/Makefile                  |  57 ++\n drivers/net/vhost/rte_eth_vhost.c           | 771 ++++++++++++++++++++++++++++\n drivers/net/vhost/rte_pmd_vhost_version.map |   8 +\n mk/rte.app.mk                               |   8 +-\n 8 files changed, 856 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/vhost/Makefile\n create mode 100644 drivers/net/vhost/rte_eth_vhost.c\n create mode 100644 drivers/net/vhost/rte_pmd_vhost_version.map",
    "diff": "diff --git a/config/common_linuxapp b/config/common_linuxapp\nindex f72c46d..0140a8e 100644\n--- a/config/common_linuxapp\n+++ b/config/common_linuxapp\n@@ -466,6 +466,12 @@ CONFIG_RTE_LIBRTE_VHOST_NUMA=n\n CONFIG_RTE_LIBRTE_VHOST_DEBUG=n\n \n #\n+# Compile vhost PMD\n+# To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled.\n+#\n+CONFIG_RTE_LIBRTE_PMD_VHOST=y\n+\n+#\n #Compile Xen domain0 support\n #\n CONFIG_RTE_LIBRTE_XEN_DOM0=n\ndiff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst\nindex 0a0b724..26db9b7 100644\n--- a/doc/guides/nics/index.rst\n+++ b/doc/guides/nics/index.rst\n@@ -48,6 +48,7 @@ Network Interface Controller Drivers\n     mlx5\n     szedata2\n     virtio\n+    vhost\n     vmxnet3\n     pcap_ring\n \ndiff --git a/doc/guides/rel_notes/release_2_2.rst b/doc/guides/rel_notes/release_2_2.rst\nindex 8c77768..b6071ab 100644\n--- a/doc/guides/rel_notes/release_2_2.rst\n+++ b/doc/guides/rel_notes/release_2_2.rst\n@@ -111,6 +111,8 @@ New Features\n \n * **Added vhost-user multiple queue support.**\n \n+* **Added vhost PMD.**\n+\n * **Added port hotplug support to vmxnet3.**\n \n * **Added port hotplug support to xenvirt.**\ndiff --git a/drivers/net/Makefile b/drivers/net/Makefile\nindex cddcd57..18d03cf 100644\n--- a/drivers/net/Makefile\n+++ b/drivers/net/Makefile\n@@ -51,5 +51,9 @@ DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio\n DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3\n DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt\n \n+ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)\n+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost\n+endif # $(CONFIG_RTE_LIBRTE_VHOST)\n+\n include $(RTE_SDK)/mk/rte.sharelib.mk\n include $(RTE_SDK)/mk/rte.subdir.mk\ndiff --git a/drivers/net/vhost/Makefile b/drivers/net/vhost/Makefile\nnew file mode 100644\nindex 0000000..8bec47a\n--- /dev/null\n+++ b/drivers/net/vhost/Makefile\n@@ -0,0 +1,57 @@\n+#   BSD LICENSE\n+#\n+#   Copyright (c) 2010-2015 Intel Corporation.\n+#   All rights reserved.\n+#\n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+#\n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of Intel corporation nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+#\n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+#\n+# library name\n+#\n+LIB = librte_pmd_vhost.a\n+\n+CFLAGS += -O3\n+CFLAGS += $(WERROR_FLAGS)\n+\n+EXPORT_MAP := rte_pmd_vhost_version.map\n+\n+LIBABIVER := 1\n+\n+#\n+# all source are stored in SRCS-y\n+#\n+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += rte_eth_vhost.c\n+\n+# this lib depends upon:\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mbuf\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_ether\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_kvargs\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_vhost\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c\nnew file mode 100644\nindex 0000000..9ef05bc\n--- /dev/null\n+++ b/drivers/net/vhost/rte_eth_vhost.c\n@@ -0,0 +1,771 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright (c) 2015 IGEL Co., Ltd.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of IGEL Co.,Ltd. nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+#include <unistd.h>\n+#include <pthread.h>\n+\n+#include <rte_mbuf.h>\n+#include <rte_ethdev.h>\n+#include <rte_malloc.h>\n+#include <rte_memcpy.h>\n+#include <rte_dev.h>\n+#include <rte_kvargs.h>\n+#include <rte_virtio_net.h>\n+\n+#define ETH_VHOST_IFACE_ARG\t\t\"iface\"\n+#define ETH_VHOST_QUEUES_ARG\t\t\"queues\"\n+\n+static const char *drivername = \"VHOST PMD\";\n+\n+static const char *valid_arguments[] = {\n+\tETH_VHOST_IFACE_ARG,\n+\tETH_VHOST_QUEUES_ARG,\n+\tNULL\n+};\n+\n+static struct ether_addr base_eth_addr = {\n+\t.addr_bytes = {\n+\t\t0x56 /* V */,\n+\t\t0x48 /* H */,\n+\t\t0x4F /* O */,\n+\t\t0x53 /* S */,\n+\t\t0x54 /* T */,\n+\t\t0x00\n+\t}\n+};\n+\n+struct vhost_queue {\n+\trte_atomic32_t allow_queuing;\n+\trte_atomic32_t while_queuing;\n+\tstruct virtio_net *device;\n+\tstruct pmd_internal *internal;\n+\tstruct rte_mempool *mb_pool;\n+\tuint16_t virtqueue_id;\n+\tuint64_t rx_pkts;\n+\tuint64_t tx_pkts;\n+\tuint64_t missed_pkts;\n+\tuint64_t rx_bytes;\n+\tuint64_t tx_bytes;\n+};\n+\n+struct pmd_internal {\n+\tTAILQ_ENTRY(pmd_internal) next;\n+\tchar *dev_name;\n+\tchar *iface_name;\n+\tunsigned nb_rx_queues;\n+\tunsigned nb_tx_queues;\n+\n+\tstruct vhost_queue *rx_vhost_queues[RTE_MAX_QUEUES_PER_PORT];\n+\tstruct vhost_queue *tx_vhost_queues[RTE_MAX_QUEUES_PER_PORT];\n+\n+\tvolatile uint16_t once;\n+};\n+\n+TAILQ_HEAD(pmd_internal_head, pmd_internal);\n+static struct pmd_internal_head internals_list =\n+\tTAILQ_HEAD_INITIALIZER(internals_list);\n+\n+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;\n+\n+static rte_atomic16_t nb_started_ports;\n+static pthread_t session_th;\n+\n+static struct rte_eth_link pmd_link = {\n+\t\t.link_speed = 10000,\n+\t\t.link_duplex = ETH_LINK_FULL_DUPLEX,\n+\t\t.link_status = 0\n+};\n+\n+static uint16_t\n+eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)\n+{\n+\tstruct vhost_queue *r = q;\n+\tuint16_t i, nb_rx = 0;\n+\n+\tif (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))\n+\t\treturn 0;\n+\n+\trte_atomic32_set(&r->while_queuing, 1);\n+\n+\tif (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))\n+\t\tgoto out;\n+\n+\t/* Dequeue packets from guest TX queue */\n+\tnb_rx = rte_vhost_dequeue_burst(r->device,\n+\t\t\tr->virtqueue_id, r->mb_pool, bufs, nb_bufs);\n+\n+\tr->rx_pkts += nb_rx;\n+\n+\tfor (i = 0; likely(i < nb_rx); i++)\n+\t\tr->rx_bytes += bufs[i]->pkt_len;\n+\n+out:\n+\trte_atomic32_set(&r->while_queuing, 0);\n+\n+\treturn nb_rx;\n+}\n+\n+static uint16_t\n+eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)\n+{\n+\tstruct vhost_queue *r = q;\n+\tuint16_t i, nb_tx = 0;\n+\n+\tif (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))\n+\t\treturn 0;\n+\n+\trte_atomic32_set(&r->while_queuing, 1);\n+\n+\tif (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))\n+\t\tgoto out;\n+\n+\t/* Enqueue packets to guest RX queue */\n+\tnb_tx = rte_vhost_enqueue_burst(r->device,\n+\t\t\tr->virtqueue_id, bufs, 
nb_bufs);\n+\n+\tr->tx_pkts += nb_tx;\n+\tr->missed_pkts += nb_bufs - nb_tx;\n+\n+\tfor (i = 0; likely(i < nb_tx); i++)\n+\t\tr->tx_bytes += bufs[i]->pkt_len;\n+\n+\tfor (i = 0; likely(i < nb_tx); i++)\n+\t\trte_pktmbuf_free(bufs[i]);\n+out:\n+\trte_atomic32_set(&r->while_queuing, 0);\n+\n+\treturn nb_tx;\n+}\n+\n+static int\n+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)\n+{\n+\treturn 0;\n+}\n+\n+static inline struct pmd_internal *\n+find_internal_resource(char *ifname)\n+{\n+\tint found = 0;\n+\tstruct pmd_internal *internal;\n+\n+\tif (ifname == NULL)\n+\t\treturn NULL;\n+\n+\tpthread_mutex_lock(&internal_list_lock);\n+\n+\tTAILQ_FOREACH(internal, &internals_list, next) {\n+\t\tif (!strcmp(internal->iface_name, ifname)) {\n+\t\t\tfound = 1;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tpthread_mutex_unlock(&internal_list_lock);\n+\n+\tif (!found)\n+\t\treturn NULL;\n+\n+\treturn internal;\n+}\n+\n+static int\n+new_device(struct virtio_net *dev)\n+{\n+\tstruct rte_eth_dev *eth_dev;\n+\tstruct pmd_internal *internal;\n+\tstruct vhost_queue *vq;\n+\tunsigned i;\n+\n+\tif (dev == NULL) {\n+\t\tRTE_LOG(INFO, PMD, \"Invalid argument\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tinternal = find_internal_resource(dev->ifname);\n+\tif (internal == NULL) {\n+\t\tRTE_LOG(INFO, PMD, \"Invalid device name\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\teth_dev = rte_eth_dev_allocated(internal->dev_name);\n+\tif (eth_dev == NULL) {\n+\t\tRTE_LOG(INFO, PMD, \"Failed to find a ethdev\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < internal->nb_rx_queues; i++) {\n+\t\tvq = internal->rx_vhost_queues[i];\n+\t\tif (vq == NULL)\n+\t\t\tcontinue;\n+\t\tvq->device = dev;\n+\t\tvq->internal = internal;\n+\t\trte_vhost_enable_guest_notification(\n+\t\t\t\tdev, vq->virtqueue_id, 0);\n+\t}\n+\tfor (i = 0; i < internal->nb_tx_queues; i++) {\n+\t\tvq = internal->tx_vhost_queues[i];\n+\t\tif (vq == NULL)\n+\t\t\tcontinue;\n+\t\tvq->device = dev;\n+\t\tvq->internal = internal;\n+\t\trte_vhost_enable_guest_notification(\n+\t\t\t\tdev, vq->virtqueue_id, 0);\n+\t}\n+\n+\tdev->flags |= VIRTIO_DEV_RUNNING;\n+\tdev->pmd_priv = eth_dev;\n+\teth_dev->data->dev_link.link_status = 1;\n+\n+\tfor (i = 0; i < internal->nb_rx_queues; i++) {\n+\t\tvq = internal->rx_vhost_queues[i];\n+\t\tif (vq == NULL)\n+\t\t\tcontinue;\n+\t\trte_atomic32_set(&vq->allow_queuing, 1);\n+\t}\n+\tfor (i = 0; i < internal->nb_tx_queues; i++) {\n+\t\tvq = internal->tx_vhost_queues[i];\n+\t\tif (vq == NULL)\n+\t\t\tcontinue;\n+\t\trte_atomic32_set(&vq->allow_queuing, 1);\n+\t}\n+\tRTE_LOG(INFO, PMD, \"New connection established\\n\");\n+\n+\treturn 0;\n+}\n+\n+static void\n+destroy_device(volatile struct virtio_net *dev)\n+{\n+\tstruct rte_eth_dev *eth_dev;\n+\tstruct pmd_internal *internal;\n+\tstruct vhost_queue *vq;\n+\tunsigned i;\n+\n+\tif (dev == NULL) {\n+\t\tRTE_LOG(INFO, PMD, \"Invalid argument\\n\");\n+\t\treturn;\n+\t}\n+\n+\teth_dev = (struct rte_eth_dev *)dev->pmd_priv;\n+\tif (eth_dev == NULL) {\n+\t\tRTE_LOG(INFO, PMD, \"Failed to find a ethdev\\n\");\n+\t\treturn;\n+\t}\n+\n+\tinternal = eth_dev->data->dev_private;\n+\n+\t/* Wait until rx/tx_pkt_burst stops accessing vhost device */\n+\tfor (i = 0; i < internal->nb_rx_queues; i++) {\n+\t\tvq = internal->rx_vhost_queues[i];\n+\t\tif (vq == NULL)\n+\t\t\tcontinue;\n+\t\trte_atomic32_set(&vq->allow_queuing, 0);\n+\t\twhile (rte_atomic32_read(&vq->while_queuing))\n+\t\t\trte_pause();\n+\t}\n+\tfor (i = 0; i < internal->nb_tx_queues; i++) {\n+\t\tvq = internal->tx_vhost_queues[i];\n+\t\tif (vq == 
NULL)\n+\t\t\tcontinue;\n+\t\trte_atomic32_set(&vq->allow_queuing, 0);\n+\t\twhile (rte_atomic32_read(&vq->while_queuing))\n+\t\t\trte_pause();\n+\t}\n+\n+\teth_dev->data->dev_link.link_status = 0;\n+\n+\tdev->pmd_priv = NULL;\n+\tdev->flags &= ~VIRTIO_DEV_RUNNING;\n+\n+\tfor (i = 0; i < internal->nb_rx_queues; i++) {\n+\t\tvq = internal->rx_vhost_queues[i];\n+\t\tif (vq == NULL)\n+\t\t\tcontinue;\n+\t\tvq->device = NULL;\n+\t}\n+\tfor (i = 0; i < internal->nb_tx_queues; i++) {\n+\t\tvq = internal->tx_vhost_queues[i];\n+\t\tif (vq == NULL)\n+\t\t\tcontinue;\n+\t\tvq->device = NULL;\n+\t}\n+\n+\tRTE_LOG(INFO, PMD, \"Connection closed\\n\");\n+}\n+\n+static void *\n+vhost_driver_session(void *param __rte_unused)\n+{\n+\tstatic struct virtio_net_device_ops vhost_ops;\n+\n+\t/* set vhost arguments */\n+\tvhost_ops.new_device = new_device;\n+\tvhost_ops.destroy_device = destroy_device;\n+\tif (rte_vhost_driver_pmd_callback_register(&vhost_ops) < 0)\n+\t\trte_panic(\"Can't register callbacks\\n\");\n+\n+\t/* start event handling */\n+\trte_vhost_driver_session_start();\n+\n+\tpthread_exit(0);\n+}\n+\n+static void\n+vhost_driver_session_start(void)\n+{\n+\tint ret;\n+\n+\tret = pthread_create(&session_th,\n+\t\t\tNULL, vhost_driver_session, NULL);\n+\tif (ret)\n+\t\trte_panic(\"Can't create a thread\\n\");\n+}\n+\n+static void\n+vhost_driver_session_stop(void)\n+{\n+\tint ret;\n+\n+\tret = pthread_cancel(session_th);\n+\tif (ret)\n+\t\trte_panic(\"Can't cancel the thread\\n\");\n+\n+\tret = pthread_join(session_th, NULL);\n+\tif (ret)\n+\t\trte_panic(\"Can't join the thread\\n\");\n+}\n+\n+static int\n+eth_dev_start(struct rte_eth_dev *dev)\n+{\n+\tint ret;\n+\tstruct pmd_internal *internal = dev->data->dev_private;\n+\n+\tif (rte_atomic16_cmpset(&internal->once, 0, 1)) {\n+\t\tret = rte_vhost_driver_register(internal->iface_name);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\t/* We need only one message handling thread */\n+\tif (rte_atomic16_add_return(&nb_started_ports, 1) == 1)\n+\t\tvhost_driver_session_start();\n+\n+\treturn 0;\n+}\n+\n+static void\n+eth_dev_stop(struct rte_eth_dev *dev)\n+{\n+\tstruct pmd_internal *internal = dev->data->dev_private;\n+\n+\tif (rte_atomic16_cmpset(&internal->once, 1, 0))\n+\t\trte_vhost_driver_unregister(internal->iface_name);\n+\n+\tif (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)\n+\t\tvhost_driver_session_stop();\n+}\n+\n+static int\n+eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\t   uint16_t nb_rx_desc __rte_unused,\n+\t\t   unsigned int socket_id,\n+\t\t   const struct rte_eth_rxconf *rx_conf __rte_unused,\n+\t\t   struct rte_mempool *mb_pool)\n+{\n+\tstruct pmd_internal *internal = dev->data->dev_private;\n+\tstruct vhost_queue *vq;\n+\n+\trte_free(internal->rx_vhost_queues[rx_queue_id]);\n+\n+\tvq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),\n+\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n+\tif (vq == NULL) {\n+\t\tRTE_LOG(ERR, PMD, \"Failed to allocate memory for rx queue\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tvq->mb_pool = mb_pool;\n+\tvq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;\n+\tinternal->rx_vhost_queues[rx_queue_id] = vq;\n+\tdev->data->rx_queues[rx_queue_id] = vq;\n+\treturn 0;\n+}\n+\n+static int\n+eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n+\t\t   uint16_t nb_tx_desc __rte_unused,\n+\t\t   unsigned int socket_id,\n+\t\t   const struct rte_eth_txconf *tx_conf __rte_unused)\n+{\n+\tstruct pmd_internal *internal = dev->data->dev_private;\n+\tstruct vhost_queue 
*vq;\n+\n+\trte_free(internal->tx_vhost_queues[tx_queue_id]);\n+\n+\tvq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),\n+\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n+\tif (vq == NULL) {\n+\t\tRTE_LOG(ERR, PMD, \"Failed to allocate memory for tx queue\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tvq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;\n+\tinternal->tx_vhost_queues[tx_queue_id] = vq;\n+\tdev->data->tx_queues[tx_queue_id] = vq;\n+\treturn 0;\n+}\n+\n+\n+static void\n+eth_dev_info(struct rte_eth_dev *dev,\n+\t     struct rte_eth_dev_info *dev_info)\n+{\n+\tstruct pmd_internal *internal = dev->data->dev_private;\n+\n+\tdev_info->driver_name = drivername;\n+\tdev_info->max_mac_addrs = 1;\n+\tdev_info->max_rx_pktlen = (uint32_t)-1;\n+\tdev_info->max_rx_queues = (uint16_t)internal->nb_rx_queues;\n+\tdev_info->max_tx_queues = (uint16_t)internal->nb_tx_queues;\n+\tdev_info->min_rx_bufsize = 0;\n+}\n+\n+static void\n+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)\n+{\n+\tunsigned i;\n+\tunsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;\n+\tunsigned long rx_total_bytes = 0, tx_total_bytes = 0;\n+\tconst struct pmd_internal *internal = dev->data->dev_private;\n+\n+\tfor (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&\n+\t     i < internal->nb_rx_queues; i++) {\n+\t\tif (internal->rx_vhost_queues[i] == NULL)\n+\t\t\tcontinue;\n+\t\tigb_stats->q_ipackets[i] = internal->rx_vhost_queues[i]->rx_pkts;\n+\t\trx_total += igb_stats->q_ipackets[i];\n+\n+\t\tigb_stats->q_ibytes[i] = internal->rx_vhost_queues[i]->rx_bytes;\n+\t\trx_total_bytes += igb_stats->q_ibytes[i];\n+\t}\n+\n+\tfor (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&\n+\t     i < internal->nb_tx_queues; i++) {\n+\t\tif (internal->tx_vhost_queues[i] == NULL)\n+\t\t\tcontinue;\n+\t\tigb_stats->q_opackets[i] = internal->tx_vhost_queues[i]->tx_pkts;\n+\t\ttx_missed_total += internal->tx_vhost_queues[i]->missed_pkts;\n+\t\ttx_total += igb_stats->q_opackets[i];\n+\n+\t\tigb_stats->q_obytes[i] = internal->tx_vhost_queues[i]->tx_bytes;\n+\t\ttx_total_bytes += igb_stats->q_obytes[i];\n+\t}\n+\n+\tigb_stats->ipackets = rx_total;\n+\tigb_stats->opackets = tx_total;\n+\tigb_stats->imissed = tx_missed_total;\n+\tigb_stats->ibytes = rx_total_bytes;\n+\tigb_stats->obytes = tx_total_bytes;\n+}\n+\n+static void\n+eth_stats_reset(struct rte_eth_dev *dev)\n+{\n+\tunsigned i;\n+\tstruct pmd_internal *internal = dev->data->dev_private;\n+\n+\tfor (i = 0; i < internal->nb_rx_queues; i++) {\n+\t\tif (internal->rx_vhost_queues[i] == NULL)\n+\t\t\tcontinue;\n+\t\tinternal->rx_vhost_queues[i]->rx_pkts = 0;\n+\t\tinternal->rx_vhost_queues[i]->rx_bytes = 0;\n+\t}\n+\tfor (i = 0; i < internal->nb_tx_queues; i++) {\n+\t\tif (internal->tx_vhost_queues[i] == NULL)\n+\t\t\tcontinue;\n+\t\tinternal->tx_vhost_queues[i]->tx_pkts = 0;\n+\t\tinternal->tx_vhost_queues[i]->tx_bytes = 0;\n+\t\tinternal->tx_vhost_queues[i]->missed_pkts = 0;\n+\t}\n+}\n+\n+static void\n+eth_queue_release(void *q __rte_unused)\n+{\n+\treturn;\n+}\n+\n+static int\n+eth_link_update(struct rte_eth_dev *dev __rte_unused,\n+\t\tint wait_to_complete __rte_unused)\n+{\n+\treturn 0;\n+}\n+\n+static const struct eth_dev_ops ops = {\n+\t.dev_start = eth_dev_start,\n+\t.dev_stop = eth_dev_stop,\n+\t.dev_configure = eth_dev_configure,\n+\t.dev_infos_get = eth_dev_info,\n+\t.rx_queue_setup = eth_rx_queue_setup,\n+\t.tx_queue_setup = eth_tx_queue_setup,\n+\t.rx_queue_release = eth_queue_release,\n+\t.tx_queue_release = eth_queue_release,\n+\t.link_update = 
eth_link_update,\n+\t.stats_get = eth_stats_get,\n+\t.stats_reset = eth_stats_reset,\n+};\n+\n+static int\n+eth_dev_vhost_create(const char *name, int index,\n+\t\t     char *iface_name,\n+\t\t     int16_t queues,\n+\t\t     const unsigned numa_node)\n+{\n+\tstruct rte_eth_dev_data *data = NULL;\n+\tstruct pmd_internal *internal = NULL;\n+\tstruct rte_eth_dev *eth_dev = NULL;\n+\tstruct ether_addr *eth_addr = NULL;\n+\n+\tRTE_LOG(INFO, PMD, \"Creating VHOST-USER backend on numa socket %u\\n\",\n+\t\tnuma_node);\n+\n+\t/* now do all data allocation - for eth_dev structure, dummy pci driver\n+\t * and internal (private) data\n+\t */\n+\tdata = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);\n+\tif (data == NULL)\n+\t\tgoto error;\n+\n+\tinternal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);\n+\tif (internal == NULL)\n+\t\tgoto error;\n+\n+\teth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);\n+\tif (eth_addr == NULL)\n+\t\tgoto error;\n+\t*eth_addr = base_eth_addr;\n+\teth_addr->addr_bytes[5] = index;\n+\n+\t/* reserve an ethdev entry */\n+\teth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);\n+\tif (eth_dev == NULL)\n+\t\tgoto error;\n+\n+\t/* now put it all together\n+\t * - store queue data in internal,\n+\t * - store numa_node info in ethdev data\n+\t * - point eth_dev_data to internals\n+\t * - and point eth_dev structure to new eth_dev_data structure\n+\t */\n+\tinternal->nb_rx_queues = queues;\n+\tinternal->nb_tx_queues = queues;\n+\tinternal->dev_name = strdup(name);\n+\tif (internal->dev_name == NULL)\n+\t\tgoto error;\n+\tinternal->iface_name = strdup(iface_name);\n+\tif (internal->iface_name == NULL) {\n+\t\tfree(internal->dev_name);\n+\t\tgoto error;\n+\t}\n+\n+\tpthread_mutex_lock(&internal_list_lock);\n+\tTAILQ_INSERT_TAIL(&internals_list, internal, next);\n+\tpthread_mutex_unlock(&internal_list_lock);\n+\n+\tdata->dev_private = internal;\n+\tdata->port_id = eth_dev->data->port_id;\n+\tmemmove(data->name, eth_dev->data->name, sizeof(data->name));\n+\tdata->nb_rx_queues = queues;\n+\tdata->nb_tx_queues = queues;\n+\tdata->dev_link = pmd_link;\n+\tdata->mac_addrs = eth_addr;\n+\n+\t/* We'll replace the 'data' originally allocated by eth_dev. 
So the\n+\t * vhost PMD resources won't be shared between multi processes.\n+\t */\n+\teth_dev->data = data;\n+\teth_dev->dev_ops = &ops;\n+\teth_dev->driver = NULL;\n+\teth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;\n+\teth_dev->data->kdrv = RTE_KDRV_NONE;\n+\teth_dev->data->drv_name = internal->dev_name;\n+\teth_dev->data->numa_node = numa_node;\n+\n+\t/* finally assign rx and tx ops */\n+\teth_dev->rx_pkt_burst = eth_vhost_rx;\n+\teth_dev->tx_pkt_burst = eth_vhost_tx;\n+\n+\treturn data->port_id;\n+\n+error:\n+\trte_free(data);\n+\trte_free(internal);\n+\trte_free(eth_addr);\n+\n+\treturn -1;\n+}\n+\n+static inline int\n+open_iface(const char *key __rte_unused, const char *value, void *extra_args)\n+{\n+\tconst char **iface_name = extra_args;\n+\n+\tif (value == NULL)\n+\t\treturn -1;\n+\n+\t*iface_name = value;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+open_queues(const char *key __rte_unused, const char *value, void *extra_args)\n+{\n+\tuint16_t *q = extra_args;\n+\n+\tif ((value == NULL) || (extra_args == NULL))\n+\t\treturn -EINVAL;\n+\n+\t*q = (uint16_t)strtoul(value, NULL, 0);\n+\tif ((*q == USHRT_MAX) && (errno == ERANGE))\n+\t\treturn -1;\n+\n+\tif (*q > RTE_MAX_QUEUES_PER_PORT)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+static int\n+rte_pmd_vhost_devinit(const char *name, const char *params)\n+{\n+\tstruct rte_kvargs *kvlist = NULL;\n+\tint ret = 0;\n+\tint index;\n+\tchar *iface_name;\n+\tuint16_t queues;\n+\n+\tRTE_LOG(INFO, PMD, \"Initializing pmd_vhost for %s\\n\", name);\n+\n+\tif (strlen(name) < strlen(\"eth_vhost\"))\n+\t\treturn -1;\n+\n+\tindex = strtol(name + strlen(\"eth_vhost\"), NULL, 0);\n+\tif (errno == ERANGE)\n+\t\treturn -1;\n+\n+\tkvlist = rte_kvargs_parse(params, valid_arguments);\n+\tif (kvlist == NULL)\n+\t\treturn -1;\n+\n+\tif (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {\n+\t\tret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,\n+\t\t\t\t\t &open_iface, &iface_name);\n+\t\tif (ret < 0)\n+\t\t\tgoto out_free;\n+\t} else {\n+\t\tret = -1;\n+\t\tgoto out_free;\n+\t}\n+\n+\tif (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {\n+\t\tret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,\n+\t\t\t\t\t &open_queues, &queues);\n+\t\tif (ret < 0)\n+\t\t\tgoto out_free;\n+\n+\t} else\n+\t\tqueues = 1;\n+\n+\teth_dev_vhost_create(name, index,\n+\t\t\tiface_name, queues, rte_socket_id());\n+\n+out_free:\n+\trte_kvargs_free(kvlist);\n+\treturn ret;\n+}\n+\n+static int\n+rte_pmd_vhost_devuninit(const char *name)\n+{\n+\tstruct rte_eth_dev *eth_dev = NULL;\n+\tstruct pmd_internal *internal;\n+\tunsigned int i;\n+\n+\tRTE_LOG(INFO, PMD, \"Un-Initializing pmd_vhost for %s\\n\", name);\n+\n+\tif (name == NULL)\n+\t\treturn -EINVAL;\n+\n+\t/* find an ethdev entry */\n+\teth_dev = rte_eth_dev_allocated(name);\n+\tif (eth_dev == NULL)\n+\t\treturn -ENODEV;\n+\n+\tinternal = eth_dev->data->dev_private;\n+\n+\tpthread_mutex_lock(&internal_list_lock);\n+\tTAILQ_REMOVE(&internals_list, internal, next);\n+\tpthread_mutex_unlock(&internal_list_lock);\n+\n+\teth_dev_stop(eth_dev);\n+\n+\tif ((internal) && (internal->dev_name))\n+\t\tfree(internal->dev_name);\n+\tif ((internal) && (internal->iface_name))\n+\t\tfree(internal->iface_name);\n+\n+\trte_free(eth_dev->data->mac_addrs);\n+\trte_free(eth_dev->data);\n+\n+\tfor (i = 0; i < internal->nb_rx_queues; i++)\n+\t\trte_free(internal->rx_vhost_queues[i]);\n+\tfor (i = 0; i < internal->nb_tx_queues; 
i++)\n+\t\trte_free(internal->tx_vhost_queues[i]);\n+\trte_free(internal);\n+\n+\trte_eth_dev_release_port(eth_dev);\n+\n+\treturn 0;\n+}\n+\n+static struct rte_driver pmd_vhost_drv = {\n+\t.name = \"eth_vhost\",\n+\t.type = PMD_VDEV,\n+\t.init = rte_pmd_vhost_devinit,\n+\t.uninit = rte_pmd_vhost_devuninit,\n+};\n+\n+PMD_REGISTER_DRIVER(pmd_vhost_drv);\ndiff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map\nnew file mode 100644\nindex 0000000..bf0361a\n--- /dev/null\n+++ b/drivers/net/vhost/rte_pmd_vhost_version.map\n@@ -0,0 +1,8 @@\n+DPDK_2.2 {\n+\n+\tglobal:\n+\n+\trte_eth_vhost_portid2vdev;\n+\n+\tlocal: *;\n+};\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex 148653e..542df30 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -151,7 +151,13 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP)       += -lrte_pmd_pcap\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET)  += -lrte_pmd_af_packet\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL)       += -lrte_pmd_null\n \n-endif # ! $(CONFIG_RTE_BUILD_SHARED_LIB)\n+ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)\n+\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VHOST)      += -lrte_pmd_vhost\n+\n+endif # ! $(CONFIG_RTE_LIBRTE_VHOST)\n+\n+endif # $(CONFIG_RTE_BUILD_SHARED_LIB)\n \n endif # ! CONFIG_RTE_BUILD_COMBINE_LIBS\n \n",
    "prefixes": [
        "dpdk-dev",
        "v5",
        "2/3"
    ]
}
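
The patch and put methods above are only available to authenticated users with the right to edit the patch (typically a project maintainer or the submitter), and they change writable fields such as state, archived, or delegate. A hedged sketch of a partial update using token authentication — the token value and the target state slug are placeholders, not values from this response:

    import requests

    # Hypothetical token: Patchwork issues per-user API tokens via the web
    # UI and expects them in an "Authorization: Token <value>" header.
    TOKEN = "0123456789abcdef"

    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/9084/",
        headers={"Authorization": "Token " + TOKEN},
        # PATCH sends only the fields to change; PUT would require the
        # full set of writable fields.
        json={"state": "superseded", "archived": True},
    )
    resp.raise_for_status()
    print(resp.json()["state"])   # reflects the new state on success

An unauthenticated or unauthorized request to the same endpoint is rejected (403), which is why the transcript above only exercises GET.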