get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
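For example, a read-only client can fetch this record without credentials, while PUT/PATCH require token authentication as a maintainer of the project. Below is a minimal sketch in Python using the requests library; the token value and the target state are placeholders, not values taken from this page.

import requests

BASE = "http://patches.dpdk.org/api"

# Reads need no authentication.
patch = requests.get(BASE + "/patches/54986/").json()
print(patch["name"], patch["state"])

# Writes use token authentication; "REPLACE_ME" is a placeholder, and the
# account behind the token must have maintainer rights on the project.
resp = requests.patch(
    BASE + "/patches/54986/",
    headers={"Authorization": "Token REPLACE_ME"},
    json={"state": "accepted"},  # hypothetical target state
)
resp.raise_for_status()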

GET /api/patches/54986/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 54986,
    "url": "http://patches.dpdk.org/api/patches/54986/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1560957293-17294-21-git-send-email-ndragazis@arrikto.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1560957293-17294-21-git-send-email-ndragazis@arrikto.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1560957293-17294-21-git-send-email-ndragazis@arrikto.com",
    "date": "2019-06-19T15:14:45",
    "name": "[20/28] drivers: add virtio-vhost-user transport",
    "commit_ref": null,
    "pull_url": null,
    "state": "rfc",
    "archived": true,
    "hash": "73814efae14dcc109c4528461922b5dee24d27df",
    "submitter": {
        "id": 1339,
        "url": "http://patches.dpdk.org/api/people/1339/?format=api",
        "name": "Nikos Dragazis",
        "email": "ndragazis@arrikto.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1560957293-17294-21-git-send-email-ndragazis@arrikto.com/mbox/",
    "series": [
        {
            "id": 5082,
            "url": "http://patches.dpdk.org/api/series/5082/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=5082",
            "date": "2019-06-19T15:14:25",
            "name": "vhost: add virtio-vhost-user transport",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/5082/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/54986/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/54986/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id C93C31C427;\n\tWed, 19 Jun 2019 17:16:38 +0200 (CEST)",
            "from mx0.arrikto.com (mx0.arrikto.com [212.71.252.59])\n\tby dpdk.org (Postfix) with ESMTP id 52BEA1C392\n\tfor <dev@dpdk.org>; Wed, 19 Jun 2019 17:15:46 +0200 (CEST)",
            "from troi.prod.arr (mail.arr [10.99.0.5])\n\tby mx0.arrikto.com (Postfix) with ESMTP id 13875182018;\n\tWed, 19 Jun 2019 18:15:46 +0300 (EEST)",
            "from localhost.localdomain (unknown [10.89.50.133])\n\tby troi.prod.arr (Postfix) with ESMTPSA id 8C61B32C;\n\tWed, 19 Jun 2019 18:15:44 +0300 (EEST)"
        ],
        "From": "Nikos Dragazis <ndragazis@arrikto.com>",
        "To": "dev@dpdk.org",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>,\n\tTiwei Bie <tiwei.bie@intel.com>, Zhihong Wang <zhihong.wang@intel.com>,\n\tStefan Hajnoczi <stefanha@redhat.com>, Wei Wang <wei.w.wang@intel.com>,\n\tStojaczyk Dariusz <dariusz.stojaczyk@intel.com>,\n\tVangelis Koukis <vkoukis@arrikto.com>",
        "Date": "Wed, 19 Jun 2019 18:14:45 +0300",
        "Message-Id": "<1560957293-17294-21-git-send-email-ndragazis@arrikto.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1560957293-17294-1-git-send-email-ndragazis@arrikto.com>",
        "References": "<1560957293-17294-1-git-send-email-ndragazis@arrikto.com>",
        "Subject": "[dpdk-dev] [PATCH 20/28] drivers: add virtio-vhost-user transport",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch introduces the virtio-vhost-user transport. This transport is\nbased on the virtio-vhost-user device. This device replaces the AF_UNIX\nsocket used by the vhost-user protocol with a virtio device that tunnels\nvhost-user protocol messages.  This allows a guest to act as a vhost\ndevice backend for other guests.\n\nFor more information on virtio-vhost-user, see\nhttps://wiki.qemu.org/Features/VirtioVhostUser.\n\nSigned-off-by: Nikos Dragazis <ndragazis@arrikto.com>\nSigned-off-by: Stefan Hajnoczi <stefanha@redhat.com>\n---\n drivers/Makefile                                   |    2 +\n drivers/virtio_vhost_user/Makefile                 |   27 +\n .../rte_virtio_vhost_user_version.map              |    4 +\n .../virtio_vhost_user/trans_virtio_vhost_user.c    | 1067 ++++++++++++++++++++\n drivers/virtio_vhost_user/virtio_vhost_user.h      |   18 +\n 5 files changed, 1118 insertions(+)\n create mode 100644 drivers/virtio_vhost_user/Makefile\n create mode 100644 drivers/virtio_vhost_user/rte_virtio_vhost_user_version.map\n create mode 100644 drivers/virtio_vhost_user/trans_virtio_vhost_user.c\n create mode 100644 drivers/virtio_vhost_user/virtio_vhost_user.h",
    "diff": "diff --git a/drivers/Makefile b/drivers/Makefile\nindex 7d5da5d..72e2579 100644\n--- a/drivers/Makefile\n+++ b/drivers/Makefile\n@@ -22,5 +22,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event\n DEPDIRS-event := common bus mempool net\n DIRS-$(CONFIG_RTE_LIBRTE_RAWDEV) += raw\n DEPDIRS-raw := common bus mempool net event\n+DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += virtio_vhost_user\n+DEPDIRS-virtio_vhost_user := bus\n \n include $(RTE_SDK)/mk/rte.subdir.mk\ndiff --git a/drivers/virtio_vhost_user/Makefile b/drivers/virtio_vhost_user/Makefile\nnew file mode 100644\nindex 0000000..61a77b6\n--- /dev/null\n+++ b/drivers/virtio_vhost_user/Makefile\n@@ -0,0 +1,27 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2019 Arrikto Inc.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+# library name\n+LIB = librte_virtio_vhost_user.a\n+\n+EXPORT_MAP := rte_virtio_vhost_user_version.map\n+\n+LIBABIVER := 1\n+\n+CFLAGS += -DALLOW_EXPERIMENTAL_API\n+CFLAGS += $(WERROR_FLAGS) -O3\n+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring\n+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs\n+LDLIBS += -lrte_bus_pci\n+\n+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)\n+LDLIBS += -lrte_vhost\n+endif\n+\n+# all source are stored in SRCS-y\n+SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := trans_virtio_vhost_user.c \\\n+\t\t\t\t   virtio_pci.c\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/virtio_vhost_user/rte_virtio_vhost_user_version.map b/drivers/virtio_vhost_user/rte_virtio_vhost_user_version.map\nnew file mode 100644\nindex 0000000..4b2e621\n--- /dev/null\n+++ b/drivers/virtio_vhost_user/rte_virtio_vhost_user_version.map\n@@ -0,0 +1,4 @@\n+DPDK_19.05 {\n+\n+        local: *;\n+};\ndiff --git a/drivers/virtio_vhost_user/trans_virtio_vhost_user.c b/drivers/virtio_vhost_user/trans_virtio_vhost_user.c\nnew file mode 100644\nindex 0000000..72018a4\n--- /dev/null\n+++ b/drivers/virtio_vhost_user/trans_virtio_vhost_user.c\n@@ -0,0 +1,1067 @@\n+/* SPDX-License-Idenitifier: BSD-3-Clause\n+ * Copyright(c) 2018 Red Hat, Inc.\n+ * Copyright(c) 2019 Arrikto, Inc.\n+ */\n+\n+/*\n+ * @file\n+ * virtio-vhost-user PCI transport driver\n+ *\n+ * This vhost-user transport communicates with the vhost-user master process\n+ * over the virtio-vhost-user PCI device.\n+ *\n+ * Interrupts are used since this is the control path, not the data path.  This\n+ * way the vhost-user command processing doesn't interfere with packet\n+ * processing.  This is similar to the AF_UNIX transport's fdman thread that\n+ * processes socket I/O separately.\n+ *\n+ * This transport replaces the usual vhost-user file descriptor passing with a\n+ * PCI BAR that contains doorbell registers for callfd and logfd, and shared\n+ * memory for the memory table regions.\n+ *\n+ * VIRTIO device specification:\n+ * https://stefanha.github.io/virtio/vhost-user-slave.html#x1-2830007\n+ */\n+\n+#include <rte_log.h>\n+#include <rte_malloc.h>\n+#include <rte_bus_pci.h>\n+#include <rte_io.h>\n+\n+#include \"vhost.h\"\n+#include \"virtio_pci.h\"\n+#include \"virtqueue.h\"\n+#include \"virtio_vhost_user.h\"\n+#include \"vhost_user.h\"\n+\n+/*\n+ * Data structures:\n+ *\n+ * Successfully probed virtio-vhost-user PCI adapters are added to\n+ * vvu_pci_device_list as struct vvu_pci_device elements.\n+ *\n+ * When rte_vhost_driver_register() is called, a struct vvu_socket is created\n+ * as the endpoint for future vhost-user connections.  
The struct vvu_socket is\n+ * associated with the struct vvu_pci_device that will be used for\n+ * communication.\n+ *\n+ * When a vhost-user protocol connection is established, a struct\n+ * vvu_connection is created and the application's new_device(int vid) callback\n+ * is invoked.\n+ */\n+\n+/** Probed PCI devices for lookup by rte_vhost_driver_register() */\n+TAILQ_HEAD(, vvu_pci_device) vvu_pci_device_list =\n+\tTAILQ_HEAD_INITIALIZER(vvu_pci_device_list);\n+\n+struct vvu_socket;\n+struct vvu_connection;\n+\n+/** A virtio-vhost-user PCI adapter */\n+struct vvu_pci_device {\n+\tstruct virtio_hw hw;\n+\tstruct rte_pci_device *pci_dev;\n+\tstruct vvu_socket *vvu_socket;\n+\tTAILQ_ENTRY(vvu_pci_device) next;\n+};\n+\n+/** A vhost-user endpoint (aka per-path state) */\n+struct vvu_socket {\n+\tstruct vhost_user_socket socket; /* must be first field! */\n+\tstruct vvu_pci_device *pdev;\n+\tstruct vvu_connection *conn;\n+\n+\t/** Doorbell registers */\n+\tuint16_t *doorbells;\n+\n+\t/** This struct virtio_vhost_user_config field determines the number of\n+\t * doorbells available so we keep it saved.\n+\t */\n+\tuint32_t max_vhost_queues;\n+\n+\t/** Receive buffers */\n+\tconst struct rte_memzone *rxbuf_mz;\n+\n+\t/** Transmit buffers.  It is assumed that the device completes them\n+\t * in-order so a single wrapping index can be used to select the next\n+\t * free buffer.\n+\t */\n+\tconst struct rte_memzone *txbuf_mz;\n+\tunsigned int txbuf_idx;\n+};\n+\n+/** A vhost-user protocol session (aka per-vid state) */\n+struct vvu_connection {\n+\tstruct virtio_net device; /* must be first field! */\n+\tstruct vvu_socket *vvu_socket;\n+};\n+\n+/** Virtio feature bits that we support */\n+#define VVU_VIRTIO_FEATURES ((1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \\\n+\t\t\t     (1ULL << VIRTIO_F_ANY_LAYOUT) | \\\n+\t\t\t     (1ULL << VIRTIO_F_VERSION_1) | \\\n+\t\t\t     (1ULL << VIRTIO_F_IOMMU_PLATFORM))\n+\n+/** Virtqueue indices */\n+enum {\n+\tVVU_VQ_RX,\n+\tVVU_VQ_TX,\n+\tVVU_VQ_MAX,\n+};\n+\n+enum {\n+\t/** Receive buffer size, in bytes */\n+\tVVU_RXBUF_SIZE = 1024,\n+\n+\t/** Transmit buffer size, in bytes */\n+\tVVU_TXBUF_SIZE = 1024,\n+};\n+\n+/** Look up a struct vvu_pci_device from a DomBDF string */\n+static struct vvu_pci_device *\n+vvu_pci_by_name(const char *name)\n+{\n+\tstruct vvu_pci_device *pdev;\n+\n+\tTAILQ_FOREACH(pdev, &vvu_pci_device_list, next) {\n+\t\tif (!strcmp(pdev->pci_dev->device.name, name))\n+\t\t\treturn pdev;\n+\t}\n+\treturn NULL;\n+}\n+\n+/** Start connection establishment */\n+static void\n+vvu_connect(struct vvu_socket *vvu_socket)\n+{\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tuint32_t status;\n+\n+\tvirtio_pci_read_dev_config(hw,\n+\t\t\toffsetof(struct virtio_vhost_user_config, status),\n+\t\t\t&status, sizeof(status));\n+\tstatus |= RTE_LE32(1u << VIRTIO_VHOST_USER_STATUS_SLAVE_UP);\n+\tvirtio_pci_write_dev_config(hw,\n+\t\t\toffsetof(struct virtio_vhost_user_config, status),\n+\t\t\t&status, sizeof(status));\n+}\n+\n+static void\n+vvu_disconnect(struct vvu_socket *vvu_socket)\n+{\n+\tstruct vhost_user_socket *vsocket = &vvu_socket->socket;\n+\tstruct vvu_connection *conn = vvu_socket->conn;\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tuint32_t status;\n+\n+\tif (conn) {\n+\t\tif (vsocket->notify_ops->destroy_connection)\n+\t\t\tvsocket->notify_ops->destroy_connection(conn->device.vid);\n+\n+\t\tvhost_destroy_device(conn->device.vid);\n+\t}\n+\n+\t/* Make sure we're disconnected */\n+\tvirtio_pci_read_dev_config(hw,\n+\t\t\toffsetof(struct 
virtio_vhost_user_config, status),\n+\t\t\t&status, sizeof(status));\n+\tstatus &= ~RTE_LE32(1u << VIRTIO_VHOST_USER_STATUS_SLAVE_UP);\n+\tvirtio_pci_write_dev_config(hw,\n+\t\t\toffsetof(struct virtio_vhost_user_config, status),\n+\t\t\t&status, sizeof(status));\n+}\n+\n+static void\n+vvu_reconnect(struct vvu_socket *vvu_socket)\n+{\n+\tvvu_disconnect(vvu_socket);\n+\tvvu_connect(vvu_socket);\n+}\n+\n+static void vvu_process_rxq(struct vvu_socket *vvu_socket);\n+\n+static void\n+vvu_cleanup_device(struct virtio_net *dev, int destroy __rte_unused)\n+{\n+\tstruct vvu_connection *conn =\n+\t\tcontainer_of(dev, struct vvu_connection, device);\n+\tstruct vvu_socket *vvu_socket = conn->vvu_socket;\n+\n+\tvvu_socket->conn = NULL;\n+\tvvu_process_rxq(vvu_socket); /* discard old replies from master */\n+\tvvu_reconnect(vvu_socket);\n+}\n+\n+static int\n+vvu_vring_call(struct virtio_net *dev, struct vhost_virtqueue *vq)\n+{\n+\tstruct vvu_connection *conn =\n+\t\tcontainer_of(dev, struct vvu_connection, device);\n+\tstruct vvu_socket *vvu_socket = conn->vvu_socket;\n+\tuint16_t vq_idx = vq->vring_idx;\n+\n+\tRTE_LOG(DEBUG, VHOST_CONFIG, \"%s vq_idx %u\\n\", __func__, vq_idx);\n+\n+\trte_write16(rte_cpu_to_le_16(vq_idx), &vvu_socket->doorbells[vq_idx]);\n+\treturn 0;\n+}\n+\n+static int\n+vvu_send_reply(struct virtio_net *dev, struct VhostUserMsg *reply)\n+{\n+\tstruct vvu_connection *conn =\n+\t\tcontainer_of(dev, struct vvu_connection, device);\n+\tstruct vvu_socket *vvu_socket = conn->vvu_socket;\n+\tstruct virtqueue *vq = vvu_socket->pdev->hw.vqs[VVU_VQ_TX];\n+\tstruct vring_desc *desc;\n+\tstruct vq_desc_extra *descx;\n+\tunsigned int i;\n+\tvoid *buf;\n+\tsize_t len;\n+\n+\tRTE_LOG(DEBUG, VHOST_CONFIG,\n+\t\t\"%s request %u flags %#x size %u\\n\",\n+\t\t__func__, reply->request.master,\n+\t\treply->flags, reply->size);\n+\n+\t/* TODO convert reply to little-endian */\n+\n+\tif (virtqueue_full(vq)) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Out of tx buffers\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\ti = vvu_socket->txbuf_idx;\n+\tlen = VHOST_USER_HDR_SIZE + reply->size;\n+\tbuf = (uint8_t *)vvu_socket->txbuf_mz->addr + i * VVU_TXBUF_SIZE;\n+\n+\tmemcpy(buf, reply, len);\n+\n+\tdesc = &vq->vq_ring.desc[i];\n+\tdescx = &vq->vq_descx[i];\n+\n+\tdesc->addr = rte_cpu_to_le_64(vvu_socket->txbuf_mz->iova + i * VVU_TXBUF_SIZE);\n+\tdesc->len = rte_cpu_to_le_32(len);\n+\tdesc->flags = 0;\n+\n+\tdescx->cookie = buf;\n+\tdescx->ndescs = 1;\n+\n+\tvq->vq_free_cnt--;\n+\tvvu_socket->txbuf_idx = (vvu_socket->txbuf_idx + 1) & (vq->vq_nentries - 1);\n+\n+\tvq_update_avail_ring(vq, i);\n+\tvq_update_avail_idx(vq);\n+\n+\tif (virtqueue_kick_prepare(vq))\n+\t\tvirtqueue_notify(vq);\n+\n+\treturn 0;\n+}\n+\n+static int\n+vvu_map_mem_regions(struct virtio_net *dev, struct VhostUserMsg *msg __rte_unused)\n+{\n+\tstruct vvu_connection *conn =\n+\t\tcontainer_of(dev, struct vvu_connection, device);\n+\tstruct vvu_socket *vvu_socket = conn->vvu_socket;\n+\tstruct rte_pci_device *pci_dev = vvu_socket->pdev->pci_dev;\n+\tuint8_t *mmap_addr;\n+\tuint32_t i;\n+\n+\t/* Memory regions start after the doorbell registers */\n+\tmmap_addr = (uint8_t *)pci_dev->mem_resource[2].addr +\n+\t\t    RTE_ALIGN_CEIL((vvu_socket->max_vhost_queues + 1 /* log fd */) *\n+\t\t\t\t   sizeof(uint16_t), 4096);\n+\n+\tfor (i = 0; i < dev->mem->nregions; i++) {\n+\t\tstruct rte_vhost_mem_region *reg = &dev->mem->regions[i];\n+\n+\t\treg->mmap_addr = mmap_addr;\n+\t\treg->host_user_addr = (uint64_t)(uintptr_t)reg->mmap_addr +\n+\t\t\t\t      reg->mmap_size - 
reg->size;\n+\n+\t\tmmap_addr += reg->mmap_size;\n+\n+\t\tRTE_LOG(INFO, VHOST_CONFIG,\n+\t\t\t\"guest memory region %u, size: 0x%\" PRIx64 \"\\n\"\n+\t\t\t\"\\t guest physical addr: 0x%\" PRIx64 \"\\n\"\n+\t\t\t\"\\t guest virtual  addr: 0x%\" PRIx64 \"\\n\"\n+\t\t\t\"\\t host  virtual  addr: 0x%\" PRIx64 \"\\n\"\n+\t\t\t\"\\t mmap addr : 0x%\" PRIx64 \"\\n\"\n+\t\t\t\"\\t mmap size : 0x%\" PRIx64 \"\\n\"\n+\t\t\t\"\\t mmap off  : 0x%\" PRIx64 \"\\n\",\n+\t\t\ti, reg->size,\n+\t\t\treg->guest_phys_addr,\n+\t\t\treg->guest_user_addr,\n+\t\t\treg->host_user_addr,\n+\t\t\t(uint64_t)(uintptr_t)reg->mmap_addr,\n+\t\t\treg->mmap_size,\n+\t\t\treg->mmap_size - reg->size);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+vvu_unmap_mem_regions(struct virtio_net *dev)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < dev->mem->nregions; i++) {\n+\t\tstruct rte_vhost_mem_region *reg = &dev->mem->regions[i];\n+\n+\t\t/* Just clear the pointers, the PCI BAR stays there */\n+\t\treg->mmap_addr = NULL;\n+\t\treg->host_user_addr = 0;\n+\t}\n+}\n+\n+static void vvu_process_new_connection(struct vvu_socket *vvu_socket)\n+{\n+\tstruct vhost_user_socket *vsocket = &vvu_socket->socket;\n+\tstruct vvu_connection *conn;\n+\tstruct virtio_net *dev;\n+\tsize_t size;\n+\n+\tdev = vhost_new_device(vsocket->trans_ops);\n+\tif (!dev) {\n+\t\tvvu_reconnect(vvu_socket);\n+\t\treturn;\n+\t}\n+\n+\tconn = container_of(dev, struct vvu_connection, device);\n+\tconn->vvu_socket = vvu_socket;\n+\n+\tsize = strnlen(vsocket->path, PATH_MAX);\n+\tvhost_set_ifname(dev->vid, vsocket->path, size);\n+\n+\tRTE_LOG(INFO, VHOST_CONFIG, \"new device, handle is %d\\n\", dev->vid);\n+\n+\tif (vsocket->notify_ops->new_connection) {\n+\t\tint ret = vsocket->notify_ops->new_connection(dev->vid);\n+\t\tif (ret < 0) {\n+\t\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\t\"failed to add vhost user connection\\n\");\n+\t\t\tvhost_destroy_device(dev->vid);\n+\t\t\tvvu_reconnect(vvu_socket);\n+\t\t\treturn;\n+\t\t}\n+\t}\n+\n+\tvvu_socket->conn = conn;\n+\treturn;\n+}\n+\n+static void vvu_process_status_change(struct vvu_socket *vvu_socket, bool slave_up,\n+\t\t\t\t      bool master_up)\n+{\n+\tRTE_LOG(DEBUG, VHOST_CONFIG, \"%s slave_up %d master_up %d\\n\",\n+\t\t__func__, slave_up, master_up);\n+\n+\t/* Disconnected from the master, try reconnecting */\n+\tif (!slave_up) {\n+\t\tvvu_reconnect(vvu_socket);\n+\t\treturn;\n+\t}\n+\n+\tif (master_up && !vvu_socket->conn) {\n+\t\tvvu_process_new_connection(vvu_socket);\n+\t\treturn;\n+\t}\n+}\n+\n+static void\n+vvu_process_txq(struct vvu_socket *vvu_socket)\n+{\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tstruct virtqueue *vq = hw->vqs[VVU_VQ_TX];\n+\tuint16_t n = VIRTQUEUE_NUSED(vq);\n+\n+\tvirtio_rmb();\n+\n+\t/* Just mark the buffers complete */\n+\tvq->vq_used_cons_idx += n;\n+\tvq->vq_free_cnt += n;\n+}\n+\n+static void\n+vvu_process_rxq(struct vvu_socket *vvu_socket)\n+{\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tstruct virtqueue *vq = hw->vqs[VVU_VQ_RX];\n+\tbool refilled = false;\n+\n+\twhile (VIRTQUEUE_NUSED(vq)) {\n+\t\tstruct vring_used_elem *uep;\n+\t\tVhostUserMsg *msg;\n+\t\tuint32_t len;\n+\t\tuint32_t desc_idx;\n+\t\tuint16_t used_idx;\n+\t\tsize_t i;\n+\n+\t\tvirtio_rmb();\n+\n+\t\tused_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));\n+\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tdesc_idx = rte_le_to_cpu_32(uep->id);\n+\n+\t\tmsg = vq->vq_descx[desc_idx].cookie;\n+\t\tlen = rte_le_to_cpu_32(uep->len);\n+\n+\t\tif (msg->size > sizeof(VhostUserMsg) ||\n+\t\t    
len != VHOST_USER_HDR_SIZE + msg->size) {\n+\t\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\t\"Invalid vhost-user message size %u, got %u bytes\\n\",\n+\t\t\t\tmsg->size, len);\n+\t\t\t/* TODO reconnect */\n+\t\t\tabort();\n+\t\t}\n+\n+\t\tRTE_LOG(DEBUG, VHOST_CONFIG,\n+\t\t\t\"%s request %u flags %#x size %u\\n\",\n+\t\t\t__func__, msg->request.master,\n+\t\t\tmsg->flags, msg->size);\n+\n+\t\t/* Mark file descriptors invalid */\n+\t\tfor (i = 0; i < RTE_DIM(msg->fds); i++)\n+\t\t\tmsg->fds[i] = VIRTIO_INVALID_EVENTFD;\n+\n+\t\t/* Only process messages while connected */\n+\t\tif (vvu_socket->conn) {\n+\t\t\tif (vhost_user_msg_handler(vvu_socket->conn->device.vid,\n+\t\t\t\t\t\t   msg) < 0) {\n+\t\t\t\t/* TODO reconnect */\n+\t\t\t\tabort();\n+\t\t\t}\n+\t\t}\n+\n+\t\tvq->vq_used_cons_idx++;\n+\n+\t\t/* Refill rxq */\n+\t\tvq_update_avail_ring(vq, desc_idx);\n+\t\tvq_update_avail_idx(vq);\n+\t\trefilled = true;\n+\t}\n+\n+\tif (!refilled)\n+\t\treturn;\n+\tif (virtqueue_kick_prepare(vq))\n+\t\tvirtqueue_notify(vq);\n+}\n+\n+/* TODO Audit thread safety.  There are 3 threads involved:\n+ * 1. The main process thread that calls librte_vhost APIs during startup.\n+ * 2. The interrupt thread that calls vvu_interrupt_handler().\n+ * 3. Packet processing threads (lcores) calling librte_vhost APIs.\n+ *\n+ * It may be necessary to use locks if any of these code paths can race.  The\n+ * librte_vhost API entry points already do some locking but this needs to be\n+ * checked.\n+ */\n+static void\n+vvu_interrupt_handler(void *cb_arg)\n+{\n+\tstruct vvu_socket *vvu_socket = cb_arg;\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tstruct rte_intr_handle *intr_handle = &vvu_socket->pdev->pci_dev->intr_handle;\n+\tuint8_t isr;\n+\n+\t/* Read Interrupt Status Register (which also clears it) */\n+\tisr = VTPCI_OPS(hw)->get_isr(hw);\n+\n+\tif (isr & VIRTIO_PCI_ISR_CONFIG) {\n+\t\tuint32_t status;\n+\t\tbool slave_up;\n+\t\tbool master_up;\n+\n+\t\tvirtio_pci_read_dev_config(hw,\n+\t\t\t\toffsetof(struct virtio_vhost_user_config, status),\n+\t\t\t\t&status, sizeof(status));\n+\t\tstatus = rte_le_to_cpu_32(status);\n+\n+\t\tRTE_LOG(DEBUG, VHOST_CONFIG, \"%s isr %#x status %#x\\n\", __func__, isr, status);\n+\n+\t\tslave_up = status & (1u << VIRTIO_VHOST_USER_STATUS_SLAVE_UP);\n+\t\tmaster_up = status & (1u << VIRTIO_VHOST_USER_STATUS_MASTER_UP);\n+\t\tvvu_process_status_change(vvu_socket, slave_up, master_up);\n+\t} else\n+\t\tRTE_LOG(DEBUG, VHOST_CONFIG, \"%s isr %#x\\n\", __func__, isr);\n+\n+\t/* Re-arm before processing virtqueues so no interrupts are lost */\n+\trte_intr_enable(intr_handle);\n+\n+\tvvu_process_txq(vvu_socket);\n+\tvvu_process_rxq(vvu_socket);\n+}\n+\n+static int\n+vvu_virtio_pci_init_rxq(struct vvu_socket *vvu_socket)\n+{\n+\tchar name[sizeof(\"0000:00:00.00 vq 0 rxbufs\")];\n+\tstruct virtqueue *vq;\n+\tsize_t size;\n+\tsize_t align;\n+\tint i;\n+\n+\tvq = vvu_socket->pdev->hw.vqs[VVU_VQ_RX];\n+\n+\tsnprintf(name, sizeof(name), \"%s vq %u rxbufs\",\n+\t\t vvu_socket->pdev->pci_dev->device.name, VVU_VQ_RX);\n+\n+\t/* Allocate more than sizeof(VhostUserMsg) so there is room to grow */\n+\tsize = vq->vq_nentries * VVU_RXBUF_SIZE;\n+\talign = 1024;\n+\tvvu_socket->rxbuf_mz = rte_memzone_reserve_aligned(name, size, SOCKET_ID_ANY,\n+\t\t\t\t\t\t\t   0, align);\n+\tif (!vvu_socket->rxbuf_mz) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Failed to allocate rxbuf memzone\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < vq->vq_nentries; i++) {\n+\t\tstruct vring_desc *desc = 
&vq->vq_ring.desc[i];\n+\t\tstruct vq_desc_extra *descx = &vq->vq_descx[i];\n+\n+\t\tdesc->addr = rte_cpu_to_le_64(vvu_socket->rxbuf_mz->iova +\n+\t\t\t\t              i * VVU_RXBUF_SIZE);\n+\t\tdesc->len = RTE_LE32(VVU_RXBUF_SIZE);\n+\t\tdesc->flags = RTE_LE16(VRING_DESC_F_WRITE);\n+\n+\t\tdescx->cookie = (uint8_t *)vvu_socket->rxbuf_mz->addr + i * VVU_RXBUF_SIZE;\n+\t\tdescx->ndescs = 1;\n+\n+\t\tvq_update_avail_ring(vq, i);\n+\t\tvq->vq_free_cnt--;\n+\t}\n+\n+\tvq_update_avail_idx(vq);\n+\tvirtqueue_notify(vq);\n+\treturn 0;\n+}\n+\n+static int\n+vvu_virtio_pci_init_txq(struct vvu_socket *vvu_socket)\n+{\n+\tchar name[sizeof(\"0000:00:00.00 vq 0 txbufs\")];\n+\tstruct virtqueue *vq;\n+\tsize_t size;\n+\tsize_t align;\n+\n+\tvq = vvu_socket->pdev->hw.vqs[VVU_VQ_TX];\n+\n+\tsnprintf(name, sizeof(name), \"%s vq %u txbufs\",\n+\t\t vvu_socket->pdev->pci_dev->device.name, VVU_VQ_TX);\n+\n+\t/* Allocate more than sizeof(VhostUserMsg) so there is room to grow */\n+\tsize = vq->vq_nentries * VVU_TXBUF_SIZE;\n+\talign = 1024;\n+\tvvu_socket->txbuf_mz = rte_memzone_reserve_aligned(name, size, SOCKET_ID_ANY,\n+\t\t\t\t\t\t\t   0, align);\n+\tif (!vvu_socket->txbuf_mz) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Failed to allocate txbuf memzone\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tvvu_socket->txbuf_idx = 0;\n+\treturn 0;\n+}\n+\n+static void\n+virtio_init_vring(struct virtqueue *vq)\n+{\n+\tint size = vq->vq_nentries;\n+\tstruct vring *vr = &vq->vq_ring;\n+\tuint8_t *ring_mem = vq->vq_ring_virt_mem;\n+\n+\tmemset(ring_mem, 0, vq->vq_ring_size);\n+\tvring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);\n+\tvq->vq_used_cons_idx = 0;\n+\tvq->vq_desc_head_idx = 0;\n+\tvq->vq_avail_idx = 0;\n+\tvq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);\n+\tvq->vq_free_cnt = vq->vq_nentries;\n+\tmemset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);\n+\n+\tvring_desc_init(vr->desc, size);\n+\tvirtqueue_enable_intr(vq);\n+}\n+\n+static int\n+vvu_virtio_pci_init_vq(struct vvu_socket *vvu_socket, int vq_idx)\n+{\n+\tchar vq_name[sizeof(\"0000:00:00.00 vq 0\")];\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct virtqueue *vq;\n+\tuint16_t q_num;\n+\tsize_t size;\n+\n+\tq_num = VTPCI_OPS(hw)->get_queue_num(hw, vq_idx);\n+\tRTE_LOG(DEBUG, VHOST_CONFIG, \"vq %d q_num: %u\\n\", vq_idx, q_num);\n+\tif (q_num == 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"virtqueue %d does not exist\\n\",\n+\t\t\tvq_idx);\n+\t\treturn -1;\n+\t}\n+\n+\tif (!rte_is_power_of_2(q_num)) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"virtqueue %d has non-power of 2 size (%u)\\n\",\n+\t\t\tvq_idx, q_num);\n+\t\treturn -1;\n+\t}\n+\n+\tsnprintf(vq_name, sizeof(vq_name), \"%s vq %u\",\n+\t\t vvu_socket->pdev->pci_dev->device.name, vq_idx);\n+\n+\tsize = RTE_ALIGN_CEIL(sizeof(*vq) +\n+\t\t\t      q_num * sizeof(struct vq_desc_extra),\n+\t\t\t      RTE_CACHE_LINE_SIZE);\n+\tvq = rte_zmalloc(vq_name, size, RTE_CACHE_LINE_SIZE);\n+\tif (!vq) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Failed to allocated virtqueue %d\\n\", vq_idx);\n+\t\treturn -1;\n+\t}\n+\thw->vqs[vq_idx] = vq;\n+\n+\tvq->hw = hw;\n+\tvq->vq_queue_index = vq_idx;\n+\tvq->vq_nentries = q_num;\n+\n+\tsize = vring_size(q_num, VIRTIO_PCI_VRING_ALIGN);\n+\tvq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);\n+\n+\tmz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,\n+\t\t\t\t\t SOCKET_ID_ANY, 0,\n+\t\t\t\t\t VIRTIO_PCI_VRING_ALIGN);\n+\tif (mz == NULL) {\n+\t\tRTE_LOG(ERR, 
VHOST_CONFIG,\n+\t\t\t\"Failed to reserve memzone for virtqueue %d\\n\",\n+\t\t\tvq_idx);\n+\t\tgoto err_vq;\n+\t}\n+\n+\tmemset(mz->addr, 0, mz->len);\n+\n+\tvq->mz = mz;\n+\tvq->vq_ring_mem = mz->iova;\n+\tvq->vq_ring_virt_mem = mz->addr;\n+\tvirtio_init_vring(vq);\n+\n+\tif (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0)\n+\t\tgoto err_mz;\n+\n+\treturn 0;\n+\n+err_mz:\n+\trte_memzone_free(mz);\n+\n+err_vq:\n+\thw->vqs[vq_idx] = NULL;\n+\trte_free(vq);\n+\treturn -1;\n+}\n+\n+static void\n+vvu_virtio_pci_free_virtqueues(struct vvu_socket *vvu_socket)\n+{\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tint i;\n+\n+\tif (vvu_socket->rxbuf_mz) {\n+\t\trte_memzone_free(vvu_socket->rxbuf_mz);\n+\t\tvvu_socket->rxbuf_mz = NULL;\n+\t}\n+\tif (vvu_socket->txbuf_mz) {\n+\t\trte_memzone_free(vvu_socket->txbuf_mz);\n+\t\tvvu_socket->txbuf_mz = NULL;\n+\t}\n+\n+\tfor (i = 0; i < VVU_VQ_MAX; i++) {\n+\t\tstruct virtqueue *vq = hw->vqs[i];\n+\n+\t\tif (!vq)\n+\t\t\tcontinue;\n+\n+\t\trte_memzone_free(vq->mz);\n+\t\trte_free(vq);\n+\t\thw->vqs[i] = NULL;\n+\t}\n+\n+\trte_free(hw->vqs);\n+\thw->vqs = NULL;\n+}\n+\n+static void\n+vvu_virtio_pci_intr_cleanup(struct vvu_socket *vvu_socket)\n+{\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tstruct rte_intr_handle *intr_handle = &vvu_socket->pdev->pci_dev->intr_handle;\n+\tint i;\n+\n+\tfor (i = 0; i < VVU_VQ_MAX; i++)\n+\t\tVTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i],\n+\t\t\t\t\t     VIRTIO_MSI_NO_VECTOR);\n+\tVTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);\n+\trte_intr_disable(intr_handle);\n+\trte_intr_callback_unregister(intr_handle, vvu_interrupt_handler, vvu_socket);\n+\trte_intr_efd_disable(intr_handle);\n+}\n+\n+static int\n+vvu_virtio_pci_init_intr(struct vvu_socket *vvu_socket)\n+{\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tstruct rte_intr_handle *intr_handle = &vvu_socket->pdev->pci_dev->intr_handle;\n+\tint i;\n+\n+\tif (!rte_intr_cap_multiple(intr_handle)) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Multiple intr vector not supported\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_intr_efd_enable(intr_handle, VVU_VQ_MAX) < 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Failed to create eventfds\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_intr_callback_register(intr_handle, vvu_interrupt_handler, vvu_socket) < 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Failed to register interrupt callback\\n\");\n+\t\tgoto err_efd;\n+\t}\n+\n+\tif (rte_intr_enable(intr_handle) < 0)\n+\t\tgoto err_callback;\n+\n+\tif (VTPCI_OPS(hw)->set_config_irq(hw, 0) == VIRTIO_MSI_NO_VECTOR) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Failed to set config MSI-X vector\\n\");\n+\t\tgoto err_enable;\n+\t}\n+\n+\t/* TODO use separate vectors and interrupt handler functions.  It seems\n+\t * <rte_interrupts.h> doesn't allow efds to have interrupt_handler\n+\t * functions and it just clears efds when they are raised.  
As a\n+\t * workaround we use the configuration change interrupt for virtqueue\n+\t * interrupts!\n+\t */\n+\tfor (i = 0; i < VVU_VQ_MAX; i++) {\n+\t\tif (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i], 0) ==\n+\t\t\t\tVIRTIO_MSI_NO_VECTOR) {\n+\t\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\t\"Failed to set virtqueue MSI-X vector\\n\");\n+\t\t\tgoto err_vq;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+err_vq:\n+\tfor (i = 0; i < VVU_VQ_MAX; i++)\n+\t\tVTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i],\n+\t\t\t\t\t     VIRTIO_MSI_NO_VECTOR);\n+\tVTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);\n+err_enable:\n+\trte_intr_disable(intr_handle);\n+err_callback:\n+\trte_intr_callback_unregister(intr_handle, vvu_interrupt_handler, vvu_socket);\n+err_efd:\n+\trte_intr_efd_disable(intr_handle);\n+\treturn -1;\n+}\n+\n+static int\n+vvu_virtio_pci_init_bar(struct vvu_socket *vvu_socket)\n+{\n+\tstruct rte_pci_device *pci_dev = vvu_socket->pdev->pci_dev;\n+\tstruct virtio_net *dev = NULL; /* just for sizeof() */\n+\n+\tvvu_socket->doorbells = pci_dev->mem_resource[2].addr;\n+\tif (!vvu_socket->doorbells) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"BAR 2 not available\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* The number of doorbells is max_vhost_queues + 1 */\n+\tvirtio_pci_read_dev_config(&vvu_socket->pdev->hw,\n+\t\t\toffsetof(struct virtio_vhost_user_config,\n+\t\t\t\t max_vhost_queues),\n+\t\t\t&vvu_socket->max_vhost_queues,\n+\t\t\tsizeof(vvu_socket->max_vhost_queues));\n+\tvvu_socket->max_vhost_queues = rte_le_to_cpu_32(vvu_socket->max_vhost_queues);\n+\tif (vvu_socket->max_vhost_queues < RTE_DIM(dev->virtqueue)) {\n+\t\t/* We could support devices with a smaller max number of\n+\t\t * virtqueues than dev->virtqueue[] in the future.  Fail early\n+\t\t * for now since the current assumption is that all of\n+\t\t * dev->virtqueue[] can be used.\n+\t\t */\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Device supports fewer virtqueues than driver!\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+vvu_virtio_pci_init(struct vvu_socket *vvu_socket)\n+{\n+\tuint64_t host_features;\n+\tstruct virtio_hw *hw = &vvu_socket->pdev->hw;\n+\tint i;\n+\n+\tvirtio_pci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);\n+\tvirtio_pci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);\n+\n+\thw->guest_features = VVU_VIRTIO_FEATURES;\n+\thost_features = VTPCI_OPS(hw)->get_features(hw);\n+\thw->guest_features = virtio_pci_negotiate_features(hw, host_features);\n+\n+\tif (!virtio_pci_with_feature(hw, VIRTIO_F_VERSION_1)) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Missing VIRTIO 1 feature bit\\n\");\n+\t\tgoto err;\n+\t}\n+\n+\tvirtio_pci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);\n+\tif (!(virtio_pci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG, \"Failed to set FEATURES_OK\\n\");\n+\t\tgoto err;\n+\t}\n+\n+\tif (vvu_virtio_pci_init_bar(vvu_socket) < 0)\n+\t\tgoto err;\n+\n+\thw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * VVU_VQ_MAX, 0);\n+\tif (!hw->vqs)\n+\t\tgoto err;\n+\n+\tfor (i = 0; i < VVU_VQ_MAX; i++) {\n+\t\tif (vvu_virtio_pci_init_vq(vvu_socket, i) < 0) {\n+\t\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\t\"virtqueue %u init failed\\n\", i);\n+\t\t\tgoto err_init_vq;\n+\t\t}\n+\t}\n+\n+\tif (vvu_virtio_pci_init_rxq(vvu_socket) < 0)\n+\t\tgoto err_init_vq;\n+\n+\tif (vvu_virtio_pci_init_txq(vvu_socket) < 0)\n+\t\tgoto err_init_vq;\n+\n+\tif (vvu_virtio_pci_init_intr(vvu_socket) < 0)\n+\t\tgoto err_init_vq;\n+\n+\tvirtio_pci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);\n+\n+\treturn 
0;\n+\n+err_init_vq:\n+\tvvu_virtio_pci_free_virtqueues(vvu_socket);\n+\n+err:\n+\tvirtio_pci_reset(hw);\n+\tRTE_LOG(DEBUG, VHOST_CONFIG, \"%s failed\\n\", __func__);\n+\treturn -1;\n+}\n+\n+static int\n+vvu_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\t      struct rte_pci_device *pci_dev)\n+{\n+\tstruct vvu_pci_device *pdev;\n+\n+\t/* TODO support multi-process applications */\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"virtio-vhost-pci does not support multi-process \"\n+\t\t\t\"applications\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tpdev = rte_zmalloc_socket(pci_dev->device.name, sizeof(*pdev),\n+\t\t\t\t  RTE_CACHE_LINE_SIZE,\n+\t\t\t\t  pci_dev->device.numa_node);\n+\tif (!pdev)\n+\t\treturn -1;\n+\n+\tpdev->pci_dev = pci_dev;\n+\n+\tif (virtio_pci_init(pci_dev, &pdev->hw) != 0) {\n+\t\trte_free(pdev);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Reset the device now, the rest is done in vvu_socket_init() */\n+\tvirtio_pci_reset(&pdev->hw);\n+\n+\tif (pdev->hw.use_msix == VIRTIO_MSIX_NONE) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"MSI-X is required for PCI device at %s\\n\",\n+\t\t\tpci_dev->device.name);\n+\t\trte_free(pdev);\n+\t\trte_pci_unmap_device(pci_dev);\n+\t\treturn -1;\n+\t}\n+\n+\tTAILQ_INSERT_TAIL(&vvu_pci_device_list, pdev, next);\n+\n+\tRTE_LOG(INFO, VHOST_CONFIG,\n+\t\t\"Added virtio-vhost-user device at %s\\n\",\n+\t\tpci_dev->device.name);\n+\n+\treturn 0;\n+}\n+\n+static int\n+vvu_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct vvu_pci_device *pdev;\n+\n+\tTAILQ_FOREACH(pdev, &vvu_pci_device_list, next)\n+\t\tif (pdev->pci_dev == pci_dev)\n+\t\t\tbreak;\n+\tif (!pdev)\n+\t\treturn -1;\n+\n+\tif (pdev->vvu_socket) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Cannot remove PCI device at %s with vhost still attached\\n\",\n+\t\t\tpci_dev->device.name);\n+\t\treturn -1;\n+\t}\n+\n+\tTAILQ_REMOVE(&vvu_pci_device_list, pdev, next);\n+\trte_free(pdev);\n+\trte_pci_unmap_device(pci_dev);\n+\treturn 0;\n+}\n+\n+static const struct rte_pci_id pci_id_vvu_map[] = {\n+\t{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID,\n+\t\t\t VIRTIO_PCI_LEGACY_DEVICEID_VHOST_USER) },\n+\t{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID,\n+\t\t\t VIRTIO_PCI_MODERN_DEVICEID_VHOST_USER) },\n+\t{ .vendor_id = 0, /* sentinel */ },\n+};\n+\n+static struct rte_pci_driver vvu_pci_driver = {\n+\t.driver = {\n+\t\t.name = \"virtio_vhost_user\",\n+\t},\n+\t.id_table = pci_id_vvu_map,\n+\t.drv_flags = 0,\n+\t.probe = vvu_pci_probe,\n+\t.remove = vvu_pci_remove,\n+};\n+\n+RTE_INIT(vvu_pci_init);\n+static void\n+vvu_pci_init(void)\n+{\n+\tif (rte_eal_iopl_init() != 0) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"IOPL call failed - cannot use virtio-vhost-user\\n\");\n+\t\treturn;\n+\t}\n+\n+\trte_pci_register(&vvu_pci_driver);\n+}\n+\n+static int\n+vvu_socket_init(struct vhost_user_socket *vsocket, uint64_t flags)\n+{\n+\tstruct vvu_socket *vvu_socket =\n+\t\tcontainer_of(vsocket, struct vvu_socket, socket);\n+\tstruct vvu_pci_device *pdev;\n+\n+\tif (flags & RTE_VHOST_USER_NO_RECONNECT) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"error: reconnect cannot be disabled for virtio-vhost-user\\n\");\n+\t\treturn -1;\n+\t}\n+\tif (flags & RTE_VHOST_USER_CLIENT) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"error: virtio-vhost-user does not support client mode\\n\");\n+\t\treturn -1;\n+\t}\n+\tif (flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"error: virtio-vhost-user does not support dequeue-zero-copy\\n\");\n+\t\treturn 
-1;\n+\t}\n+\n+\tpdev = vvu_pci_by_name(vsocket->path);\n+\tif (!pdev) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Cannot find virtio-vhost-user PCI device at %s\\n\",\n+\t\t\tvsocket->path);\n+\t\treturn -1;\n+\t}\n+\n+\tif (pdev->vvu_socket) {\n+\t\tRTE_LOG(ERR, VHOST_CONFIG,\n+\t\t\t\"Device at %s is already in use\\n\",\n+\t\t\tvsocket->path);\n+\t\treturn -1;\n+\t}\n+\n+\tvvu_socket->pdev = pdev;\n+\tpdev->vvu_socket = vvu_socket;\n+\n+\tif (vvu_virtio_pci_init(vvu_socket) < 0) {\n+\t\tvvu_socket->pdev = NULL;\n+\t\tpdev->vvu_socket = NULL;\n+\t\treturn -1;\n+\t}\n+\n+\tRTE_LOG(INFO, VHOST_CONFIG, \"%s at %s\\n\", __func__, vsocket->path);\n+\treturn 0;\n+}\n+\n+static void\n+vvu_socket_cleanup(struct vhost_user_socket *vsocket)\n+{\n+\tstruct vvu_socket *vvu_socket =\n+\t\tcontainer_of(vsocket, struct vvu_socket, socket);\n+\n+\tif (vvu_socket->conn)\n+\t\tvhost_destroy_device(vvu_socket->conn->device.vid);\n+\n+\tvvu_virtio_pci_intr_cleanup(vvu_socket);\n+\tvirtio_pci_reset(&vvu_socket->pdev->hw);\n+\tvvu_virtio_pci_free_virtqueues(vvu_socket);\n+\n+\tvvu_socket->pdev->vvu_socket = NULL;\n+\tvvu_socket->pdev = NULL;\n+}\n+\n+static int\n+vvu_socket_start(struct vhost_user_socket *vsocket)\n+{\n+\tstruct vvu_socket *vvu_socket =\n+\t\tcontainer_of(vsocket, struct vvu_socket, socket);\n+\n+\tvvu_connect(vvu_socket);\n+\treturn 0;\n+}\n+\n+const struct vhost_transport_ops virtio_vhost_user_trans_ops = {\n+\t.socket_size = sizeof(struct vvu_socket),\n+\t.device_size = sizeof(struct vvu_connection),\n+\t.socket_init = vvu_socket_init,\n+\t.socket_cleanup = vvu_socket_cleanup,\n+\t.socket_start = vvu_socket_start,\n+\t.cleanup_device = vvu_cleanup_device,\n+\t.vring_call = vvu_vring_call,\n+\t.send_reply = vvu_send_reply,\n+\t.map_mem_regions = vvu_map_mem_regions,\n+\t.unmap_mem_regions = vvu_unmap_mem_regions,\n+};\ndiff --git a/drivers/virtio_vhost_user/virtio_vhost_user.h b/drivers/virtio_vhost_user/virtio_vhost_user.h\nnew file mode 100644\nindex 0000000..baeaa74\n--- /dev/null\n+++ b/drivers/virtio_vhost_user/virtio_vhost_user.h\n@@ -0,0 +1,18 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (C) 2018 Red Hat, Inc.\n+ */\n+\n+#ifndef _LINUX_VIRTIO_VHOST_USER_H\n+#define _LINUX_VIRTIO_VHOST_USER_H\n+\n+#include <stdint.h>\n+\n+struct virtio_vhost_user_config {\n+    uint32_t status;\n+#define VIRTIO_VHOST_USER_STATUS_SLAVE_UP 0\n+#define VIRTIO_VHOST_USER_STATUS_MASTER_UP 1\n+    uint32_t max_vhost_queues;\n+    uint8_t uuid[16];\n+};\n+\n+#endif /* _LINUX_VIRTIO_VHOST_USER_H */\n",
    "prefixes": [
        "20/28"
    ]
}
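
Fields in the response are themselves entry points: "mbox" serves the raw patch in a format `git am` accepts, and "checks" expands the aggregate "check": "fail" into per-context CI results. A minimal consumer sketch in Python with the requests library; the check field names follow the Patchwork check serializer and should be verified against the running instance.

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/54986/").json()

# Save the raw patch so it can be applied with `git am`.
with open("0020-virtio-vhost-user.patch", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)

# List the individual CI checks behind "check": "fail".
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])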