get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
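
As a usage illustration (not part of the API output reproduced below), the "show" operation is a plain GET request that any HTTP client can issue. Here is a minimal sketch with Python's requests library, assuming network access to patches.dpdk.org; the URL and field names are taken from the example response that follows. An update sketch appears after the full response.

import requests

# Retrieve the patch shown below and parse the JSON body.
resp = requests.get(
    "http://patches.dpdk.org/api/patches/105359/",
    headers={"Accept": "application/json"},
)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[3/7] vhost: improve socket layer logs"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # URL of the raw mbox, suitable for applying with `git am`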

GET /api/patches/105359/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 105359,
    "url": "http://patches.dpdk.org/api/patches/105359/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211223083659.245766-4-maxime.coquelin@redhat.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211223083659.245766-4-maxime.coquelin@redhat.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211223083659.245766-4-maxime.coquelin@redhat.com",
    "date": "2021-12-23T08:36:55",
    "name": "[3/7] vhost: improve socket layer logs",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "7925188501ceb5eca84cbd1b1f66601bde84cb88",
    "submitter": {
        "id": 512,
        "url": "http://patches.dpdk.org/api/people/512/?format=api",
        "name": "Maxime Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211223083659.245766-4-maxime.coquelin@redhat.com/mbox/",
    "series": [
        {
            "id": 21013,
            "url": "http://patches.dpdk.org/api/series/21013/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21013",
            "date": "2021-12-23T08:36:52",
            "name": "vhost: improve logging",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/21013/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/105359/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/105359/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C5F1CA0350;\n\tThu, 23 Dec 2021 09:37:44 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B799E41152;\n\tThu, 23 Dec 2021 09:37:37 +0100 (CET)",
            "from us-smtp-delivery-124.mimecast.com\n (us-smtp-delivery-124.mimecast.com [170.10.133.124])\n by mails.dpdk.org (Postfix) with ESMTP id D888841142\n for <dev@dpdk.org>; Thu, 23 Dec 2021 09:37:35 +0100 (CET)",
            "from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com\n [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS\n (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n us-mta-486-reaFOoaOP1a0FO5uNNNLYw-1; Thu, 23 Dec 2021 03:37:33 -0500",
            "from smtp.corp.redhat.com (int-mx01.intmail.prod.int.phx2.redhat.com\n [10.5.11.11])\n (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n (No client certificate requested)\n by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 9A17E344AF;\n Thu, 23 Dec 2021 08:37:32 +0000 (UTC)",
            "from max-t490s.redhat.com (unknown [10.39.208.14])\n by smtp.corp.redhat.com (Postfix) with ESMTP id 698A660BD8;\n Thu, 23 Dec 2021 08:37:31 +0000 (UTC)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com;\n s=mimecast20190719; t=1640248655;\n h=from:from:reply-to:subject:subject:date:date:message-id:message-id:\n to:to:cc:cc:mime-version:mime-version:content-type:content-type:\n content-transfer-encoding:content-transfer-encoding:\n in-reply-to:in-reply-to:references:references;\n bh=ScOTJKSy5HnU+4Eq0/8FGmCWp3zPURtjfjI0dwg0yj8=;\n b=iVkPTxL2bJAlY3J+VkQzydvPCnue0quRLQWuMijdRrMWEDfdfWPtukHRqorgKfrEit7a+n\n AbThO5W3+QDjVZcABPrg8X9NjOUe7u0XAR/yizS4bac4oth9C+HONjyxuLc6jtvI7fPqYU\n sX51vBbBEjMNuuQKjcGaiJDxZrVkMMM=",
        "X-MC-Unique": "reaFOoaOP1a0FO5uNNNLYw-1",
        "From": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "To": "dev@dpdk.org,\n\tchenbo.xia@intel.com,\n\tdavid.marchand@redhat.com",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "Subject": "[PATCH 3/7] vhost: improve socket layer logs",
        "Date": "Thu, 23 Dec 2021 09:36:55 +0100",
        "Message-Id": "<20211223083659.245766-4-maxime.coquelin@redhat.com>",
        "In-Reply-To": "<20211223083659.245766-1-maxime.coquelin@redhat.com>",
        "References": "<20211223083659.245766-1-maxime.coquelin@redhat.com>",
        "MIME-Version": "1.0",
        "X-Scanned-By": "MIMEDefang 2.79 on 10.5.11.11",
        "Authentication-Results": "relay.mimecast.com;\n auth=pass smtp.auth=CUSA124A263 smtp.mailfrom=maxime.coquelin@redhat.com",
        "X-Mimecast-Spam-Score": "0",
        "X-Mimecast-Originator": "redhat.com",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain; charset=\"US-ASCII\"",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This patch adds the Vhost socket path whenever possible in\norder to make debugging possible when multiple Vhost\ndevices are in use.\n\nSigned-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n lib/vhost/socket.c | 129 ++++++++++++++++++---------------------------\n 1 file changed, 51 insertions(+), 78 deletions(-)",
    "diff": "diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c\nindex 82963c1e6d..fd6b3a3ee4 100644\n--- a/lib/vhost/socket.c\n+++ b/lib/vhost/socket.c\n@@ -124,12 +124,12 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds,\n \tret = recvmsg(sockfd, &msgh, 0);\n \tif (ret <= 0) {\n \t\tif (ret)\n-\t\t\tVHOST_LOG_CONFIG(ERR, \"recvmsg failed\\n\");\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"recvmsg failed (fd %d)\\n\", sockfd);\n \t\treturn ret;\n \t}\n \n \tif (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"truncated msg\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"truncated msg (fd %d)\\n\", sockfd);\n \t\treturn -1;\n \t}\n \n@@ -192,7 +192,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)\n \t} while (ret < 0 && errno == EINTR);\n \n \tif (ret < 0) {\n-\t\tVHOST_LOG_CONFIG(ERR,  \"sendmsg error\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR,  \"sendmsg error (fd %d)\\n\", sockfd);\n \t\treturn ret;\n \t}\n \n@@ -243,14 +243,14 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)\n \t\t\tdev->async_copy = 1;\n \t}\n \n-\tVHOST_LOG_CONFIG(INFO, \"new device, handle is %d, path is %s\\n\", vid, vsocket->path);\n+\tVHOST_LOG_CONFIG(INFO, \"(%s) new device, handle is %d\\n\", vsocket->path, vid);\n \n \tif (vsocket->notify_ops->new_connection) {\n \t\tret = vsocket->notify_ops->new_connection(vid);\n \t\tif (ret < 0) {\n \t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"failed to add vhost user connection with fd %d\\n\",\n-\t\t\t\tfd);\n+\t\t\t\t\"(%s) failed to add vhost user connection with fd %d\\n\",\n+\t\t\t\tvsocket->path, fd);\n \t\t\tgoto err_cleanup;\n \t\t}\n \t}\n@@ -261,9 +261,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)\n \tret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,\n \t\t\tNULL, conn);\n \tif (ret < 0) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"failed to add fd %d into vhost server fdset\\n\",\n-\t\t\tfd);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to add fd %d into vhost server fdset\\n\",\n+\t\t\tvsocket->path, fd);\n \n \t\tif (vsocket->notify_ops->destroy_connection)\n \t\t\tvsocket->notify_ops->destroy_connection(conn->vid);\n@@ -295,7 +294,8 @@ vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)\n \tif (fd < 0)\n \t\treturn;\n \n-\tVHOST_LOG_CONFIG(INFO, \"new vhost user connection is %d\\n\", fd);\n+\tVHOST_LOG_CONFIG(INFO, \"(%s) new vhost user connection is %d\\n\",\n+\t\t\tvsocket->path, fd);\n \tvhost_user_add_connection(fd, vsocket);\n }\n \n@@ -343,13 +343,13 @@ create_unix_socket(struct vhost_user_socket *vsocket)\n \tfd = socket(AF_UNIX, SOCK_STREAM, 0);\n \tif (fd < 0)\n \t\treturn -1;\n-\tVHOST_LOG_CONFIG(INFO, \"vhost-user %s: socket created, fd: %d\\n\",\n-\t\tvsocket->is_server ? \"server\" : \"client\", fd);\n+\tVHOST_LOG_CONFIG(INFO, \"(%s) vhost-user %s: socket created, fd: %d\\n\",\n+\t\tvsocket->path, vsocket->is_server ? 
\"server\" : \"client\", fd);\n \n \tif (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {\n \t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"vhost-user: can't set nonblocking mode for socket, fd: \"\n-\t\t\t\"%d (%s)\\n\", fd, strerror(errno));\n+\t\t\t\"(%s) vhost-user: can't set nonblocking mode for socket, fd: %d (%s)\\n\",\n+\t\t\tvsocket->path, fd, strerror(errno));\n \t\tclose(fd);\n \t\treturn -1;\n \t}\n@@ -382,12 +382,11 @@ vhost_user_start_server(struct vhost_user_socket *vsocket)\n \t */\n \tret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));\n \tif (ret < 0) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"failed to bind to %s: %s; remove it and try again\\n\",\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to bind: %s; remove it and try again\\n\",\n \t\t\tpath, strerror(errno));\n \t\tgoto err;\n \t}\n-\tVHOST_LOG_CONFIG(INFO, \"bind to %s\\n\", path);\n+\tVHOST_LOG_CONFIG(INFO, \"(%s) binding succeeded\\n\", path);\n \n \tret = listen(fd, MAX_VIRTIO_BACKLOG);\n \tif (ret < 0)\n@@ -397,8 +396,8 @@ vhost_user_start_server(struct vhost_user_socket *vsocket)\n \t\t  NULL, vsocket);\n \tif (ret < 0) {\n \t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"failed to add listen fd %d to vhost server fdset\\n\",\n-\t\t\tfd);\n+\t\t\t\"(%s) failed to add listen fd %d to vhost server fdset\\n\",\n+\t\t\tpath, fd);\n \t\tgoto err;\n \t}\n \n@@ -437,13 +436,11 @@ vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)\n \n \tflags = fcntl(fd, F_GETFL, 0);\n \tif (flags < 0) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"can't get flags for connfd %d\\n\", fd);\n+\t\tVHOST_LOG_CONFIG(ERR, \"can't get flags for connfd %d\\n\", fd);\n \t\treturn -2;\n \t}\n \tif ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"can't disable nonblocking on fd %d\\n\", fd);\n+\t\tVHOST_LOG_CONFIG(ERR, \"can't disable nonblocking on fd %d\\n\", fd);\n \t\treturn -2;\n \t}\n \treturn 0;\n@@ -471,16 +468,14 @@ vhost_user_client_reconnect(void *arg __rte_unused)\n \t\t\t\t\t\tsizeof(reconn->un));\n \t\t\tif (ret == -2) {\n \t\t\t\tclose(reconn->fd);\n-\t\t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\t\"reconnection for fd %d failed\\n\",\n-\t\t\t\t\treconn->fd);\n+\t\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) reconnection for fd %d failed\\n\",\n+\t\t\t\t\treconn->vsocket->path, reconn->fd);\n \t\t\t\tgoto remove_fd;\n \t\t\t}\n \t\t\tif (ret == -1)\n \t\t\t\tcontinue;\n \n-\t\t\tVHOST_LOG_CONFIG(INFO,\n-\t\t\t\t\"%s: connected\\n\", reconn->vsocket->path);\n+\t\t\tVHOST_LOG_CONFIG(INFO, \"%s: connected\\n\", reconn->vsocket->path);\n \t\t\tvhost_user_add_connection(reconn->fd, reconn->vsocket);\n remove_fd:\n \t\t\tTAILQ_REMOVE(&reconn_list.head, reconn, next);\n@@ -510,10 +505,8 @@ vhost_user_reconnect_init(void)\n \t\t\t     vhost_user_client_reconnect, NULL);\n \tif (ret != 0) {\n \t\tVHOST_LOG_CONFIG(ERR, \"failed to create reconnect thread\");\n-\t\tif (pthread_mutex_destroy(&reconn_list.mutex)) {\n-\t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"failed to destroy reconnect mutex\");\n-\t\t}\n+\t\tif (pthread_mutex_destroy(&reconn_list.mutex))\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"failed to destroy reconnect mutex\");\n \t}\n \n \treturn ret;\n@@ -534,20 +527,17 @@ vhost_user_start_client(struct vhost_user_socket *vsocket)\n \t\treturn 0;\n \t}\n \n-\tVHOST_LOG_CONFIG(WARNING,\n-\t\t\"failed to connect to %s: %s\\n\",\n-\t\tpath, strerror(errno));\n+\tVHOST_LOG_CONFIG(WARNING, \"(%s) failed to connect: %s\\n\", path, strerror(errno));\n \n \tif (ret == -2 || !vsocket->reconnect) {\n 
\t\tclose(fd);\n \t\treturn -1;\n \t}\n \n-\tVHOST_LOG_CONFIG(INFO, \"%s: reconnecting...\\n\", path);\n+\tVHOST_LOG_CONFIG(INFO, \"(%s) reconnecting...\\n\", path);\n \treconn = malloc(sizeof(*reconn));\n \tif (reconn == NULL) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"failed to allocate memory for reconnect\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to allocate memory for reconnect\\n\", path);\n \t\tclose(fd);\n \t\treturn -1;\n \t}\n@@ -701,8 +691,7 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)\n \tpthread_mutex_lock(&vhost_user.mutex);\n \tvsocket = find_vhost_user_socket(path);\n \tif (!vsocket) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"socket file %s is not registered yet.\\n\", path);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) socket file is not registered yet.\\n\", path);\n \t\tret = -1;\n \t\tgoto unlock_exit;\n \t}\n@@ -714,9 +703,7 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)\n \t}\n \n \tif (vdpa_dev->ops->get_features(vdpa_dev, &vdpa_features) < 0) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"failed to get vdpa features \"\n-\t\t\t\t\"for socket file %s.\\n\", path);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to get vdpa features for socket file.\\n\", path);\n \t\tret = -1;\n \t\tgoto unlock_exit;\n \t}\n@@ -754,8 +741,7 @@ rte_vhost_driver_get_protocol_features(const char *path,\n \tpthread_mutex_lock(&vhost_user.mutex);\n \tvsocket = find_vhost_user_socket(path);\n \tif (!vsocket) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"socket file %s is not registered yet.\\n\", path);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) socket file is not registered yet.\\n\", path);\n \t\tret = -1;\n \t\tgoto unlock_exit;\n \t}\n@@ -768,9 +754,8 @@ rte_vhost_driver_get_protocol_features(const char *path,\n \n \tif (vdpa_dev->ops->get_protocol_features(vdpa_dev,\n \t\t\t\t&vdpa_protocol_features) < 0) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"failed to get vdpa protocol features \"\n-\t\t\t\t\"for socket file %s.\\n\", path);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to get vdpa protocol features.\\n\",\n+\t\t\t\tpath);\n \t\tret = -1;\n \t\tgoto unlock_exit;\n \t}\n@@ -794,8 +779,7 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)\n \tpthread_mutex_lock(&vhost_user.mutex);\n \tvsocket = find_vhost_user_socket(path);\n \tif (!vsocket) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"socket file %s is not registered yet.\\n\", path);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) socket file is not registered yet.\\n\", path);\n \t\tret = -1;\n \t\tgoto unlock_exit;\n \t}\n@@ -807,9 +791,8 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)\n \t}\n \n \tif (vdpa_dev->ops->get_queue_num(vdpa_dev, &vdpa_queue_num) < 0) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"failed to get vdpa queue number \"\n-\t\t\t\t\"for socket file %s.\\n\", path);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to get vdpa queue number.\\n\",\n+\t\t\t\tpath);\n \t\tret = -1;\n \t\tgoto unlock_exit;\n \t}\n@@ -852,8 +835,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \tpthread_mutex_lock(&vhost_user.mutex);\n \n \tif (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"error: the number of vhost sockets reaches maximum\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) the number of vhost sockets reaches maximum\\n\",\n+\t\t\t\tpath);\n \t\tgoto out;\n \t}\n \n@@ -863,16 +846,14 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \tmemset(vsocket, 0, sizeof(struct vhost_user_socket));\n \tvsocket->path = strdup(path);\n 
\tif (vsocket->path == NULL) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"error: failed to copy socket path string\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to copy socket path string\\n\", path);\n \t\tvhost_user_socket_mem_free(vsocket);\n \t\tgoto out;\n \t}\n \tTAILQ_INIT(&vsocket->conn_list);\n \tret = pthread_mutex_init(&vsocket->conn_mutex, NULL);\n \tif (ret) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"error: failed to init connection mutex\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to init connection mutex\\n\", path);\n \t\tgoto out_free;\n \t}\n \tvsocket->vdpa_dev = NULL;\n@@ -884,9 +865,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \tif (vsocket->async_copy &&\n \t\t(flags & (RTE_VHOST_USER_IOMMU_SUPPORT |\n \t\tRTE_VHOST_USER_POSTCOPY_SUPPORT))) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"error: enabling async copy and IOMMU \"\n-\t\t\t\"or post-copy feature simultaneously is not \"\n-\t\t\t\"supported\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) enabling async copy and IOMMU \"\n+\t\t\t\"or post-copy feature simultaneously is not supported\\n\", path);\n \t\tgoto out_mutex;\n \t}\n \n@@ -910,8 +890,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \tif (vsocket->async_copy) {\n \t\tvsocket->supported_features &= ~(1ULL << VHOST_F_LOG_ALL);\n \t\tvsocket->features &= ~(1ULL << VHOST_F_LOG_ALL);\n-\t\tVHOST_LOG_CONFIG(INFO,\n-\t\t\t\"Logging feature is disabled in async copy mode\\n\");\n+\t\tVHOST_LOG_CONFIG(INFO, \"(%s) logging feature is disabled in async copy mode\\n\",\n+\t\t\t\tpath);\n \t}\n \n \t/*\n@@ -925,9 +905,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \t\t\t\t(1ULL << VIRTIO_NET_F_HOST_TSO6) |\n \t\t\t\t(1ULL << VIRTIO_NET_F_HOST_UFO);\n \n-\t\tVHOST_LOG_CONFIG(INFO,\n-\t\t\t\"Linear buffers requested without external buffers, \"\n-\t\t\t\"disabling host segmentation offloading support\\n\");\n+\t\tVHOST_LOG_CONFIG(INFO, \"(%s) Linear buffers requested without external buffers, \"\n+\t\t\t\"disabling host segmentation offloading support\\n\", path);\n \t\tvsocket->supported_features &= ~seg_offload_features;\n \t\tvsocket->features &= ~seg_offload_features;\n \t}\n@@ -942,8 +921,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \t\t\t~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);\n \t} else {\n #ifndef RTE_LIBRTE_VHOST_POSTCOPY\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"Postcopy requested but not compiled\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) Postcopy requested but not compiled\\n\", path);\n \t\tret = -1;\n \t\tgoto out_mutex;\n #endif\n@@ -970,8 +948,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \n out_mutex:\n \tif (pthread_mutex_destroy(&vsocket->conn_mutex)) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"error: failed to destroy connection mutex\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to destroy connection mutex\\n\", path);\n \t}\n out_free:\n \tvhost_user_socket_mem_free(vsocket);\n@@ -1059,9 +1036,7 @@ rte_vhost_driver_unregister(const char *path)\n \t\t\t\tgoto again;\n \t\t\t}\n \n-\t\t\tVHOST_LOG_CONFIG(INFO,\n-\t\t\t\t\"free connfd = %d for device '%s'\\n\",\n-\t\t\t\tconn->connfd, path);\n+\t\t\tVHOST_LOG_CONFIG(INFO, \"(%s) free connfd %d\\n\", path, conn->connfd);\n \t\t\tclose(conn->connfd);\n \t\t\tvhost_destroy_device(conn->vid);\n \t\t\tTAILQ_REMOVE(&vsocket->conn_list, conn, next);\n@@ -1137,8 +1112,7 @@ rte_vhost_driver_start(const char *path)\n \t\t * rebuild the wait list of poll.\n \t\t */\n \t\tif (fdset_pipe_init(&vhost_user.fdset) < 0) 
{\n-\t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"failed to create pipe for vhost fdset\\n\");\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to create pipe for vhost fdset\\n\", path);\n \t\t\treturn -1;\n \t\t}\n \n@@ -1146,8 +1120,7 @@ rte_vhost_driver_start(const char *path)\n \t\t\t\"vhost-events\", NULL, fdset_event_dispatch,\n \t\t\t&vhost_user.fdset);\n \t\tif (ret != 0) {\n-\t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"failed to create fdset handling thread\");\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to create fdset handling thread\", path);\n \n \t\t\tfdset_pipe_uninit(&vhost_user.fdset);\n \t\t\treturn -1;\n",
    "prefixes": [
        "3/7"
    ]
}
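
For the update operations (the PUT and PATCH methods listed in the Allow header above), a hedged sketch along the same lines. It assumes token authentication (an "Authorization: Token ..." header) with sufficient maintainer rights, that "state" is writable through this endpoint, and that "accepted" is a valid state name on this instance; none of these details appear in the response above, so treat them as placeholders.

import requests

API_TOKEN = "..."  # hypothetical token value, left elided on purpose

# Partial update: change only the patch state, leaving every other field untouched.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/105359/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()
print(resp.json()["state"])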