get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/105360/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 105360,
    "url": "http://patches.dpdk.org/api/patches/105360/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211223083659.245766-5-maxime.coquelin@redhat.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211223083659.245766-5-maxime.coquelin@redhat.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211223083659.245766-5-maxime.coquelin@redhat.com",
    "date": "2021-12-23T08:36:56",
    "name": "[4/7] vhost: improve Vhost layer logs",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3c80b8efdbff2ba7b0840944d9db06b2079a09f2",
    "submitter": {
        "id": 512,
        "url": "http://patches.dpdk.org/api/people/512/?format=api",
        "name": "Maxime Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211223083659.245766-5-maxime.coquelin@redhat.com/mbox/",
    "series": [
        {
            "id": 21013,
            "url": "http://patches.dpdk.org/api/series/21013/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21013",
            "date": "2021-12-23T08:36:52",
            "name": "vhost: improve logging",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/21013/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/105360/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/105360/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5B2C3A0350;\n\tThu, 23 Dec 2021 09:37:50 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B695041157;\n\tThu, 23 Dec 2021 09:37:39 +0100 (CET)",
            "from us-smtp-delivery-124.mimecast.com\n (us-smtp-delivery-124.mimecast.com [170.10.129.124])\n by mails.dpdk.org (Postfix) with ESMTP id 9135941155\n for <dev@dpdk.org>; Thu, 23 Dec 2021 09:37:38 +0100 (CET)",
            "from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com\n [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS\n (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n us-mta-423-QEyuuk7cOs23Sf04JEnPsg-1; Thu, 23 Dec 2021 03:37:35 -0500",
            "from smtp.corp.redhat.com (int-mx01.intmail.prod.int.phx2.redhat.com\n [10.5.11.11])\n (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n (No client certificate requested)\n by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 15D8A1023F4D;\n Thu, 23 Dec 2021 08:37:34 +0000 (UTC)",
            "from max-t490s.redhat.com (unknown [10.39.208.14])\n by smtp.corp.redhat.com (Postfix) with ESMTP id ED49F5BD14;\n Thu, 23 Dec 2021 08:37:32 +0000 (UTC)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com;\n s=mimecast20190719; t=1640248658;\n h=from:from:reply-to:subject:subject:date:date:message-id:message-id:\n to:to:cc:cc:mime-version:mime-version:content-type:content-type:\n content-transfer-encoding:content-transfer-encoding:\n in-reply-to:in-reply-to:references:references;\n bh=MwIUU9AkyrAAk3mlArJaYbTVD+rN/2oEbG7O3VclWZQ=;\n b=UEYbg35VcAJFr8FmOugCZV276j3o6SsLd0wRw2DAlFpmZ2D4FQswj+hcPaKrMv7cA6JuTt\n IpS9xQ08f1Y3uO7w3YcDgxI+l9RxkZPv2yW/Zb2K7w3De9tQdr3LKUi3WN2FLCu2xLIhP7\n wmWuDWVfOAg8tiTBgId88CR7s99bAq4=",
        "X-MC-Unique": "QEyuuk7cOs23Sf04JEnPsg-1",
        "From": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "To": "dev@dpdk.org,\n\tchenbo.xia@intel.com,\n\tdavid.marchand@redhat.com",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "Subject": "[PATCH 4/7] vhost: improve Vhost layer logs",
        "Date": "Thu, 23 Dec 2021 09:36:56 +0100",
        "Message-Id": "<20211223083659.245766-5-maxime.coquelin@redhat.com>",
        "In-Reply-To": "<20211223083659.245766-1-maxime.coquelin@redhat.com>",
        "References": "<20211223083659.245766-1-maxime.coquelin@redhat.com>",
        "MIME-Version": "1.0",
        "X-Scanned-By": "MIMEDefang 2.79 on 10.5.11.11",
        "Authentication-Results": "relay.mimecast.com;\n auth=pass smtp.auth=CUSA124A263 smtp.mailfrom=maxime.coquelin@redhat.com",
        "X-Mimecast-Spam-Score": "0",
        "X-Mimecast-Originator": "redhat.com",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain; charset=\"US-ASCII\"",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This patch prepends Vhost logs with the Vhost-user socket\npath when available to ease filtering logs for a given port.\n\nSigned-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n lib/vhost/vhost.c | 104 +++++++++++++++++++++++-----------------------\n 1 file changed, 51 insertions(+), 53 deletions(-)",
    "diff": "diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c\nindex e52d7f7bb6..dac0915ac0 100644\n--- a/lib/vhost/vhost.c\n+++ b/lib/vhost/vhost.c\n@@ -58,9 +58,8 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \n \t\tvhost_user_iotlb_pending_insert(dev, vq, iova, perm);\n \t\tif (vhost_user_iotlb_miss(dev, iova, perm)) {\n-\t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"IOTLB miss req failed for IOVA 0x%\" PRIx64 \"\\n\",\n-\t\t\t\tiova);\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%s) IOTLB miss req failed for IOVA 0x%\" PRIx64 \"\\n\",\n+\t\t\t\tdev->ifname, iova);\n \t\t\tvhost_user_iotlb_pending_remove(vq, iova, 1, perm);\n \t\t}\n \n@@ -126,8 +125,8 @@ __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \thva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);\n \tif (map_len != len) {\n \t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\"Failed to write log for IOVA 0x%\" PRIx64 \". No IOTLB entry found\\n\",\n-\t\t\tiova);\n+\t\t\t\"(%s) failed to write log for IOVA 0x%\" PRIx64 \". No IOTLB entry found\\n\",\n+\t\t\tdev->ifname, iova);\n \t\treturn;\n \t}\n \n@@ -243,8 +242,8 @@ __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \thva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);\n \tif (map_len != len) {\n \t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\"Failed to write log for IOVA 0x%\" PRIx64 \". No IOTLB entry found\\n\",\n-\t\t\tiova);\n+\t\t\t\"(%s) failed to write log for IOVA 0x%\" PRIx64 \". No IOTLB entry found\\n\",\n+\t\t\tdev->ifname, iova);\n \t\treturn;\n \t}\n \n@@ -422,9 +421,9 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\tgpa = hva_to_gpa(dev, hva, exp_size);\n \t\tif (!gpa) {\n \t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"VQ: Failed to find GPA for log_addr: 0x%\"\n+\t\t\t\t\"(%s) failed to find GPA for log_addr: 0x%\"\n \t\t\t\tPRIx64 \" hva: 0x%\" PRIx64 \"\\n\",\n-\t\t\t\tlog_addr, hva);\n+\t\t\t\tdev->ifname, log_addr, hva);\n \t\t\treturn 0;\n \t\t}\n \t\treturn gpa;\n@@ -551,16 +550,15 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n \tint numa_node = SOCKET_ID_ANY;\n \n \tif (vring_idx >= VHOST_MAX_VRING) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"Failed not init vring, out of bound (%d)\\n\",\n-\t\t\t\tvring_idx);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed not init vring, out of bound (%d)\\n\",\n+\t\t\t\tdev->ifname, vring_idx);\n \t\treturn;\n \t}\n \n \tvq = dev->virtqueue[vring_idx];\n \tif (!vq) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"Virtqueue not allocated (%d)\\n\",\n-\t\t\t\tvring_idx);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) virtqueue not allocated (%d)\\n\",\n+\t\t\t\tdev->ifname, vring_idx);\n \t\treturn;\n \t}\n \n@@ -572,8 +570,8 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n \n #ifdef RTE_LIBRTE_VHOST_NUMA\n \tif (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"(%d) failed to query numa node: %s\\n\",\n-\t\t\tdev->vid, rte_strerror(errno));\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to query numa node: %s\\n\",\n+\t\t\tdev->ifname, rte_strerror(errno));\n \t\tnuma_node = SOCKET_ID_ANY;\n \t}\n #endif\n@@ -590,15 +588,15 @@ reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n \n \tif (vring_idx >= VHOST_MAX_VRING) {\n \t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"Failed not init vring, out of bound (%d)\\n\",\n-\t\t\t\tvring_idx);\n+\t\t\t\t\"(%s) failed to reset vring, out of bound (%d)\\n\",\n+\t\t\t\tdev->ifname, vring_idx);\n \t\treturn;\n \t}\n \n \tvq = dev->virtqueue[vring_idx];\n \tif (!vq) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"Virtqueue not allocated (%d)\\n\",\n-\t\t\t\tvring_idx);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to reset vring, virtqueue not allocated (%d)\\n\",\n+\t\t\t\tdev->ifname, vring_idx);\n \t\treturn;\n \t}\n \n@@ -620,8 +618,8 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n \n \t\tvq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);\n \t\tif (vq == NULL) {\n-\t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"Failed to allocate memory for vring:%u.\\n\", i);\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to allocate memory for vring %u.\\n\",\n+\t\t\t\t\tdev->ifname, i);\n \t\t\treturn -1;\n \t\t}\n \n@@ -673,16 +671,14 @@ vhost_new_device(void)\n \t}\n \n \tif (i == MAX_VHOST_DEVICE) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"Failed to find a free slot for new device.\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"failed to find a free slot for new device.\\n\");\n \t\tpthread_mutex_unlock(&vhost_dev_lock);\n \t\treturn -1;\n \t}\n \n \tdev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);\n \tif (dev == NULL) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"Failed to allocate memory for new dev.\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"failed to allocate memory for new device.\\n\");\n \t\tpthread_mutex_unlock(&vhost_dev_lock);\n \t\treturn -1;\n \t}\n@@ -834,9 +830,8 @@ rte_vhost_get_numa_node(int vid)\n \tret = get_mempolicy(&numa_node, NULL, 0, dev,\n \t\t\t    MPOL_F_NODE | MPOL_F_ADDR);\n \tif (ret < 0) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"(%d) failed to query numa node: %s\\n\",\n-\t\t\tvid, rte_strerror(errno));\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to query numa node: %s\\n\",\n+\t\t\tdev->ifname, rte_strerror(errno));\n \t\treturn -1;\n \t}\n \n@@ -1470,8 +1465,8 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)\n \t\treturn 0;\n \n \tif (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: invalid virtqueue idx %d.\\n\",\n-\t\t\tdev->vid, __func__, qid);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: invalid virtqueue idx %d.\\n\",\n+\t\t\tdev->ifname, __func__, qid);\n \t\treturn 0;\n \t}\n \n@@ -1636,23 +1631,23 @@ async_channel_register(int vid, uint16_t queue_id,\n \n \tif (unlikely(vq->async)) {\n \t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\"async register failed: already registered (vid %d, qid: %d)\\n\",\n-\t\t\t\tvid, queue_id);\n+\t\t\t\t\"(%s) async register failed: already registered (qid: %d)\\n\",\n+\t\t\t\tdev->ifname, queue_id);\n \t\treturn -1;\n \t}\n \n \tasync = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);\n \tif (!async) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"failed to allocate async metadata (vid %d, qid: %d)\\n\",\n-\t\t\t\tvid, queue_id);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to allocate async metadata (qid: %d)\\n\",\n+\t\t\t\tdev->ifname, queue_id);\n \t\treturn -1;\n \t}\n \n \tasync->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),\n \t\t\tRTE_CACHE_LINE_SIZE, node);\n \tif (!async->pkts_info) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"failed to allocate async_pkts_info (vid %d, qid: %d)\\n\",\n-\t\t\t\tvid, queue_id);\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to allocate async_pkts_info (qid: %d)\\n\",\n+\t\t\t\tdev->ifname, queue_id);\n \t\tgoto out_free_async;\n \t}\n \n@@ -1661,8 +1656,8 @@ async_channel_register(int vid, uint16_t queue_id,\n \t\t\t\tvq->size * sizeof(struct vring_used_elem_packed),\n \t\t\t\tRTE_CACHE_LINE_SIZE, node);\n \t\tif (!async->buffers_packed) {\n-\t\t\tVHOST_LOG_CONFIG(ERR, \"failed to allocate async buffers (vid %d, qid: %d)\\n\",\n-\t\t\t\t\tvid, queue_id);\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to allocate async buffers (qid: %d)\\n\",\n+\t\t\t\t\tdev->ifname, queue_id);\n \t\t\tgoto out_free_inflight;\n \t\t}\n \t} else {\n@@ -1670,8 +1665,8 @@ async_channel_register(int vid, uint16_t queue_id,\n \t\t\t\tvq->size * sizeof(struct vring_used_elem),\n \t\t\t\tRTE_CACHE_LINE_SIZE, node);\n \t\tif (!async->descs_split) {\n-\t\t\tVHOST_LOG_CONFIG(ERR, \"failed to allocate async descs (vid %d, qid: %d)\\n\",\n-\t\t\t\t\tvid, queue_id);\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to allocate async descs (qid: %d)\\n\",\n+\t\t\t\t\tdev->ifname, queue_id);\n \t\t\tgoto out_free_inflight;\n \t\t}\n \t}\n@@ -1712,8 +1707,8 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n \n \tif (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {\n \t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"async copy is not supported on non-inorder mode \"\n-\t\t\t\"(vid %d, qid: %d)\\n\", vid, queue_id);\n+\t\t\t\"(%s) async copy is not supported on non-inorder mode (qid: %d)\\n\",\n+\t\t\tdev->ifname, queue_id);\n \t\treturn -1;\n \t}\n \n@@ -1749,8 +1744,8 @@ rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,\n \n \tif (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {\n \t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"async copy is not supported on non-inorder mode \"\n-\t\t\t\"(vid %d, qid: %d)\\n\", vid, queue_id);\n+\t\t\t\"(%s) async copy is not supported on non-inorder mode (qid: %d)\\n\",\n+\t\t\tdev->ifname, queue_id);\n \t\treturn -1;\n \t}\n \n@@ -1785,14 +1780,15 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)\n \t\treturn ret;\n \n \tif (!rte_spinlock_trylock(&vq->access_lock)) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"Failed to unregister async channel. \"\n-\t\t\t\"virt queue busy.\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to unregister async channel, virtqueue busy.\\n\",\n+\t\t\t\tdev->ifname);\n \t\treturn -1;\n \t}\n \n \tif (vq->async->pkts_inflight_n) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"Failed to unregister async channel. \"\n+\t\t\t\"async inflight packets must be completed before unregistration.\\n\",\n+\t\t\tdev->ifname);\n \t\tret = -1;\n \t\tgoto out;\n \t}\n@@ -1825,8 +1821,9 @@ rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)\n \t\treturn 0;\n \n \tif (vq->async->pkts_inflight_n) {\n-\t\tVHOST_LOG_CONFIG(ERR, \"Failed to unregister async channel. \"\n-\t\t\t\"async inflight packets must be completed before unregistration.\\n\");\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%s) failed to unregister async channel. \"\n+\t\t\t\"async inflight packets must be completed before unregistration.\\n\",\n+\t\t\tdev->ifname);\n \t\treturn -1;\n \t}\n \n@@ -1857,8 +1854,9 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)\n \t\treturn ret;\n \n \tif (!rte_spinlock_trylock(&vq->access_lock)) {\n-\t\tVHOST_LOG_CONFIG(DEBUG, \"Failed to check in-flight packets. \"\n-\t\t\t\"virt queue busy.\\n\");\n+\t\tVHOST_LOG_CONFIG(DEBUG,\n+\t\t\t\"(%s) failed to check in-flight packets. virtqueue busy.\\n\",\n+\t\t\tdev->ifname);\n \t\treturn ret;\n \t}\n \n",
    "prefixes": [
        "4/7"
    ]
}