get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

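For illustration, a minimal sketch of these operations using Python's requests library follows. It is not part of the API output: the token value and the choice of "state" as the field to update are assumptions, and write access on the real instance requires maintainer credentials. The GET call below is what produced the sample response shown after it.

import requests

URL = "http://patches.dpdk.org/api/patches/105363/"

# GET: read the patch; anonymous access is sufficient.
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# PATCH: partial update of selected fields (PUT would send a full
# replacement). The token below is a placeholder, not a real credential.
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}
resp = requests.patch(URL, json={"state": "accepted"}, headers=headers)
resp.raise_for_status()
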
GET /api/patches/105363/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 105363,
    "url": "http://patches.dpdk.org/api/patches/105363/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211223083659.245766-7-maxime.coquelin@redhat.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211223083659.245766-7-maxime.coquelin@redhat.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211223083659.245766-7-maxime.coquelin@redhat.com",
    "date": "2021-12-23T08:36:58",
    "name": "[6/7] vhost: improve Virtio-net layer logs",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ee52c754e7b8062a5a0640767164c03e06b9ecae",
    "submitter": {
        "id": 512,
        "url": "http://patches.dpdk.org/api/people/512/?format=api",
        "name": "Maxime Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211223083659.245766-7-maxime.coquelin@redhat.com/mbox/",
    "series": [
        {
            "id": 21013,
            "url": "http://patches.dpdk.org/api/series/21013/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21013",
            "date": "2021-12-23T08:36:52",
            "name": "vhost: improve logging",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/21013/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/105363/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/105363/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9FCBBA0350;\n\tThu, 23 Dec 2021 09:38:32 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 7C9B841101;\n\tThu, 23 Dec 2021 09:38:32 +0100 (CET)",
            "from us-smtp-delivery-124.mimecast.com\n (us-smtp-delivery-124.mimecast.com [170.10.129.124])\n by mails.dpdk.org (Postfix) with ESMTP id 001A0410F1\n for <dev@dpdk.org>; Thu, 23 Dec 2021 09:38:30 +0100 (CET)",
            "from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com\n [209.132.183.4]) by relay.mimecast.com with ESMTP with STARTTLS\n (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n us-mta-75-FslaKDzuM9yz4zgfVdYZjQ-1; Thu, 23 Dec 2021 03:37:49 -0500",
            "from smtp.corp.redhat.com (int-mx01.intmail.prod.int.phx2.redhat.com\n [10.5.11.11])\n (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n (No client certificate requested)\n by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 4DAC181EE62;\n Thu, 23 Dec 2021 08:37:48 +0000 (UTC)",
            "from max-t490s.redhat.com (unknown [10.39.208.14])\n by smtp.corp.redhat.com (Postfix) with ESMTP id 1A6205BD14;\n Thu, 23 Dec 2021 08:37:39 +0000 (UTC)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com;\n s=mimecast20190719; t=1640248710;\n h=from:from:reply-to:subject:subject:date:date:message-id:message-id:\n to:to:cc:cc:mime-version:mime-version:content-type:content-type:\n content-transfer-encoding:content-transfer-encoding:\n in-reply-to:in-reply-to:references:references;\n bh=lPsp9KBE0KhltqsyfWyGbnigHQrz9vCF+KCshEFvF/g=;\n b=dUh9k5pptsQ+7DnxgtydLhcmbhMIQS5vD5WEfZphoNLCkrzzAQQ28KMqTyBCUufSmCuIaS\n IOmWgIC9fTYnpqj8ZXoLf1bSHf4g0WIRrWN+p07MH/+5vm3Et/mQuk/lpE0xUvgFA0Wmz6\n LJe28TWgSt/pNCQHfjJOfeMFrw5iU04=",
        "X-MC-Unique": "FslaKDzuM9yz4zgfVdYZjQ-1",
        "From": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "To": "dev@dpdk.org,\n\tchenbo.xia@intel.com,\n\tdavid.marchand@redhat.com",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "Subject": "[PATCH 6/7] vhost: improve Virtio-net layer logs",
        "Date": "Thu, 23 Dec 2021 09:36:58 +0100",
        "Message-Id": "<20211223083659.245766-7-maxime.coquelin@redhat.com>",
        "In-Reply-To": "<20211223083659.245766-1-maxime.coquelin@redhat.com>",
        "References": "<20211223083659.245766-1-maxime.coquelin@redhat.com>",
        "MIME-Version": "1.0",
        "X-Scanned-By": "MIMEDefang 2.79 on 10.5.11.11",
        "Authentication-Results": "relay.mimecast.com;\n auth=pass smtp.auth=CUSA124A263 smtp.mailfrom=maxime.coquelin@redhat.com",
        "X-Mimecast-Spam-Score": "0",
        "X-Mimecast-Originator": "redhat.com",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain; charset=\"US-ASCII\"",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This patch standardizes logging done in Virtio-net, so that\nthe Vhost-user socket path is always prepended to the logs.\nIt will ease log analysis when multiple Vhost-user ports\nare in use.\n\nSigned-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n lib/vhost/virtio_net.c | 165 ++++++++++++++++++++---------------------\n 1 file changed, 79 insertions(+), 86 deletions(-)",
    "diff": "diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex b3d954aab4..f19713137c 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -792,12 +792,12 @@ copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,\n }\n \n static __rte_always_inline int\n-async_iter_initialize(struct vhost_async *async)\n+async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)\n {\n \tstruct rte_vhost_iov_iter *iter;\n \n \tif (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {\n-\t\tVHOST_LOG_DATA(ERR, \"no more async iovec available\\n\");\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) no more async iovec available\\n\", dev->ifname);\n \t\treturn -1;\n \t}\n \n@@ -809,7 +809,8 @@ async_iter_initialize(struct vhost_async *async)\n }\n \n static __rte_always_inline int\n-async_iter_add_iovec(struct vhost_async *async, void *src, void *dst, size_t len)\n+async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,\n+\t\tvoid *src, void *dst, size_t len)\n {\n \tstruct rte_vhost_iov_iter *iter;\n \tstruct rte_vhost_iovec *iovec;\n@@ -818,7 +819,7 @@ async_iter_add_iovec(struct vhost_async *async, void *src, void *dst, size_t len\n \t\tstatic bool vhost_max_async_vec_log;\n \n \t\tif (!vhost_max_async_vec_log) {\n-\t\t\tVHOST_LOG_DATA(ERR, \"no more async iovec available\\n\");\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%s) no more async iovec available\\n\", dev->ifname);\n \t\t\tvhost_max_async_vec_log = true;\n \t\t}\n \n@@ -876,11 +877,11 @@ async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\thpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,\n \t\t\t\tbuf_iova + buf_offset, cpy_len, &mapped_len);\n \t\tif (unlikely(!hpa)) {\n-\t\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: failed to get hpa.\\n\", dev->vid, __func__);\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: failed to get hpa.\\n\", dev->ifname, __func__);\n \t\t\treturn -1;\n \t\t}\n \n-\t\tif (unlikely(async_iter_add_iovec(async,\n+\t\tif (unlikely(async_iter_add_iovec(dev, async,\n \t\t\t\t\t\t(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,\n \t\t\t\t\t\t\tmbuf_offset),\n \t\t\t\t\t\thpa, (size_t)mapped_len)))\n@@ -951,8 +952,8 @@ mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t} else\n \t\thdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;\n \n-\tVHOST_LOG_DATA(DEBUG, \"(%d) RX: num merge buffers %d\\n\",\n-\t\tdev->vid, num_buffers);\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) RX: num merge buffers %d\\n\",\n+\t\tdev->ifname, num_buffers);\n \n \tif (unlikely(buf_len < dev->vhost_hlen)) {\n \t\tbuf_offset = dev->vhost_hlen - buf_len;\n@@ -970,7 +971,7 @@ mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tmbuf_offset = 0;\n \n \tif (is_async) {\n-\t\tif (async_iter_initialize(async))\n+\t\tif (async_iter_initialize(dev, async))\n \t\t\treturn -1;\n \t}\n \n@@ -1133,14 +1134,14 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\t\t\t\t\tpkt_len, buf_vec, &num_buffers,\n \t\t\t\t\t\tavail_head, &nr_vec) < 0)) {\n \t\t\tVHOST_LOG_DATA(DEBUG,\n-\t\t\t\t\"(%d) failed to get enough desc from vring\\n\",\n-\t\t\t\tdev->vid);\n+\t\t\t\t\"(%s) failed to get enough desc from vring\\n\",\n+\t\t\t\tdev->ifname);\n \t\t\tvq->shadow_used_idx -= num_buffers;\n \t\t\tbreak;\n \t\t}\n \n-\t\tVHOST_LOG_DATA(DEBUG, \"(%d) current index %d | end index %d\\n\",\n-\t\t\tdev->vid, vq->last_avail_idx,\n+\t\tVHOST_LOG_DATA(DEBUG, \"(%s) current index %d | end index %d\\n\",\n+\t\t\tdev->ifname, vq->last_avail_idx,\n \t\t\tvq->last_avail_idx 
+ num_buffers);\n \n \t\tif (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,\n@@ -1287,14 +1288,13 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,\n \n \tif (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,\n \t\t\t\t\t\t &nr_descs) < 0)) {\n-\t\tVHOST_LOG_DATA(DEBUG,\n-\t\t\t\t\"(%d) failed to get enough desc from vring\\n\",\n-\t\t\t\tdev->vid);\n+\t\tVHOST_LOG_DATA(DEBUG, \"(%s) failed to get enough desc from vring\\n\",\n+\t\t\t\tdev->ifname);\n \t\treturn -1;\n \t}\n \n-\tVHOST_LOG_DATA(DEBUG, \"(%d) current index %d | end index %d\\n\",\n-\t\t\tdev->vid, vq->last_avail_idx,\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) current index %d | end index %d\\n\",\n+\t\t\tdev->ifname, vq->last_avail_idx,\n \t\t\tvq->last_avail_idx + nr_descs);\n \n \tvq_inc_last_avail_packed(vq, nr_descs);\n@@ -1345,10 +1345,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,\n \tstruct vhost_virtqueue *vq;\n \tuint32_t nb_tx = 0;\n \n-\tVHOST_LOG_DATA(DEBUG, \"(%d) %s\\n\", dev->vid, __func__);\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) %s\\n\", dev->ifname, __func__);\n \tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: invalid virtqueue idx %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: invalid virtqueue idx %d.\\n\",\n+\t\t\tdev->ifname, __func__, queue_id);\n \t\treturn 0;\n \t}\n \n@@ -1395,9 +1395,8 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,\n \t\treturn 0;\n \n \tif (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {\n-\t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\"(%d) %s: built-in vhost net backend is disabled.\\n\",\n-\t\t\tdev->vid, __func__);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: built-in vhost net backend is disabled.\\n\",\n+\t\t\tdev->ifname, __func__);\n \t\treturn 0;\n \t}\n \n@@ -1479,14 +1478,14 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \n \t\tif (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,\n \t\t\t\t\t\t&num_buffers, avail_head, &nr_vec) < 0)) {\n-\t\t\tVHOST_LOG_DATA(DEBUG, \"(%d) failed to get enough desc from vring\\n\",\n-\t\t\t\t\tdev->vid);\n+\t\t\tVHOST_LOG_DATA(DEBUG, \"(%s) failed to get enough desc from vring\\n\",\n+\t\t\t\t\tdev->ifname);\n \t\t\tvq->shadow_used_idx -= num_buffers;\n \t\t\tbreak;\n \t\t}\n \n-\t\tVHOST_LOG_DATA(DEBUG, \"(%d) current index %d | end index %d\\n\",\n-\t\t\tdev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers);\n+\t\tVHOST_LOG_DATA(DEBUG, \"(%s) current index %d | end index %d\\n\",\n+\t\t\tdev->ifname, vq->last_avail_idx, vq->last_avail_idx + num_buffers);\n \n \t\tif (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {\n \t\t\tvq->shadow_used_idx -= num_buffers;\n@@ -1505,8 +1504,8 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \n \tn_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);\n \tif (unlikely(n_xfer < 0)) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: failed to transfer data for queue id %d.\\n\",\n-\t\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\tdev->ifname, __func__, queue_id);\n \t\tn_xfer = 0;\n \t}\n \n@@ -1619,12 +1618,12 @@ virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \n \tif (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,\n \t\t\t\t\tnr_descs, nr_buffers) < 0)) {\n-\t\tVHOST_LOG_DATA(DEBUG, \"(%d) failed to get enough desc from vring\\n\", 
dev->vid);\n+\t\tVHOST_LOG_DATA(DEBUG, \"(%s) failed to get enough desc from vring\\n\", dev->ifname);\n \t\treturn -1;\n \t}\n \n-\tVHOST_LOG_DATA(DEBUG, \"(%d) current index %d | end index %d\\n\",\n-\t\t\tdev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) current index %d | end index %d\\n\",\n+\t\t\tdev->ifname, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);\n \n \treturn 0;\n }\n@@ -1696,8 +1695,8 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,\n \n \tn_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);\n \tif (unlikely(n_xfer < 0)) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: failed to transfer data for queue id %d.\\n\",\n-\t\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\tdev->ifname, __func__, queue_id);\n \t\tn_xfer = 0;\n \t}\n \n@@ -1837,8 +1836,8 @@ vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,\n \n \tn_cpl = async->ops.check_completed_copies(dev->vid, queue_id, 0, count);\n \tif (unlikely(n_cpl < 0)) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: failed to check completed copies for queue id %d.\\n\",\n-\t\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: failed to check completed copies for queue id %d.\\n\",\n+\t\t\t\tdev->ifname, __func__, queue_id);\n \t\treturn 0;\n \t}\n \n@@ -1891,18 +1890,18 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \tif (unlikely(!dev))\n \t\treturn 0;\n \n-\tVHOST_LOG_DATA(DEBUG, \"(%d) %s\\n\", dev->vid, __func__);\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) %s\\n\", dev->ifname, __func__);\n \tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: invalid virtqueue idx %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: invalid virtqueue idx %d.\\n\",\n+\t\t\tdev->ifname, __func__, queue_id);\n \t\treturn 0;\n \t}\n \n \tvq = dev->virtqueue[queue_id];\n \n \tif (unlikely(!vq->async)) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: async not registered for queue id %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: async not registered for queue id %d.\\n\",\n+\t\t\tdev->ifname, __func__, queue_id);\n \t\treturn 0;\n \t}\n \n@@ -1926,18 +1925,18 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,\n \tif (!dev)\n \t\treturn 0;\n \n-\tVHOST_LOG_DATA(DEBUG, \"(%d) %s\\n\", dev->vid, __func__);\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) %s\\n\", dev->ifname, __func__);\n \tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: invalid virtqueue idx %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: invalid virtqueue idx %d.\\n\",\n+\t\t\tdev->ifname, __func__, queue_id);\n \t\treturn 0;\n \t}\n \n \tvq = dev->virtqueue[queue_id];\n \n \tif (unlikely(!vq->async)) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: async not registered for queue id %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: async not registered for queue id %d.\\n\",\n+\t\t\tdev->ifname, __func__, queue_id);\n \t\treturn 0;\n \t}\n \n@@ -1953,10 +1952,10 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,\n \tstruct vhost_virtqueue *vq;\n \tuint32_t nb_tx = 0;\n \n-\tVHOST_LOG_DATA(DEBUG, \"(%d) %s\\n\", dev->vid, __func__);\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) %s\\n\", dev->ifname, __func__);\n \tif 
(unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: invalid virtqueue idx %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: invalid virtqueue idx %d.\\n\",\n+\t\t\tdev->ifname, __func__, queue_id);\n \t\treturn 0;\n \t}\n \n@@ -2005,9 +2004,8 @@ rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,\n \t\treturn 0;\n \n \tif (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {\n-\t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\"(%d) %s: built-in vhost net backend is disabled.\\n\",\n-\t\t\tdev->vid, __func__);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: built-in vhost net backend is disabled.\\n\",\n+\t\t\tdev->ifname, __func__);\n \t\treturn 0;\n \t}\n \n@@ -2114,7 +2112,8 @@ parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)\n }\n \n static __rte_always_inline void\n-vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)\n+vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,\n+\t\tstruct rte_mbuf *m)\n {\n \tuint8_t l4_proto = 0;\n \tstruct rte_tcp_hdr *tcp_hdr = NULL;\n@@ -2174,8 +2173,8 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)\n \t\t\tm->l4_len = sizeof(struct rte_udp_hdr);\n \t\t\tbreak;\n \t\tdefault:\n-\t\t\tVHOST_LOG_DATA(WARNING,\n-\t\t\t\t\"unsupported gso type %u.\\n\", hdr->gso_type);\n+\t\t\tVHOST_LOG_DATA(WARNING, \"(%s) unsupported gso type %u.\\n\",\n+\t\t\t\t\tdev->ifname, hdr->gso_type);\n \t\t\tgoto error;\n \t\t}\n \t}\n@@ -2188,8 +2187,8 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)\n }\n \n static __rte_always_inline void\n-vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,\n-\tbool legacy_ol_flags)\n+vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,\n+\t\tstruct rte_mbuf *m, bool legacy_ol_flags)\n {\n \tstruct rte_net_hdr_lens hdr_lens;\n \tint l4_supported = 0;\n@@ -2199,7 +2198,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,\n \t\treturn;\n \n \tif (legacy_ol_flags) {\n-\t\tvhost_dequeue_offload_legacy(hdr, m);\n+\t\tvhost_dequeue_offload_legacy(dev, hdr, m);\n \t\treturn;\n \t}\n \n@@ -2412,8 +2411,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\tif (mbuf_avail == 0) {\n \t\t\tcur = rte_pktmbuf_alloc(mbuf_pool);\n \t\t\tif (unlikely(cur == NULL)) {\n-\t\t\t\tVHOST_LOG_DATA(ERR, \"Failed to \"\n-\t\t\t\t\t\"allocate memory for mbuf.\\n\");\n+\t\t\t\tVHOST_LOG_DATA(ERR, \"(%s) failed to allocate memory for mbuf.\\n\",\n+\t\t\t\t\t\tdev->ifname);\n \t\t\t\terror = -1;\n \t\t\t\tgoto out;\n \t\t\t}\n@@ -2433,7 +2432,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tm->pkt_len    += mbuf_offset;\n \n \tif (hdr)\n-\t\tvhost_dequeue_offload(hdr, m, legacy_ol_flags);\n+\t\tvhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);\n \n out:\n \n@@ -2447,7 +2446,7 @@ virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)\n }\n \n static int\n-virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)\n+virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t size)\n {\n \tstruct rte_mbuf_ext_shared_info *shinfo = NULL;\n \tuint32_t total_len = RTE_PKTMBUF_HEADROOM + size;\n@@ -2471,7 +2470,7 @@ virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)\n \t\t\t\t\t\tvirtio_dev_extbuf_free, buf);\n \tif (unlikely(shinfo == NULL)) {\n \t\trte_free(buf);\n-\t\tVHOST_LOG_DATA(ERR, \"Failed to init 
shinfo\\n\");\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) failed to init shinfo\\n\", dev->ifname);\n \t\treturn -1;\n \t}\n \n@@ -2493,7 +2492,7 @@ virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,\n \t\treturn 0;\n \n \t/* attach an external buffer if supported */\n-\tif (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))\n+\tif (dev->extbuf && !virtio_dev_extbuf_alloc(dev, pkt, data_len))\n \t\treturn 0;\n \n \t/* check if chained buffers are allowed */\n@@ -2525,12 +2524,12 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \n \trte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);\n \n-\tVHOST_LOG_DATA(DEBUG, \"(%d) %s\\n\", dev->vid, __func__);\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) %s\\n\", dev->ifname, __func__);\n \n \tcount = RTE_MIN(count, MAX_PKT_BURST);\n \tcount = RTE_MIN(count, free_entries);\n-\tVHOST_LOG_DATA(DEBUG, \"(%d) about to dequeue %u buffers\\n\",\n-\t\t\tdev->vid, count);\n+\tVHOST_LOG_DATA(DEBUG, \"(%s) about to dequeue %u buffers\\n\",\n+\t\t\tdev->ifname, count);\n \n \tif (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))\n \t\treturn 0;\n@@ -2559,9 +2558,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\t\t * is required. Drop this packet.\n \t\t\t */\n \t\t\tif (!allocerr_warned) {\n-\t\t\t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\t\t\"Failed mbuf alloc of size %d from %s on %s.\\n\",\n-\t\t\t\t\tbuf_len, mbuf_pool->name, dev->ifname);\n+\t\t\t\tVHOST_LOG_DATA(ERR, \"(%s) failed mbuf alloc of size %d from %s.\\n\",\n+\t\t\t\t\tdev->ifname, buf_len, mbuf_pool->name);\n \t\t\t\tallocerr_warned = true;\n \t\t\t}\n \t\t\tdropped += 1;\n@@ -2573,8 +2571,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\t\t\tmbuf_pool, legacy_ol_flags);\n \t\tif (unlikely(err)) {\n \t\t\tif (!allocerr_warned) {\n-\t\t\t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\t\t\"Failed to copy desc to mbuf on %s.\\n\",\n+\t\t\t\tVHOST_LOG_DATA(ERR, \"(%s) failed to copy desc to mbuf.\\n\",\n \t\t\t\t\tdev->ifname);\n \t\t\t\tallocerr_warned = true;\n \t\t\t}\n@@ -2717,7 +2714,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,\n \tif (virtio_net_with_host_offload(dev)) {\n \t\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n \t\t\thdr = (struct virtio_net_hdr *)(desc_addrs[i]);\n-\t\t\tvhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);\n+\t\t\tvhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);\n \t\t}\n \t}\n \n@@ -2756,9 +2753,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,\n \n \tif (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {\n \t\tif (!allocerr_warned) {\n-\t\t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\t\"Failed mbuf alloc of size %d from %s on %s.\\n\",\n-\t\t\t\tbuf_len, mbuf_pool->name, dev->ifname);\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%s) failed mbuf alloc of size %d from %s.\\n\",\n+\t\t\t\tdev->ifname, buf_len, mbuf_pool->name);\n \t\t\tallocerr_warned = true;\n \t\t}\n \t\treturn -1;\n@@ -2768,8 +2764,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,\n \t\t\t\tmbuf_pool, legacy_ol_flags);\n \tif (unlikely(err)) {\n \t\tif (!allocerr_warned) {\n-\t\t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\t\"Failed to copy desc to mbuf on %s.\\n\",\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%s) failed to copy desc to mbuf.\\n\",\n \t\t\t\tdev->ifname);\n \t\t\tallocerr_warned = true;\n \t\t}\n@@ -2885,16 +2880,14 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,\n \t\treturn 0;\n \n \tif (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {\n-\t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\"(%d) %s: 
built-in vhost net backend is disabled.\\n\",\n-\t\t\tdev->vid, __func__);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: built-in vhost net backend is disabled.\\n\",\n+\t\t\t\tdev->ifname, __func__);\n \t\treturn 0;\n \t}\n \n \tif (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {\n-\t\tVHOST_LOG_DATA(ERR,\n-\t\t\t\"(%d) %s: invalid virtqueue idx %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n+\t\tVHOST_LOG_DATA(ERR, \"(%s) %s: invalid virtqueue idx %d.\\n\",\n+\t\t\t\tdev->ifname, __func__, queue_id);\n \t\treturn 0;\n \t}\n \n@@ -2939,7 +2932,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,\n \n \t\trarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);\n \t\tif (rarp_mbuf == NULL) {\n-\t\t\tVHOST_LOG_DATA(ERR, \"Failed to make RARP packet.\\n\");\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%s) failed to make RARP packet.\\n\", dev->ifname);\n \t\t\tcount = 0;\n \t\t\tgoto out;\n \t\t}\n",
    "prefixes": [
        "6/7"
    ]
}
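
The "mbox" URL in the response points at the raw patch email, which can be fetched for local application with git am. A short sketch along the same lines as above (the output filename is arbitrary):

import requests

# Fetch the patch metadata, then download the raw mbox it references.
patch = requests.get("http://patches.dpdk.org/api/patches/105363/").json()
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()

# Save for local application, e.g. `git am 105363.mbox`.
with open("105363.mbox", "wb") as f:
    f.write(mbox.content)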