get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update).
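
The get operation is illustrated by the example request and response below. As a minimal client sketch (assuming Python with the third-party requests package, and that anonymous read access is allowed as on the public patches.dpdk.org instance), the same patch can be fetched and inspected like this:

    import requests

    # Patch detail endpoint from the example response below.
    url = "https://patches.dpdk.org/api/patches/94369/"

    response = requests.get(url)
    response.raise_for_status()
    patch = response.json()

    # A few of the fields shown in the example response.
    print(patch["name"])                # "[v4,6/7] vhost: allocate all data on same node as virtqueue"
    print(patch["state"])               # "superseded"
    print(patch["submitter"]["email"])  # "maxime.coquelin@redhat.com"
    print(patch["mbox"])                # mbox URL, suitable for piping into git am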

GET /api/patches/94369/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94369,
    "url": "https://patches.dpdk.org/api/patches/94369/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210617153739.178011-7-maxime.coquelin@redhat.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210617153739.178011-7-maxime.coquelin@redhat.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210617153739.178011-7-maxime.coquelin@redhat.com",
    "date": "2021-06-17T15:37:38",
    "name": "[v4,6/7] vhost: allocate all data on same node as virtqueue",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4ae56b5cb88c2db2b20139421740e7fe7ac2bca8",
    "submitter": {
        "id": 512,
        "url": "https://patches.dpdk.org/api/people/512/?format=api",
        "name": "Maxime Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "delegate": {
        "id": 2642,
        "url": "https://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210617153739.178011-7-maxime.coquelin@redhat.com/mbox/",
    "series": [
        {
            "id": 17381,
            "url": "https://patches.dpdk.org/api/series/17381/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17381",
            "date": "2021-06-17T15:37:32",
            "name": "vhost: Fix and improve NUMA reallocation",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/17381/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/94369/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/94369/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A4087A0C4B;\n\tThu, 17 Jun 2021 17:39:07 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 2AA6A410F0;\n\tThu, 17 Jun 2021 17:39:03 +0200 (CEST)",
            "from us-smtp-delivery-124.mimecast.com\n (us-smtp-delivery-124.mimecast.com [170.10.133.124])\n by mails.dpdk.org (Postfix) with ESMTP id E9A4F410EE\n for <dev@dpdk.org>; Thu, 17 Jun 2021 17:39:01 +0200 (CEST)",
            "from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com\n [209.132.183.4]) (Using TLS) by relay.mimecast.com with ESMTP id\n us-mta-344-ixstYSwkMJOYbCePiK_RwQ-1; Thu, 17 Jun 2021 11:39:00 -0400",
            "from smtp.corp.redhat.com (int-mx02.intmail.prod.int.phx2.redhat.com\n [10.5.11.12])\n (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n (No client certificate requested)\n by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 50A2180ED8E;\n Thu, 17 Jun 2021 15:38:33 +0000 (UTC)",
            "from max-t490s.redhat.com (unknown [10.36.110.45])\n by smtp.corp.redhat.com (Postfix) with ESMTP id 398227012F;\n Thu, 17 Jun 2021 15:38:32 +0000 (UTC)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com;\n s=mimecast20190719; t=1623944341;\n h=from:from:reply-to:subject:subject:date:date:message-id:message-id:\n to:to:cc:cc:mime-version:mime-version:content-type:content-type:\n content-transfer-encoding:content-transfer-encoding:\n in-reply-to:in-reply-to:references:references;\n bh=Lq5S9QpNR+V/YLgN1FInx0fcINIGFJRQhaj32q8wCGg=;\n b=Iz9c8WrCB6dH8rONkz5ZvVSzzjyHVsknL542po4IrITL6IUvz/uDA/Q3C4ngERjwRFW+4o\n EePXTKUv2GAAv6LQxM64bTfsDZVwmTxIAFi++2hHian7xAP+8uIptl4IR0Ewvb4KTwmpsW\n mnssPDtsnYTmpqtwXBliNu6Wzk19G4s=",
        "X-MC-Unique": "ixstYSwkMJOYbCePiK_RwQ-1",
        "From": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "To": "dev@dpdk.org,\n\tdavid.marchand@redhat.com,\n\tchenbo.xia@intel.com",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "Date": "Thu, 17 Jun 2021 17:37:38 +0200",
        "Message-Id": "<20210617153739.178011-7-maxime.coquelin@redhat.com>",
        "In-Reply-To": "<20210617153739.178011-1-maxime.coquelin@redhat.com>",
        "References": "<20210617153739.178011-1-maxime.coquelin@redhat.com>",
        "MIME-Version": "1.0",
        "X-Scanned-By": "MIMEDefang 2.79 on 10.5.11.12",
        "Authentication-Results": "relay.mimecast.com;\n auth=pass smtp.auth=CUSA124A263 smtp.mailfrom=maxime.coquelin@redhat.com",
        "X-Mimecast-Spam-Score": "0",
        "X-Mimecast-Originator": "redhat.com",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain; charset=\"US-ASCII\"",
        "Subject": "[dpdk-dev] [PATCH v4 6/7] vhost: allocate all data on same node as\n virtqueue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch saves the NUMA node the virtqueue is allocated\non at init time, in order to allocate all other data on the\nsame node.\n\nWhile most of the data are allocated before numa_realloc()\nis called and so the data will be reallocated properly, some\ndata like the log cache are most likely allocated after.\n\nFor the virtio device metadata, we decide to allocate them\non the same node as the VQ 0.\n\nSigned-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n lib/vhost/vhost.c      | 34 ++++++++++++++++------------------\n lib/vhost/vhost.h      |  1 +\n lib/vhost/vhost_user.c | 41 ++++++++++++++++++++++++++++-------------\n 3 files changed, 45 insertions(+), 31 deletions(-)",
    "diff": "diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c\nindex c96f6335c8..0000cd3297 100644\n--- a/lib/vhost/vhost.c\n+++ b/lib/vhost/vhost.c\n@@ -261,7 +261,7 @@ vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tuint64_t src, dst;\n \tuint64_t len, remain = desc_len;\n \n-\tidesc = rte_malloc(__func__, desc_len, 0);\n+\tidesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);\n \tif (unlikely(!idesc))\n \t\treturn NULL;\n \n@@ -549,6 +549,7 @@ static void\n init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n {\n \tstruct vhost_virtqueue *vq;\n+\tint numa_node = SOCKET_ID_ANY;\n \n \tif (vring_idx >= VHOST_MAX_VRING) {\n \t\tVHOST_LOG_CONFIG(ERR,\n@@ -570,6 +571,15 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n \tvq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;\n \tvq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;\n \n+#ifdef RTE_LIBRTE_VHOST_NUMA\n+\tif (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {\n+\t\tVHOST_LOG_CONFIG(ERR, \"(%d) failed to query numa node: %s\\n\",\n+\t\t\tdev->vid, rte_strerror(errno));\n+\t\tnuma_node = SOCKET_ID_ANY;\n+\t}\n+#endif\n+\tvq->numa_node = numa_node;\n+\n \tvhost_user_iotlb_init(dev, vring_idx);\n }\n \n@@ -1616,7 +1626,6 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n \tstruct vhost_virtqueue *vq;\n \tstruct virtio_net *dev = get_device(vid);\n \tstruct rte_vhost_async_features f;\n-\tint node;\n \n \tif (dev == NULL || ops == NULL)\n \t\treturn -1;\n@@ -1651,20 +1660,9 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n \t\tgoto reg_out;\n \t}\n \n-#ifdef RTE_LIBRTE_VHOST_NUMA\n-\tif (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {\n-\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\"unable to get numa information in async register. 
\"\n-\t\t\t\"allocating async buffer memory on the caller thread node\\n\");\n-\t\tnode = SOCKET_ID_ANY;\n-\t}\n-#else\n-\tnode = SOCKET_ID_ANY;\n-#endif\n-\n \tvq->async_pkts_info = rte_malloc_socket(NULL,\n \t\t\tvq->size * sizeof(struct async_inflight_info),\n-\t\t\tRTE_CACHE_LINE_SIZE, node);\n+\t\t\tRTE_CACHE_LINE_SIZE, vq->numa_node);\n \tif (!vq->async_pkts_info) {\n \t\tvhost_free_async_mem(vq);\n \t\tVHOST_LOG_CONFIG(ERR,\n@@ -1675,7 +1673,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n \n \tvq->it_pool = rte_malloc_socket(NULL,\n \t\t\tVHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),\n-\t\t\tRTE_CACHE_LINE_SIZE, node);\n+\t\t\tRTE_CACHE_LINE_SIZE, vq->numa_node);\n \tif (!vq->it_pool) {\n \t\tvhost_free_async_mem(vq);\n \t\tVHOST_LOG_CONFIG(ERR,\n@@ -1686,7 +1684,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n \n \tvq->vec_pool = rte_malloc_socket(NULL,\n \t\t\tVHOST_MAX_ASYNC_VEC * sizeof(struct iovec),\n-\t\t\tRTE_CACHE_LINE_SIZE, node);\n+\t\t\tRTE_CACHE_LINE_SIZE, vq->numa_node);\n \tif (!vq->vec_pool) {\n \t\tvhost_free_async_mem(vq);\n \t\tVHOST_LOG_CONFIG(ERR,\n@@ -1698,7 +1696,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n \tif (vq_is_packed(dev)) {\n \t\tvq->async_buffers_packed = rte_malloc_socket(NULL,\n \t\t\tvq->size * sizeof(struct vring_used_elem_packed),\n-\t\t\tRTE_CACHE_LINE_SIZE, node);\n+\t\t\tRTE_CACHE_LINE_SIZE, vq->numa_node);\n \t\tif (!vq->async_buffers_packed) {\n \t\t\tvhost_free_async_mem(vq);\n \t\t\tVHOST_LOG_CONFIG(ERR,\n@@ -1709,7 +1707,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n \t} else {\n \t\tvq->async_descs_split = rte_malloc_socket(NULL,\n \t\t\tvq->size * sizeof(struct vring_used_elem),\n-\t\t\tRTE_CACHE_LINE_SIZE, node);\n+\t\t\tRTE_CACHE_LINE_SIZE, vq->numa_node);\n \t\tif (!vq->async_descs_split) {\n \t\t\tvhost_free_async_mem(vq);\n \t\t\tVHOST_LOG_CONFIG(ERR,\ndiff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h\nindex 8078ddff79..8ffe387556 100644\n--- a/lib/vhost/vhost.h\n+++ b/lib/vhost/vhost.h\n@@ -164,6 +164,7 @@ struct vhost_virtqueue {\n \n \tuint16_t\t\tbatch_copy_nb_elems;\n \tstruct batch_copy_elem\t*batch_copy_elems;\n+\tint\t\t\tnuma_node;\n \tbool\t\t\tused_wrap_counter;\n \tbool\t\t\tavail_wrap_counter;\n \ndiff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c\nindex 0590ef6d14..faf1e7db84 100644\n--- a/lib/vhost/vhost_user.c\n+++ b/lib/vhost/vhost_user.c\n@@ -433,10 +433,10 @@ vhost_user_set_vring_num(struct virtio_net **pdev,\n \tif (vq_is_packed(dev)) {\n \t\tif (vq->shadow_used_packed)\n \t\t\trte_free(vq->shadow_used_packed);\n-\t\tvq->shadow_used_packed = rte_malloc(NULL,\n+\t\tvq->shadow_used_packed = rte_malloc_socket(NULL,\n \t\t\t\tvq->size *\n \t\t\t\tsizeof(struct vring_used_elem_packed),\n-\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\t\t\tRTE_CACHE_LINE_SIZE, vq->numa_node);\n \t\tif (!vq->shadow_used_packed) {\n \t\t\tVHOST_LOG_CONFIG(ERR,\n \t\t\t\t\t\"failed to allocate memory for shadow used ring.\\n\");\n@@ -447,9 +447,9 @@ vhost_user_set_vring_num(struct virtio_net **pdev,\n \t\tif (vq->shadow_used_split)\n \t\t\trte_free(vq->shadow_used_split);\n \n-\t\tvq->shadow_used_split = rte_malloc(NULL,\n+\t\tvq->shadow_used_split = rte_malloc_socket(NULL,\n \t\t\t\tvq->size * sizeof(struct vring_used_elem),\n-\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\t\t\tRTE_CACHE_LINE_SIZE, vq->numa_node);\n \n \t\tif (!vq->shadow_used_split) {\n \t\t\tVHOST_LOG_CONFIG(ERR,\n@@ -460,9 +460,9 @@ vhost_user_set_vring_num(struct 
virtio_net **pdev,\n \n \tif (vq->batch_copy_elems)\n \t\trte_free(vq->batch_copy_elems);\n-\tvq->batch_copy_elems = rte_malloc(NULL,\n+\tvq->batch_copy_elems = rte_malloc_socket(NULL,\n \t\t\t\tvq->size * sizeof(struct batch_copy_elem),\n-\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\t\t\tRTE_CACHE_LINE_SIZE, vq->numa_node);\n \tif (!vq->batch_copy_elems) {\n \t\tVHOST_LOG_CONFIG(ERR,\n \t\t\t\"failed to allocate memory for batching copy.\\n\");\n@@ -498,6 +498,9 @@ numa_realloc(struct virtio_net *dev, int index)\n \t\treturn dev;\n \t}\n \n+\tif (node == vq->numa_node)\n+\t\tgoto out_dev_realloc;\n+\n \tif (vq->ready) {\n \t\tvq->ready = false;\n \t\tvhost_user_notify_queue_state(dev, index, 0);\n@@ -558,6 +561,10 @@ numa_realloc(struct virtio_net *dev, int index)\n \t\tvq->log_cache = lc;\n \t}\n \n+\tvq->numa_node = node;\n+\n+out_dev_realloc:\n+\n \tif (dev->flags & VIRTIO_DEV_RUNNING)\n \t\treturn dev;\n \n@@ -1212,7 +1219,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \tstruct virtio_net *dev = *pdev;\n \tstruct VhostUserMemory *memory = &msg->payload.memory;\n \tstruct rte_vhost_mem_region *reg;\n-\n+\tint numa_node = SOCKET_ID_ANY;\n \tuint64_t mmap_offset;\n \tuint32_t i;\n \n@@ -1252,13 +1259,21 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \t\tfor (i = 0; i < dev->nr_vring; i++)\n \t\t\tvhost_user_iotlb_flush_all(dev->virtqueue[i]);\n \n+\t/*\n+\t * If VQ 0 has already been allocated, try to allocate on the same\n+\t * NUMA node. It can be reallocated later in numa_realloc().\n+\t */\n+\tif (dev->nr_vring > 0)\n+\t\tnuma_node = dev->virtqueue[0]->numa_node;\n+\n \tdev->nr_guest_pages = 0;\n \tif (dev->guest_pages == NULL) {\n \t\tdev->max_guest_pages = 8;\n-\t\tdev->guest_pages = rte_zmalloc(NULL,\n+\t\tdev->guest_pages = rte_zmalloc_socket(NULL,\n \t\t\t\t\tdev->max_guest_pages *\n \t\t\t\t\tsizeof(struct guest_page),\n-\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\tnuma_node);\n \t\tif (dev->guest_pages == NULL) {\n \t\t\tVHOST_LOG_CONFIG(ERR,\n \t\t\t\t\"(%d) failed to allocate memory \"\n@@ -1268,8 +1283,8 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \t\t}\n \t}\n \n-\tdev->mem = rte_zmalloc(\"vhost-mem-table\", sizeof(struct rte_vhost_memory) +\n-\t\tsizeof(struct rte_vhost_mem_region) * memory->nregions, 0);\n+\tdev->mem = rte_zmalloc_socket(\"vhost-mem-table\", sizeof(struct rte_vhost_memory) +\n+\t\tsizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);\n \tif (dev->mem == NULL) {\n \t\tVHOST_LOG_CONFIG(ERR,\n \t\t\t\"(%d) failed to allocate memory for dev->mem\\n\",\n@@ -2192,9 +2207,9 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \t\trte_free(vq->log_cache);\n \t\tvq->log_cache = NULL;\n \t\tvq->log_cache_nb_elem = 0;\n-\t\tvq->log_cache = rte_zmalloc(\"vq log cache\",\n+\t\tvq->log_cache = rte_malloc_socket(\"vq log cache\",\n \t\t\t\tsizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,\n-\t\t\t\t0);\n+\t\t\t\t0, vq->numa_node);\n \t\t/*\n \t\t * If log cache alloc fail, don't fail migration, but no\n \t\t * caching will be done, which will impact performance\n",
    "prefixes": [
        "v4",
        "6/7"
    ]
}
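
The patch and put operations listed in the Allow header require write access. A partial update might look like the sketch below; the token-based Authorization header and the choice of writable fields (state, archived) are assumptions rather than anything documented in this response, and the server will reject the request unless the authenticated account has the necessary rights on the project.

    import requests

    url = "https://patches.dpdk.org/api/patches/94369/"

    # Assumption: a Patchwork API token issued to a maintainer account.
    headers = {"Authorization": "Token 0123456789abcdef"}

    # PATCH performs a partial update: only the fields in the payload change.
    # Assumption: "state" and "archived" are writable for maintainers.
    payload = {"state": "accepted", "archived": False}

    response = requests.patch(url, json=payload, headers=headers)
    response.raise_for_status()
    print(response.json()["state"])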