From patchwork Thu Jun 17 15:37:37 2021
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, david.marchand@redhat.com, chenbo.xia@intel.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Thu, 17 Jun 2021 17:37:37 +0200
Message-Id: <20210617153739.178011-6-maxime.coquelin@redhat.com>
In-Reply-To: <20210617153739.178011-1-maxime.coquelin@redhat.com>
References: <20210617153739.178011-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v4 5/7] vhost: improve NUMA reallocation

This patch improves the numa_realloc() function by making use of
rte_realloc_socket(), which takes care of the memory copy and freeing
of the old data.
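For context, the three-step pattern this patch removes, and its one-call
replacement, look roughly as follows (illustrative sketch only, with
placeholder names; not code taken from the diff below):

    /* Before: allocate on the target node, copy, then free the old buffer. */
    new_vq = rte_malloc_socket(NULL, sizeof(*vq), 0, node);
    if (new_vq) {
            memcpy(new_vq, vq, sizeof(*vq));
            rte_free(vq);
            vq = new_vq;
    }

    /* After: rte_realloc_socket() allocates on the requested node, copies
     * the contents and frees the old buffer in one call; on failure it
     * returns NULL and leaves the original allocation untouched.
     */
    new_vq = rte_realloc_socket(vq, sizeof(*vq), 0, node);
    if (new_vq != NULL)
            vq = new_vq;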
Suggested-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/vhost_user.c | 195 ++++++++++++++++++-----------------------
 1 file changed, 86 insertions(+), 109 deletions(-)

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 6e7b327ef8..0590ef6d14 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -480,144 +480,121 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
 static struct virtio_net*
 numa_realloc(struct virtio_net *dev, int index)
 {
-	int oldnode, newnode;
+	int node, dev_node;
 	struct virtio_net *old_dev;
-	struct vhost_virtqueue *old_vq, *vq;
-	struct vring_used_elem *new_shadow_used_split;
-	struct vring_used_elem_packed *new_shadow_used_packed;
-	struct batch_copy_elem *new_batch_copy_elems;
+	struct vhost_virtqueue *vq;
+	struct batch_copy_elem *bce;
+	struct guest_page *gp;
+	struct rte_vhost_memory *mem;
+	size_t mem_size;
 	int ret;
 
 	old_dev = dev;
-	vq = old_vq = dev->virtqueue[index];
-
-	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
-			MPOL_F_NODE | MPOL_F_ADDR);
+	vq = dev->virtqueue[index];
 
-	/* check if we need to reallocate vq */
-	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
-			MPOL_F_NODE | MPOL_F_ADDR);
+	ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR);
 	if (ret) {
-		VHOST_LOG_CONFIG(ERR,
-			"Unable to get vq numa information.\n");
+		VHOST_LOG_CONFIG(ERR, "Unable to get virtqueue %d numa information.\n", index);
 		return dev;
 	}
 
-	if (oldnode != newnode) {
-		if (vq->ready) {
-			vq->ready = false;
-			vhost_user_notify_queue_state(dev, index, 0);
-		}
-		VHOST_LOG_CONFIG(INFO,
-			"reallocate vq from %d to %d node\n", oldnode, newnode);
-		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
-		if (!vq)
-			return dev;
+	if (vq->ready) {
+		vq->ready = false;
+		vhost_user_notify_queue_state(dev, index, 0);
+	}
 
-		memcpy(vq, old_vq, sizeof(*vq));
+	vq = rte_realloc_socket(vq, sizeof(*vq), 0, node);
+	if (!vq) {
+		VHOST_LOG_CONFIG(ERR, "Failed to realloc virtqueue %d on node %d\n",
+				index, node);
+		return dev;
+	}
 
-		if (vq_is_packed(dev)) {
-			new_shadow_used_packed = rte_malloc_socket(NULL,
-					vq->size *
-					sizeof(struct vring_used_elem_packed),
-					RTE_CACHE_LINE_SIZE,
-					newnode);
-			if (new_shadow_used_packed) {
-				rte_free(vq->shadow_used_packed);
-				vq->shadow_used_packed = new_shadow_used_packed;
-			}
-		} else {
-			new_shadow_used_split = rte_malloc_socket(NULL,
-					vq->size *
-					sizeof(struct vring_used_elem),
-					RTE_CACHE_LINE_SIZE,
-					newnode);
-			if (new_shadow_used_split) {
-				rte_free(vq->shadow_used_split);
-				vq->shadow_used_split = new_shadow_used_split;
-			}
-		}
+	if (vq != dev->virtqueue[index]) {
+		VHOST_LOG_CONFIG(INFO, "reallocated virtqueue on node %d\n", node);
+		dev->virtqueue[index] = vq;
+		vhost_user_iotlb_init(dev, index);
+	}
+
+	if (vq_is_packed(dev)) {
+		struct vring_used_elem_packed *sup;
 
-		new_batch_copy_elems = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct batch_copy_elem),
-			RTE_CACHE_LINE_SIZE,
-			newnode);
-		if (new_batch_copy_elems) {
-			rte_free(vq->batch_copy_elems);
-			vq->batch_copy_elems = new_batch_copy_elems;
+		sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!sup) {
+			VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow packed on node %d\n", node);
+			return dev;
 		}
+		vq->shadow_used_packed = sup;
 
-		if (vq->log_cache) {
-			struct log_cache_entry *log_cache;
+	} else {
+		struct vring_used_elem *sus;
 
-			log_cache = rte_realloc_socket(vq->log_cache,
-					sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
-					0, newnode);
-			if (log_cache)
-				vq->log_cache = log_cache;
+		sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!sus) {
+			VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow split on node %d\n", node);
+			return dev;
 		}
-
-		rte_free(old_vq);
+		vq->shadow_used_split = sus;
 	}
 
-	if (dev->flags & VIRTIO_DEV_RUNNING)
-		goto out;
-
-	/* check if we need to reallocate dev */
-	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
-			MPOL_F_NODE | MPOL_F_ADDR);
-	if (ret) {
-		VHOST_LOG_CONFIG(ERR,
-			"Unable to get dev numa information.\n");
-		goto out;
+	bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce),
+			RTE_CACHE_LINE_SIZE, node);
+	if (!bce) {
+		VHOST_LOG_CONFIG(ERR, "Failed to realloc batch copy elem on node %d\n", node);
+		return dev;
 	}
 
-	if (oldnode != newnode) {
-		struct rte_vhost_memory *old_mem;
-		struct guest_page *old_gp;
-		ssize_t mem_size, gp_size;
+	vq->batch_copy_elems = bce;
 
-		VHOST_LOG_CONFIG(INFO,
-			"reallocate dev from %d to %d node\n",
-			oldnode, newnode);
-		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
-		if (!dev) {
-			dev = old_dev;
-			goto out;
-		}
+	if (vq->log_cache) {
+		struct log_cache_entry *lc;
 
-		memcpy(dev, old_dev, sizeof(*dev));
-		rte_free(old_dev);
-
-		mem_size = sizeof(struct rte_vhost_memory) +
-			sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
-		old_mem = dev->mem;
-		dev->mem = rte_malloc_socket(NULL, mem_size, 0, newnode);
-		if (!dev->mem) {
-			dev->mem = old_mem;
-			goto out;
+		lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node);
+		if (!lc) {
+			VHOST_LOG_CONFIG(ERR, "Failed to realloc log cache on node %d\n", node);
+			return dev;
 		}
+		vq->log_cache = lc;
+	}
 
-		memcpy(dev->mem, old_mem, mem_size);
-		rte_free(old_mem);
+	if (dev->flags & VIRTIO_DEV_RUNNING)
+		return dev;
 
-		gp_size = dev->max_guest_pages * sizeof(*dev->guest_pages);
-		old_gp = dev->guest_pages;
-		dev->guest_pages = rte_malloc_socket(NULL, gp_size, RTE_CACHE_LINE_SIZE, newnode);
-		if (!dev->guest_pages) {
-			dev->guest_pages = old_gp;
-			goto out;
-		}
+	ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR);
+	if (ret) {
+		VHOST_LOG_CONFIG(ERR, "Unable to get Virtio dev %d numa information.\n", dev->vid);
+		return dev;
+	}
 
-		memcpy(dev->guest_pages, old_gp, gp_size);
-		rte_free(old_gp);
+	if (dev_node == node)
+		return dev;
+
+	dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node);
+	if (!dev) {
+		VHOST_LOG_CONFIG(ERR, "Failed to realloc dev on node %d\n", node);
+		return old_dev;
 	}
 
-out:
-	dev->virtqueue[index] = vq;
+	VHOST_LOG_CONFIG(INFO, "reallocated device on node %d\n", node);
 	vhost_devices[dev->vid] = dev;
 
-	if (old_vq != vq)
-		vhost_user_iotlb_init(dev, index);
+	mem_size = sizeof(struct rte_vhost_memory) +
+		sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
+	mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
+	if (!mem) {
+		VHOST_LOG_CONFIG(ERR, "Failed to realloc mem table on node %d\n", node);
+		return dev;
+	}
+	dev->mem = mem;
+
+	gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
+			RTE_CACHE_LINE_SIZE, node);
+	if (!gp) {
+		VHOST_LOG_CONFIG(ERR, "Failed to realloc guest pages on node %d\n", node);
+		return dev;
+	}
+	dev->guest_pages = gp;
 
 	return dev;
 }
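
A note for reviewers on a behaviour the new code depends on:
rte_realloc_socket() may return a different address when the object is
moved to the requested node, so stored references must be refreshed
afterwards; that is what the `vq != dev->virtqueue[index]` check and the
IOTLB re-init above handle. A minimal sketch of that pattern, with
hypothetical names (not code from this patch):

    #include <stddef.h>
    #include <rte_malloc.h>

    /* Hypothetical helper: migrate table[index] to "node", refreshing the
     * back-reference when rte_realloc_socket() moved the object. On failure
     * the original allocation is still valid, so it is kept as-is.
     */
    static int
    migrate_to_node(void **table, int index, size_t size, int node)
    {
            void *obj = rte_realloc_socket(table[index], size, 0, node);

            if (obj == NULL)
                    return -1; /* table[index] still points to valid memory */

            table[index] = obj; /* refresh reference (may be unchanged) */
            return 0;
    }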