get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.
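
The record below can be retrieved by any HTTP client. As a minimal sketch, the same patch can be fetched with Python's requests library (the client library and the printed fields are illustrative; the URL is the endpoint shown in the exchange that follows):

import requests

# Fetch patch 107566 from the Patchwork REST API as JSON.
resp = requests.get("https://patches.dpdk.org/api/patches/107566/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[v3,2/2] vhost: fix physical address mapping"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # raw mbox URL, which can be fed to `git am`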

GET /api/patches/107566/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 107566,
    "url": "https://patches.dpdk.org/api/patches/107566/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220215150226.98292-3-xuan.ding@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220215150226.98292-3-xuan.ding@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220215150226.98292-3-xuan.ding@intel.com",
    "date": "2022-02-15T15:02:26",
    "name": "[v3,2/2] vhost: fix physical address mapping",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "05041a581394d97cd0fe0f4dc2ec55e5fc92d719",
    "submitter": {
        "id": 1401,
        "url": "https://patches.dpdk.org/api/people/1401/?format=api",
        "name": "Ding, Xuan",
        "email": "xuan.ding@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "https://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220215150226.98292-3-xuan.ding@intel.com/mbox/",
    "series": [
        {
            "id": 21669,
            "url": "https://patches.dpdk.org/api/series/21669/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=21669",
            "date": "2022-02-15T15:02:24",
            "name": "vhost: fix async address mapping",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/21669/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/107566/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/107566/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 62E4AA00C5;\n\tTue, 15 Feb 2022 07:26:10 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3E7E441156;\n\tTue, 15 Feb 2022 07:26:06 +0100 (CET)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by mails.dpdk.org (Postfix) with ESMTP id 5557341144;\n Tue, 15 Feb 2022 07:26:04 +0100 (CET)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 14 Feb 2022 22:26:03 -0800",
            "from npg-dpdk-xuan-cbdma.sh.intel.com ([10.67.110.228])\n by orsmga001.jf.intel.com with ESMTP; 14 Feb 2022 22:26:00 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1644906364; x=1676442364;\n h=from:to:cc:subject:date:message-id:in-reply-to: references;\n bh=YoeFrmCXHV4Yc9I+eWXRAKMTrBAA5RuZB/oo7WWeSZ0=;\n b=V0z9+15lc+m95APEofaSvC3okhulgv6BzSwEOG/+wmlqE8JtT6GlUrsb\n dZ8z+QV0I1Yv3Rv/VLEtXiAitwcM9ABSIGi/7Osoa70D1THZ8t4seSPJu\n AY0hyizqIU1LOqQOm53K9O7HPGRhF+4TsVJhJE6REaIhy4BJj5pKizFp4\n nVZ0ZrThIEnlWTrHELzOrHF94NFmi5Ldy1nNBenYuk7twaIUBw5uLR3Kr\n 9aFvtel4GLtEzZp2wxP70R468K7sp3WV/wFkUX/mjDdQV7LUqpcpWH8As\n Gu/LVz6uiFdGH/SH0Qk3L30U+MzpdcelrbnfMMiIn4iIBfebvRHZYbS1j g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10258\"; a=\"233806865\"",
            "E=Sophos;i=\"5.88,370,1635231600\"; d=\"scan'208\";a=\"233806865\"",
            "E=Sophos;i=\"5.88,370,1635231600\"; d=\"scan'208\";a=\"570656231\""
        ],
        "X-ExtLoop1": "1",
        "From": "xuan.ding@intel.com",
        "To": "maxime.coquelin@redhat.com,\n\tchenbo.xia@intel.com",
        "Cc": "dev@dpdk.org, ktraynor@redhat.com, jiayu.hu@intel.com,\n yuanx.wang@intel.com, Xuan Ding <xuan.ding@intel.com>, stable@dpdk.org",
        "Subject": "[PATCH v3 2/2] vhost: fix physical address mapping",
        "Date": "Tue, 15 Feb 2022 15:02:26 +0000",
        "Message-Id": "<20220215150226.98292-3-xuan.ding@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20220215150226.98292-1-xuan.ding@intel.com>",
        "References": "<20220215150226.98292-1-xuan.ding@intel.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Xuan Ding <xuan.ding@intel.com>\n\nWhen choosing IOVA as PA mode, IOVA is likely to be discontinuous,\nwhich requires page by page mapping for DMA devices. To be consistent,\nthis patch implements page by page mapping instead of mapping at the\nregion granularity for both IOVA as VA and PA mode.\n\nFixes: 7c61fa08b716 (\"vhost: enable IOMMU for async vhost\")\nCc: stable@dpdk.org\n\nSigned-off-by: Xuan Ding <xuan.ding@intel.com>\nSigned-off-by: Yuan Wang <yuanx.wang@intel.com>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n lib/vhost/vhost.h      |   1 +\n lib/vhost/vhost_user.c | 119 ++++++++++++++++++++---------------------\n 2 files changed, 58 insertions(+), 62 deletions(-)",
    "diff": "diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h\nindex c5d8b84320..d4586f3341 100644\n--- a/lib/vhost/vhost.h\n+++ b/lib/vhost/vhost.h\n@@ -355,6 +355,7 @@ struct vring_packed_desc_event {\n struct guest_page {\n \tuint64_t guest_phys_addr;\n \tuint64_t host_iova;\n+\tuint64_t host_user_addr;\n \tuint64_t size;\n };\n \ndiff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c\nindex cd066d8af0..981790ea53 100644\n--- a/lib/vhost/vhost_user.c\n+++ b/lib/vhost/vhost_user.c\n@@ -142,57 +142,57 @@ get_blk_size(int fd)\n \treturn ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;\n }\n \n-static int\n-async_dma_map(struct virtio_net *dev, struct rte_vhost_mem_region *region, bool do_map)\n+static void\n+async_dma_map(struct virtio_net *dev, bool do_map)\n {\n-\tuint64_t host_iova;\n \tint ret = 0;\n+\tuint32_t i;\n+\tstruct guest_page *page;\n \n-\thost_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);\n \tif (do_map) {\n-\t\t/* Add mapped region into the default container of DPDK. */\n-\t\tret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,\n-\t\t\t\t\t\t region->host_user_addr,\n-\t\t\t\t\t\t host_iova,\n-\t\t\t\t\t\t region->size);\n-\t\tif (ret) {\n-\t\t\t/*\n-\t\t\t * DMA device may bind with kernel driver, in this case,\n-\t\t\t * we don't need to program IOMMU manually. However, if no\n-\t\t\t * device is bound with vfio/uio in DPDK, and vfio kernel\n-\t\t\t * module is loaded, the API will still be called and return\n-\t\t\t * with ENODEV/ENOSUP.\n-\t\t\t *\n-\t\t\t * DPDK vfio only returns ENODEV/ENOSUP in very similar\n-\t\t\t * situations(vfio either unsupported, or supported\n-\t\t\t * but no devices found). Either way, no mappings could be\n-\t\t\t * performed. We treat it as normal case in async path.\n-\t\t\t */\n-\t\t\tif (rte_errno == ENODEV || rte_errno == ENOTSUP)\n-\t\t\t\treturn 0;\n-\n-\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) DMA engine map failed\\n\", dev->ifname);\n-\t\t\t/* DMA mapping errors won't stop VHST_USER_SET_MEM_TABLE. */\n-\t\t\treturn 0;\n+\t\tfor (i = 0; i < dev->nr_guest_pages; i++) {\n+\t\t\tpage = &dev->guest_pages[i];\n+\t\t\tret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,\n+\t\t\t\t\t\t\t page->host_user_addr,\n+\t\t\t\t\t\t\t page->host_iova,\n+\t\t\t\t\t\t\t page->size);\n+\t\t\tif (ret) {\n+\t\t\t\t/*\n+\t\t\t\t * DMA device may bind with kernel driver, in this case,\n+\t\t\t\t * we don't need to program IOMMU manually. However, if no\n+\t\t\t\t * device is bound with vfio/uio in DPDK, and vfio kernel\n+\t\t\t\t * module is loaded, the API will still be called and return\n+\t\t\t\t * with ENODEV.\n+\t\t\t\t *\n+\t\t\t\t * DPDK vfio only returns ENODEV in very similar situations\n+\t\t\t\t * (vfio either unsupported, or supported but no devices found).\n+\t\t\t\t * Either way, no mappings could be performed. We treat it as\n+\t\t\t\t * normal case in async path. This is a workaround.\n+\t\t\t\t */\n+\t\t\t\tif (rte_errno == ENODEV)\n+\t\t\t\t\treturn;\n+\n+\t\t\t\t/* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */\n+\t\t\t\tVHOST_LOG_CONFIG(ERR, \"DMA engine map failed\\n\");\n+\t\t\t}\n \t\t}\n \n \t} else {\n-\t\t/* Remove mapped region from the default container of DPDK. */\n-\t\tret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,\n-\t\t\t\t\t\t   region->host_user_addr,\n-\t\t\t\t\t\t   host_iova,\n-\t\t\t\t\t\t   region->size);\n-\t\tif (ret) {\n-\t\t\t/* like DMA map, ignore the kernel driver case when unmap. 
*/\n-\t\t\tif (rte_errno == EINVAL)\n-\t\t\t\treturn 0;\n-\n-\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) DMA engine unmap failed\\n\", dev->ifname);\n-\t\t\treturn ret;\n+\t\tfor (i = 0; i < dev->nr_guest_pages; i++) {\n+\t\t\tpage = &dev->guest_pages[i];\n+\t\t\tret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,\n+\t\t\t\t\t\t\t   page->host_user_addr,\n+\t\t\t\t\t\t\t   page->host_iova,\n+\t\t\t\t\t\t\t   page->size);\n+\t\t\tif (ret) {\n+\t\t\t\t/* like DMA map, ignore the kernel driver case when unmap. */\n+\t\t\t\tif (rte_errno == EINVAL)\n+\t\t\t\t\treturn;\n+\n+\t\t\t\tVHOST_LOG_CONFIG(ERR, \"DMA engine unmap failed\\n\");\n+\t\t\t}\n \t\t}\n \t}\n-\n-\treturn ret;\n }\n \n static void\n@@ -204,12 +204,12 @@ free_mem_region(struct virtio_net *dev)\n \tif (!dev || !dev->mem)\n \t\treturn;\n \n+\tif (dev->async_copy && rte_vfio_is_enabled(\"vfio\"))\n+\t\tasync_dma_map(dev, false);\n+\n \tfor (i = 0; i < dev->mem->nregions; i++) {\n \t\treg = &dev->mem->regions[i];\n \t\tif (reg->host_user_addr) {\n-\t\t\tif (dev->async_copy && rte_vfio_is_enabled(\"vfio\"))\n-\t\t\t\tasync_dma_map(dev, reg, false);\n-\n \t\t\tmunmap(reg->mmap_addr, reg->mmap_size);\n \t\t\tclose(reg->fd);\n \t\t}\n@@ -985,7 +985,7 @@ vhost_user_set_vring_base(struct virtio_net **pdev,\n \n static int\n add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,\n-\t\t   uint64_t host_iova, uint64_t size)\n+\t\t   uint64_t host_iova, uint64_t host_user_addr, uint64_t size)\n {\n \tstruct guest_page *page, *last_page;\n \tstruct guest_page *old_pages;\n@@ -997,7 +997,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,\n \t\t\t\t\tdev->max_guest_pages * sizeof(*page),\n \t\t\t\t\tRTE_CACHE_LINE_SIZE);\n \t\tif (dev->guest_pages == NULL) {\n-\t\t\tVHOST_LOG_CONFIG(ERR, \"(%s) cannot realloc guest_pages\\n\", dev->ifname);\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"cannot realloc guest_pages\\n\");\n \t\t\trte_free(old_pages);\n \t\t\treturn -1;\n \t\t}\n@@ -1006,8 +1006,9 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,\n \tif (dev->nr_guest_pages > 0) {\n \t\tlast_page = &dev->guest_pages[dev->nr_guest_pages - 1];\n \t\t/* merge if the two pages are continuous */\n-\t\tif (host_iova == last_page->host_iova +\n-\t\t\t\t      last_page->size) {\n+\t\tif (host_iova == last_page->host_iova + last_page->size &&\n+\t\t    guest_phys_addr == last_page->guest_phys_addr + last_page->size &&\n+\t\t    host_user_addr == last_page->host_user_addr + last_page->size) {\n \t\t\tlast_page->size += size;\n \t\t\treturn 0;\n \t\t}\n@@ -1016,6 +1017,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,\n \tpage = &dev->guest_pages[dev->nr_guest_pages++];\n \tpage->guest_phys_addr = guest_phys_addr;\n \tpage->host_iova  = host_iova;\n+\tpage->host_user_addr = host_user_addr;\n \tpage->size = size;\n \n \treturn 0;\n@@ -1035,7 +1037,8 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,\n \tsize = page_size - (guest_phys_addr & (page_size - 1));\n \tsize = RTE_MIN(size, reg_size);\n \n-\tif (add_one_guest_page(dev, guest_phys_addr, host_iova, size) < 0)\n+\tif (add_one_guest_page(dev, guest_phys_addr, host_iova,\n+\t\t\t       host_user_addr, size) < 0)\n \t\treturn -1;\n \n \thost_user_addr  += size;\n@@ -1047,7 +1050,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,\n \t\thost_iova = rte_mem_virt2iova((void *)(uintptr_t)\n \t\t\t\t\t\t  host_user_addr);\n \t\tif (add_one_guest_page(dev, guest_phys_addr, 
host_iova,\n-\t\t\t\tsize) < 0)\n+\t\t\t\t       host_user_addr, size) < 0)\n \t\t\treturn -1;\n \n \t\thost_user_addr  += size;\n@@ -1223,7 +1226,6 @@ vhost_user_mmap_region(struct virtio_net *dev,\n \tuint64_t mmap_size;\n \tuint64_t alignment;\n \tint populate;\n-\tint ret;\n \n \t/* Check for memory_size + mmap_offset overflow */\n \tif (mmap_offset >= -region->size) {\n@@ -1280,16 +1282,6 @@ vhost_user_mmap_region(struct virtio_net *dev,\n \t\t\t\t\tdev->ifname);\n \t\t\treturn -1;\n \t\t}\n-\n-\t\tif (rte_vfio_is_enabled(\"vfio\")) {\n-\t\t\tret = async_dma_map(dev, region, true);\n-\t\t\tif (ret) {\n-\t\t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\t\"(%s) configure IOMMU for DMA engine failed\\n\",\n-\t\t\t\t\tdev->ifname);\n-\t\t\t\treturn -1;\n-\t\t\t}\n-\t\t}\n \t}\n \n \tVHOST_LOG_CONFIG(INFO, \"(%s) guest memory region size: 0x%\" PRIx64 \"\\n\",\n@@ -1426,6 +1418,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev,\n \t\tdev->mem->nregions++;\n \t}\n \n+\tif (dev->async_copy && rte_vfio_is_enabled(\"vfio\"))\n+\t\tasync_dma_map(dev, true);\n+\n \tif (vhost_user_postcopy_register(dev, main_fd, ctx) < 0)\n \t\tgoto free_mem_table;\n \n",
    "prefixes": [
        "v3",
        "2/2"
    ]
}
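
The patch and put methods listed at the top modify a patch and require an authenticated account with the right permissions on the project (typically a maintainer). The sketch below shows a partial update with Python's requests library; the token header follows Patchwork's token authentication scheme, and the token value, as well as the choice of "state" and "archived" (both present in the record above) as the fields to change, are illustrative assumptions rather than a guaranteed write interface:

import requests

API_TOKEN = "..."  # hypothetical API token; placeholder, not a real credential

# Partial update (HTTP PATCH): only the fields present in the JSON body change.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/107566/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted", "archived": False},
)
resp.raise_for_status()
print(resp.json()["state"])

Under the usual REST semantics, put replaces the writable fields as a whole, while patch (as above) changes only the fields supplied.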