get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.
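
For illustration, the read-only GET endpoint needs no authentication. Below is a minimal Python sketch using the third-party requests library; the patch ID and host are taken from the sample exchange that follows, and the explicit format=json parameter is an assumption based on the format=api parameter shown below:

import requests

# Fetch a single patch as JSON; read access requires no token.
resp = requests.get("https://patches.dpdk.org/api/patches/99958/",
                    params={"format": "json"}, timeout=30)
resp.raise_for_status()
patch = resp.json()
# A few fields from the response shown below.
print(patch["name"], patch["state"], patch["check"])
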

GET /api/patches/99958/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 99958,
    "url": "https://patches.dpdk.org/api/patches/99958/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210929024127.15547-3-xuan.ding@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210929024127.15547-3-xuan.ding@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210929024127.15547-3-xuan.ding@intel.com",
    "date": "2021-09-29T02:41:27",
    "name": "[v6,2/2] vhost: enable IOMMU for async vhost",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d000aa418164c29b1b2c2251bf5f84f7480e9f94",
    "submitter": {
        "id": 1401,
        "url": "https://patches.dpdk.org/api/people/1401/?format=api",
        "name": "Ding, Xuan",
        "email": "xuan.ding@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "https://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210929024127.15547-3-xuan.ding@intel.com/mbox/",
    "series": [
        {
            "id": 19244,
            "url": "https://patches.dpdk.org/api/series/19244/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=19244",
            "date": "2021-09-29T02:41:25",
            "name": "support IOMMU for DMA device",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/19244/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/99958/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/99958/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 98327A0547;\n\tWed, 29 Sep 2021 04:49:38 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 176B1410F1;\n\tWed, 29 Sep 2021 04:49:35 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n by mails.dpdk.org (Postfix) with ESMTP id C4BCC40E3C\n for <dev@dpdk.org>; Wed, 29 Sep 2021 04:49:28 +0200 (CEST)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 28 Sep 2021 19:49:28 -0700",
            "from dpdk-xuanding-dev2.sh.intel.com ([10.67.119.250])\n by fmsmga002.fm.intel.com with ESMTP; 28 Sep 2021 19:49:25 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10121\"; a=\"288506865\"",
            "E=Sophos;i=\"5.85,331,1624345200\"; d=\"scan'208\";a=\"288506865\"",
            "E=Sophos;i=\"5.85,331,1624345200\"; d=\"scan'208\";a=\"562881523\""
        ],
        "X-ExtLoop1": "1",
        "From": "Xuan Ding <xuan.ding@intel.com>",
        "To": "dev@dpdk.org, anatoly.burakov@intel.com, maxime.coquelin@redhat.com,\n chenbo.xia@intel.com",
        "Cc": "jiayu.hu@intel.com, cheng1.jiang@intel.com, bruce.richardson@intel.com,\n sunil.pai.g@intel.com, yinan.wang@intel.com, yvonnex.yang@intel.com,\n Xuan Ding <xuan.ding@intel.com>",
        "Date": "Wed, 29 Sep 2021 02:41:27 +0000",
        "Message-Id": "<20210929024127.15547-3-xuan.ding@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210929024127.15547-1-xuan.ding@intel.com>",
        "References": "<20210901053044.109901-1-xuan.ding@intel.com>\n <20210929024127.15547-1-xuan.ding@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v6 2/2] vhost: enable IOMMU for async vhost",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The use of IOMMU has many advantages, such as isolation and address\ntranslation. This patch extends the capbility of DMA engine to use\nIOMMU if the DMA engine is bound to vfio.\n\nWhen set memory table, the guest memory will be mapped\ninto the default container of DPDK.\n\nSigned-off-by: Xuan Ding <xuan.ding@intel.com>\n---\n lib/vhost/vhost.h      |   4 ++\n lib/vhost/vhost_user.c | 116 ++++++++++++++++++++++++++++++++++++++++-\n 2 files changed, 118 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h\nindex d98ca8adfa..8b8df3897b 100644\n--- a/lib/vhost/vhost.h\n+++ b/lib/vhost/vhost.h\n@@ -370,6 +370,10 @@ struct virtio_net {\n \tint16_t\t\t\tbroadcast_rarp;\n \tuint32_t\t\tnr_vring;\n \tint\t\t\tasync_copy;\n+\n+\t/* Record the dma map status for each region. */\n+\tbool\t\t\t*async_map_status;\n+\n \tint\t\t\textbuf;\n \tint\t\t\tlinearbuf;\n \tstruct vhost_virtqueue\t*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];\ndiff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c\nindex 29a4c9af60..3d2872c85f 100644\n--- a/lib/vhost/vhost_user.c\n+++ b/lib/vhost/vhost_user.c\n@@ -45,6 +45,8 @@\n #include <rte_common.h>\n #include <rte_malloc.h>\n #include <rte_log.h>\n+#include <rte_vfio.h>\n+#include <rte_errno.h>\n \n #include \"iotlb.h\"\n #include \"vhost.h\"\n@@ -141,6 +143,63 @@ get_blk_size(int fd)\n \treturn ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;\n }\n \n+static int\n+async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool do_map)\n+{\n+\tuint64_t host_iova;\n+\tint ret = 0;\n+\n+\thost_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);\n+\tif (do_map) {\n+\t\t/* Add mapped region into the default container of DPDK. */\n+\t\tret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,\n+\t\t\t\t\t\t region->host_user_addr,\n+\t\t\t\t\t\t host_iova,\n+\t\t\t\t\t\t region->size);\n+\t\t*dma_map_success = ret == 0;\n+\n+\t\tif (ret) {\n+\t\t\t/*\n+\t\t\t * DMA device may bind with kernel driver, in this case,\n+\t\t\t * we don't need to program IOMMU manually. However, if no\n+\t\t\t * device is bound with vfio/uio in DPDK, and vfio kernel\n+\t\t\t * module is loaded, the API will still be called and return\n+\t\t\t * with ENODEV/ENOSUP.\n+\t\t\t *\n+\t\t\t * DPDK vfio only returns ENODEV/ENOSUP in very similar\n+\t\t\t * situations(vfio either unsupported, or supported\n+\t\t\t * but no devices found). Either way, no mappings could be\n+\t\t\t * performed. We treat it as normal case in async path.\n+\t\t\t */\n+\t\t\tif (rte_errno == ENODEV && rte_errno == ENOTSUP)\n+\t\t\t\treturn 0;\n+\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"DMA engine map failed\\n\");\n+\t\t\treturn ret;\n+\n+\t\t}\n+\n+\t} else {\n+\t\t/* No need to do vfio unmap if the map failed. */\n+\t\tif (!*dma_map_success)\n+\t\t\treturn 0;\n+\n+\t\t/* Remove mapped region from the default container of DPDK. */\n+\t\tret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,\n+\t\t\t\t\t\t   region->host_user_addr,\n+\t\t\t\t\t\t   host_iova,\n+\t\t\t\t\t\t   region->size);\n+\t\tif (ret) {\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"DMA engine unmap failed\\n\");\n+\t\t\treturn ret;\n+\t\t}\n+\t\t/* Clear the flag once the unmap succeeds. 
*/\n+\t\t*dma_map_success = 0;\n+\t}\n+\n+\treturn ret;\n+}\n+\n static void\n free_mem_region(struct virtio_net *dev)\n {\n@@ -153,6 +212,9 @@ free_mem_region(struct virtio_net *dev)\n \tfor (i = 0; i < dev->mem->nregions; i++) {\n \t\treg = &dev->mem->regions[i];\n \t\tif (reg->host_user_addr) {\n+\t\t\tif (dev->async_copy && rte_vfio_is_enabled(\"vfio\"))\n+\t\t\t\tasync_dma_map(reg, &dev->async_map_status[i], false);\n+\n \t\t\tmunmap(reg->mmap_addr, reg->mmap_size);\n \t\t\tclose(reg->fd);\n \t\t}\n@@ -203,6 +265,11 @@ vhost_backend_cleanup(struct virtio_net *dev)\n \t}\n \n \tdev->postcopy_listening = 0;\n+\n+\tif (dev->async_map_status) {\n+\t\trte_free(dev->async_map_status);\n+\t\tdev->async_map_status = NULL;\n+\t}\n }\n \n static void\n@@ -621,6 +688,19 @@ numa_realloc(struct virtio_net *dev, int index)\n \t}\n \tdev->mem = mem;\n \n+\tif (dev->async_copy && rte_vfio_is_enabled(\"vfio\")) {\n+\t\tif (dev->async_map_status == NULL) {\n+\t\t\tdev->async_map_status = rte_zmalloc_socket(\"async-dma-map-status\",\n+\t\t\t\t\tsizeof(bool) * dev->mem->nregions, 0, node);\n+\t\t\tif (!dev->async_map_status) {\n+\t\t\t\tVHOST_LOG_CONFIG(ERR,\n+\t\t\t\t\t\"(%d) failed to realloc dma mapping status on node\\n\",\n+\t\t\t\t\tdev->vid);\n+\t\t\t\treturn dev;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n \tgp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),\n \t\t\tRTE_CACHE_LINE_SIZE, node);\n \tif (!gp) {\n@@ -1151,12 +1231,14 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,\n static int\n vhost_user_mmap_region(struct virtio_net *dev,\n \t\tstruct rte_vhost_mem_region *region,\n+\t\tuint32_t region_index,\n \t\tuint64_t mmap_offset)\n {\n \tvoid *mmap_addr;\n \tuint64_t mmap_size;\n \tuint64_t alignment;\n \tint populate;\n+\tint ret;\n \n \t/* Check for memory_size + mmap_offset overflow */\n \tif (mmap_offset >= -region->size) {\n@@ -1210,13 +1292,23 @@ vhost_user_mmap_region(struct virtio_net *dev,\n \tregion->mmap_size = mmap_size;\n \tregion->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;\n \n-\tif (dev->async_copy)\n+\tif (dev->async_copy) {\n \t\tif (add_guest_pages(dev, region, alignment) < 0) {\n \t\t\tVHOST_LOG_CONFIG(ERR,\n \t\t\t\t\t\"adding guest pages to region failed.\\n\");\n \t\t\treturn -1;\n \t\t}\n \n+\t\tif (rte_vfio_is_enabled(\"vfio\")) {\n+\t\t\tret = async_dma_map(region, &dev->async_map_status[region_index], true);\n+\t\t\tif (ret) {\n+\t\t\t\tVHOST_LOG_CONFIG(ERR, \"Configure IOMMU for DMA \"\n+\t\t\t\t\t\t\t\"engine failed\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n \tVHOST_LOG_CONFIG(INFO,\n \t\t\t\"guest memory region size: 0x%\" PRIx64 \"\\n\"\n \t\t\t\"\\t guest physical addr: 0x%\" PRIx64 \"\\n\"\n@@ -1289,6 +1381,11 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \t\tfree_mem_region(dev);\n \t\trte_free(dev->mem);\n \t\tdev->mem = NULL;\n+\n+\t\tif (dev->async_map_status) {\n+\t\t\trte_free(dev->async_map_status);\n+\t\t\tdev->async_map_status = NULL;\n+\t\t}\n \t}\n \n \t/* Flush IOTLB cache as previous HVAs are now invalid */\n@@ -1329,6 +1426,17 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \t\tgoto free_guest_pages;\n \t}\n \n+\tif (dev->async_copy) {\n+\t\tdev->async_map_status = rte_zmalloc_socket(\"async-dma-map-status\",\n+\t\t\t\t\tsizeof(bool) * memory->nregions, 0, numa_node);\n+\t\tif (!dev->async_map_status) {\n+\t\t\tVHOST_LOG_CONFIG(ERR,\n+\t\t\t\t\"(%d) failed to allocate memory for dma mapping 
status\\n\",\n+\t\t\t\tdev->vid);\n+\t\t\tgoto free_mem_table;\n+\t\t}\n+\t}\n+\n \tfor (i = 0; i < memory->nregions; i++) {\n \t\treg = &dev->mem->regions[i];\n \n@@ -1345,7 +1453,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \n \t\tmmap_offset = memory->regions[i].mmap_offset;\n \n-\t\tif (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {\n+\t\tif (vhost_user_mmap_region(dev, reg, i, mmap_offset) < 0) {\n \t\t\tVHOST_LOG_CONFIG(ERR, \"Failed to mmap region %u\\n\", i);\n \t\t\tgoto free_mem_table;\n \t\t}\n@@ -1393,6 +1501,10 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \tfree_mem_region(dev);\n \trte_free(dev->mem);\n \tdev->mem = NULL;\n+\tif (dev->async_map_status) {\n+\t\trte_free(dev->async_map_status);\n+\t\tdev->async_map_status = NULL;\n+\t}\n free_guest_pages:\n \trte_free(dev->guest_pages);\n \tdev->guest_pages = NULL;\n",
    "prefixes": [
        "v6",
        "2/2"
    ]
}
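
The Allow header above also lists PUT and PATCH. Writes require authentication; the sketch below assumes a maintainer API token (the token string is a placeholder) and that fields such as "state" and "archived" accept updates, mirroring the values seen in the response:

import requests

# Partial update via HTTP PATCH: send only the fields to change.
# The token below is hypothetical; a real one comes from the Patchwork user settings.
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}
payload = {"state": "superseded", "archived": True}
resp = requests.patch("https://patches.dpdk.org/api/patches/99958/",
                      json=payload, headers=headers, timeout=30)
resp.raise_for_status()
print(resp.json()["state"])  # confirm the update took effect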