get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
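
As a hedged illustration of the GET method above, the following Python sketch fetches this patch resource and prints a few of its fields. It assumes only the public read-only endpoint shown below (http://patches.dpdk.org/api/patches/83860/) and the third-party requests library; the field names are taken from the JSON response that follows.

# Minimal sketch: fetch a single patch from the Patchwork REST API.
# Assumes the public patches.dpdk.org instance and the `requests` package.
import requests

PATCH_URL = "http://patches.dpdk.org/api/patches/83860/"

resp = requests.get(PATCH_URL, timeout=30)
resp.raise_for_status()          # GET needs no authentication here
patch = resp.json()

# A few fields present in the response body shown below.
print(patch["name"])             # "[v6,1/1] vfio: modify spapr iommu ..."
print(patch["state"])            # "superseded"
print(patch["check"])            # aggregate CI check result, e.g. "fail"
print(patch["mbox"])             # URL of the raw mbox, suitable for `git am`
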

GET /api/patches/83860/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83860,
    "url": "http://patches.dpdk.org/api/patches/83860/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201109203528.132797-2-drc@linux.vnet.ibm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201109203528.132797-2-drc@linux.vnet.ibm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201109203528.132797-2-drc@linux.vnet.ibm.com",
    "date": "2020-11-09T20:35:28",
    "name": "[v6,1/1] vfio: modify spapr iommu support to use static window sizing",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "0199e0e69f54a277c9ed9d4ce33bd0b2f8890988",
    "submitter": {
        "id": 1256,
        "url": "http://patches.dpdk.org/api/people/1256/?format=api",
        "name": "David Christensen",
        "email": "drc@linux.vnet.ibm.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201109203528.132797-2-drc@linux.vnet.ibm.com/mbox/",
    "series": [
        {
            "id": 13752,
            "url": "http://patches.dpdk.org/api/series/13752/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13752",
            "date": "2020-11-09T20:35:28",
            "name": "[v6,1/1] vfio: modify spapr iommu support to use static window sizing",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/13752/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/83860/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/83860/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 94A94A0527;\n\tMon,  9 Nov 2020 21:35:58 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5836172E2;\n\tMon,  9 Nov 2020 21:35:40 +0100 (CET)",
            "from mx0a-001b2d01.pphosted.com (mx0a-001b2d01.pphosted.com\n [148.163.156.1]) by dpdk.org (Postfix) with ESMTP id 7AD44697B\n for <dev@dpdk.org>; Mon,  9 Nov 2020 21:35:37 +0100 (CET)",
            "from pps.filterd (m0098393.ppops.net [127.0.0.1])\n by mx0a-001b2d01.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n 0A9KXQo0095717; Mon, 9 Nov 2020 15:35:35 -0500",
            "from pps.reinject (localhost [127.0.0.1])\n by mx0a-001b2d01.pphosted.com with ESMTP id 34qc9es1vx-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT);\n Mon, 09 Nov 2020 15:35:35 -0500",
            "from m0098393.ppops.net (m0098393.ppops.net [127.0.0.1])\n by pps.reinject (8.16.0.36/8.16.0.36) with SMTP id 0A9KXUjF096071;\n Mon, 9 Nov 2020 15:35:35 -0500",
            "from ppma04dal.us.ibm.com (7a.29.35a9.ip4.static.sl-reverse.com\n [169.53.41.122])\n by mx0a-001b2d01.pphosted.com with ESMTP id 34qc9es1vp-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT);\n Mon, 09 Nov 2020 15:35:34 -0500",
            "from pps.filterd (ppma04dal.us.ibm.com [127.0.0.1])\n by ppma04dal.us.ibm.com (8.16.0.42/8.16.0.42) with SMTP id 0A9KW3vD027901;\n Mon, 9 Nov 2020 20:35:34 GMT",
            "from b01cxnp22033.gho.pok.ibm.com (b01cxnp22033.gho.pok.ibm.com\n [9.57.198.23]) by ppma04dal.us.ibm.com with ESMTP id 34nk79qrsg-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT);\n Mon, 09 Nov 2020 20:35:34 +0000",
            "from b01ledav006.gho.pok.ibm.com (b01ledav006.gho.pok.ibm.com\n [9.57.199.111])\n by b01cxnp22033.gho.pok.ibm.com (8.14.9/8.14.9/NCO v10.0) with ESMTP id\n 0A9KZXuL8913438\n (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-GCM-SHA384 bits=256 verify=OK);\n Mon, 9 Nov 2020 20:35:33 GMT",
            "from b01ledav006.gho.pok.ibm.com (unknown [127.0.0.1])\n by IMSVA (Postfix) with ESMTP id 3AA18AC05B;\n Mon,  9 Nov 2020 20:35:33 +0000 (GMT)",
            "from b01ledav006.gho.pok.ibm.com (unknown [127.0.0.1])\n by IMSVA (Postfix) with ESMTP id 18C45AC059;\n Mon,  9 Nov 2020 20:35:33 +0000 (GMT)",
            "from localhost.localdomain (unknown [9.114.224.51])\n by b01ledav006.gho.pok.ibm.com (Postfix) with ESMTP;\n Mon,  9 Nov 2020 20:35:33 +0000 (GMT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=ibm.com;\n h=from : to : cc : subject\n : date : message-id : in-reply-to : references; s=pp1;\n bh=HIbuz22AzFjdpcbgj0y5e8AlSHEzL+UglJkBX45UdI4=;\n b=qiYZX1KT/CtOKVwvAr1L9w6Kw+5z1JbXEXEsrYOca8JoSul13UWIM0VG2qbD2o1sJuu0\n atxq/y8EdlSL1nhik9kx9qAe2UzUTaDAZmMOxcg8uMekH/DKCTwd3rdaLZk8tMd1sfbI\n 9ujCygbBdV40XsVOtc5eUq+kMSj5oItJE34Tr5HxeS5T1mW4VSapoAsIggI8WINC0mx+\n kXDiV6BZEwJVMtHCkbNnmcZll5d7N6/cvcc5R7Ov1H/Bonw5Sg+GgkQhyequ4R1N1Uvl\n 8H/XW2+yZavEr0fgTMiRLuQLR29BhSrJKs4a89LlD86qHMZf0oUtR5MaguSMGmOJy7Pf sQ==",
        "From": "David Christensen <drc@linux.vnet.ibm.com>",
        "To": "dev@dpdk.org, anatoly.burakov@intel.com, david.marchand@redhat.com",
        "Cc": "David Christensen <drc@linux.vnet.ibm.com>",
        "Date": "Mon,  9 Nov 2020 12:35:28 -0800",
        "Message-Id": "<20201109203528.132797-2-drc@linux.vnet.ibm.com>",
        "X-Mailer": "git-send-email 2.18.4",
        "In-Reply-To": "<20201109203528.132797-1-drc@linux.vnet.ibm.com>",
        "References": "<20201103220532.176225-1-drc@linux.vnet.ibm.com>\n <20201109203528.132797-1-drc@linux.vnet.ibm.com>",
        "X-TM-AS-GCONF": "00",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.312, 18.0.737\n definitions=2020-11-09_13:2020-11-05,\n 2020-11-09 signatures=0",
        "X-Proofpoint-Spam-Details": "rule=outbound_notspam policy=outbound score=0\n spamscore=0 mlxscore=0\n phishscore=0 adultscore=0 mlxlogscore=999 malwarescore=0\n lowpriorityscore=0 priorityscore=1501 bulkscore=0 suspectscore=2\n clxscore=1015 impostorscore=0 classifier=spam adjust=0 reason=mlx\n scancount=1 engine=8.12.0-2009150000 definitions=main-2011090137",
        "Subject": "[dpdk-dev] [PATCH v6 1/1] vfio: modify spapr iommu support to use\n\tstatic window sizing",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The SPAPR IOMMU requires that a DMA window size be defined before memory\ncan be mapped for DMA. Current code dynamically modifies the DMA window\nsize in response to every new memory allocation which is potentially\ndangerous because all existing mappings need to be unmapped/remapped in\norder to resize the DMA window, leaving hardware holding IOVA addresses\nthat are temporarily unmapped.  The new SPAPR code statically assigns\nthe DMA window size on first use, using the largest physical memory\nmemory address when IOVA=PA and the highest existing memseg virtual\naddress when IOVA=VA.\n\nSigned-off-by: David Christensen <drc@linux.vnet.ibm.com>\n---\n lib/librte_eal/linux/eal_vfio.c | 430 +++++++++++++++-----------------\n 1 file changed, 207 insertions(+), 223 deletions(-)",
    "diff": "diff --git a/lib/librte_eal/linux/eal_vfio.c b/lib/librte_eal/linux/eal_vfio.c\nindex 380f2f44a..050082444 100644\n--- a/lib/librte_eal/linux/eal_vfio.c\n+++ b/lib/librte_eal/linux/eal_vfio.c\n@@ -18,6 +18,7 @@\n #include \"eal_memcfg.h\"\n #include \"eal_vfio.h\"\n #include \"eal_private.h\"\n+#include \"eal_internal_cfg.h\"\n \n #ifdef VFIO_PRESENT\n \n@@ -536,17 +537,6 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,\n \t\treturn;\n \t}\n \n-#ifdef RTE_ARCH_PPC_64\n-\tms = rte_mem_virt2memseg(addr, msl);\n-\twhile (cur_len < len) {\n-\t\tint idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);\n-\n-\t\trte_fbarray_set_free(&msl->memseg_arr, idx);\n-\t\tcur_len += ms->len;\n-\t\t++ms;\n-\t}\n-\tcur_len = 0;\n-#endif\n \t/* memsegs are contiguous in memory */\n \tms = rte_mem_virt2memseg(addr, msl);\n \n@@ -607,17 +597,6 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,\n \t\t\t\t\t\tiova_expected - iova_start, 0);\n \t\t}\n \t}\n-#ifdef RTE_ARCH_PPC_64\n-\tcur_len = 0;\n-\tms = rte_mem_virt2memseg(addr, msl);\n-\twhile (cur_len < len) {\n-\t\tint idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);\n-\n-\t\trte_fbarray_set_used(&msl->memseg_arr, idx);\n-\t\tcur_len += ms->len;\n-\t\t++ms;\n-\t}\n-#endif\n }\n \n static int\n@@ -1436,21 +1415,30 @@ vfio_type1_dma_map(int vfio_container_fd)\n \treturn rte_memseg_walk(type1_map, &vfio_container_fd);\n }\n \n+/* Track the size of the statically allocated DMA window for SPAPR */\n+uint64_t spapr_dma_win_len;\n+uint64_t spapr_dma_win_page_sz;\n+\n static int\n vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,\n \t\tuint64_t len, int do_map)\n {\n-\tstruct vfio_iommu_type1_dma_map dma_map;\n-\tstruct vfio_iommu_type1_dma_unmap dma_unmap;\n-\tint ret;\n \tstruct vfio_iommu_spapr_register_memory reg = {\n \t\t.argsz = sizeof(reg),\n+\t\t.vaddr = (uintptr_t) vaddr,\n+\t\t.size = len,\n \t\t.flags = 0\n \t};\n-\treg.vaddr = (uintptr_t) vaddr;\n-\treg.size = len;\n+\tint ret;\n \n \tif (do_map != 0) {\n+\t\tstruct vfio_iommu_type1_dma_map dma_map;\n+\n+\t\tif (iova + len > spapr_dma_win_len) {\n+\t\t\tRTE_LOG(ERR, EAL, \"  dma map attempt outside DMA window\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\n \t\tret = ioctl(vfio_container_fd,\n \t\t\t\tVFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);\n \t\tif (ret) {\n@@ -1469,24 +1457,14 @@ vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,\n \n \t\tret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);\n \t\tif (ret) {\n-\t\t\t/**\n-\t\t\t * In case the mapping was already done EBUSY will be\n-\t\t\t * returned from kernel.\n-\t\t\t */\n-\t\t\tif (errno == EBUSY) {\n-\t\t\t\tRTE_LOG(DEBUG, EAL,\n-\t\t\t\t\t\" Memory segment is already mapped,\"\n-\t\t\t\t\t\" skipping\");\n-\t\t\t} else {\n-\t\t\t\tRTE_LOG(ERR, EAL,\n-\t\t\t\t\t\"  cannot set up DMA remapping,\"\n-\t\t\t\t\t\" error %i (%s)\\n\", errno,\n-\t\t\t\t\tstrerror(errno));\n-\t\t\t\treturn -1;\n-\t\t\t}\n+\t\t\tRTE_LOG(ERR, EAL, \"  cannot map vaddr for IOMMU, error %i (%s)\\n\",\n+\t\t\t\terrno, strerror(errno));\n+\t\t\treturn -1;\n \t\t}\n \n \t} else {\n+\t\tstruct vfio_iommu_type1_dma_map dma_unmap;\n+\n \t\tmemset(&dma_unmap, 0, sizeof(dma_unmap));\n \t\tdma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);\n \t\tdma_unmap.size = len;\n@@ -1495,8 +1473,8 @@ vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,\n \t\tret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,\n \t\t\t\t&dma_unmap);\n 
\t\tif (ret) {\n-\t\t\tRTE_LOG(ERR, EAL, \"  cannot clear DMA remapping, error %i (%s)\\n\",\n-\t\t\t\t\terrno, strerror(errno));\n+\t\t\tRTE_LOG(ERR, EAL, \"  cannot unmap vaddr for IOMMU, error %i (%s)\\n\",\n+\t\t\t\terrno, strerror(errno));\n \t\t\treturn -1;\n \t\t}\n \n@@ -1504,12 +1482,12 @@ vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,\n \t\t\t\tVFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);\n \t\tif (ret) {\n \t\t\tRTE_LOG(ERR, EAL, \"  cannot unregister vaddr for IOMMU, error %i (%s)\\n\",\n-\t\t\t\t\terrno, strerror(errno));\n+\t\t\t\terrno, strerror(errno));\n \t\t\treturn -1;\n \t\t}\n \t}\n \n-\treturn 0;\n+\treturn ret;\n }\n \n static int\n@@ -1526,251 +1504,257 @@ vfio_spapr_map_walk(const struct rte_memseg_list *msl,\n \tif (ms->iova == RTE_BAD_IOVA)\n \t\treturn 0;\n \n-\treturn vfio_spapr_dma_do_map(*vfio_container_fd, ms->addr_64, ms->iova,\n-\t\t\tms->len, 1);\n+\treturn vfio_spapr_dma_do_map(*vfio_container_fd,\n+\t\tms->addr_64, ms->iova, ms->len, 1);\n }\n \n+struct spapr_size_walk_param {\n+\tuint64_t max_va;\n+\tuint64_t page_sz;\n+\tbool is_user_managed;\n+};\n+\n+/*\n+ * In order to set the DMA window size required for the SPAPR IOMMU\n+ * we need to walk the existing virtual memory allocations as well as\n+ * find the hugepage size used.\n+ */\n static int\n-vfio_spapr_unmap_walk(const struct rte_memseg_list *msl,\n-\t\tconst struct rte_memseg *ms, void *arg)\n+vfio_spapr_size_walk(const struct rte_memseg_list *msl, void *arg)\n {\n-\tint *vfio_container_fd = arg;\n+\tstruct spapr_size_walk_param *param = arg;\n+\tuint64_t max = (uint64_t) msl->base_va + (uint64_t) msl->len;\n \n-\t/* skip external memory that isn't a heap */\n-\tif (msl->external && !msl->heap)\n+\tif (msl->external && !msl->heap) {\n+\t\t/* ignore user managed external memory */\n+\t\tparam->is_user_managed = true;\n \t\treturn 0;\n+\t}\n \n-\t/* skip any segments with invalid IOVA addresses */\n-\tif (ms->iova == RTE_BAD_IOVA)\n-\t\treturn 0;\n+\tif (max > param->max_va) {\n+\t\tparam->page_sz = msl->page_sz;\n+\t\tparam->max_va = max;\n+\t}\n \n-\treturn vfio_spapr_dma_do_map(*vfio_container_fd, ms->addr_64, ms->iova,\n-\t\t\tms->len, 0);\n+\treturn 0;\n }\n \n-struct spapr_walk_param {\n-\tuint64_t window_size;\n-\tuint64_t hugepage_sz;\n-};\n-\n+/*\n+ * Find the highest memory address used in physical or virtual address\n+ * space and use that as the top of the DMA window.\n+ */\n static int\n-vfio_spapr_window_size_walk(const struct rte_memseg_list *msl,\n-\t\tconst struct rte_memseg *ms, void *arg)\n+find_highest_mem_addr(struct spapr_size_walk_param *param)\n {\n-\tstruct spapr_walk_param *param = arg;\n-\tuint64_t max = ms->iova + ms->len;\n+\t/* find the maximum IOVA address for setting the DMA window size */\n+\tif (rte_eal_iova_mode() == RTE_IOVA_PA) {\n+\t\tstatic const char proc_iomem[] = \"/proc/iomem\";\n+\t\tstatic const char str_sysram[] = \"System RAM\";\n+\t\tuint64_t start, end, max = 0;\n+\t\tchar *line = NULL;\n+\t\tchar *dash, *space;\n+\t\tsize_t line_len;\n \n-\t/* skip external memory that isn't a heap */\n-\tif (msl->external && !msl->heap)\n+\t\t/*\n+\t\t * Example \"System RAM\" in /proc/iomem:\n+\t\t * 00000000-1fffffffff : System RAM\n+\t\t * 200000000000-201fffffffff : System RAM\n+\t\t */\n+\t\tFILE *fd = fopen(proc_iomem, \"r\");\n+\t\tif (fd == NULL) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Cannot open %s\\n\", proc_iomem);\n+\t\t\treturn -1;\n+\t\t}\n+\t\t/* Scan /proc/iomem for the highest PA in the system */\n+\t\twhile (getline(&line, 
&line_len, fd) != -1) {\n+\t\t\tif (strstr(line, str_sysram) == NULL)\n+\t\t\t\tcontinue;\n+\n+\t\t\tspace = strstr(line, \" \");\n+\t\t\tdash = strstr(line, \"-\");\n+\n+\t\t\t/* Validate the format of the memory string */\n+\t\t\tif (space == NULL || dash == NULL || space < dash) {\n+\t\t\t\tRTE_LOG(ERR, EAL, \"Can't parse line \\\"%s\\\" in file %s\\n\",\n+\t\t\t\t\tline, proc_iomem);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\tstart = strtoull(line, NULL, 16);\n+\t\t\tend   = strtoull(dash + 1, NULL, 16);\n+\t\t\tRTE_LOG(DEBUG, EAL, \"Found system RAM from 0x%\" PRIx64\n+\t\t\t\t\" to 0x%\" PRIx64 \"\\n\", start, end);\n+\t\t\tif (end > max)\n+\t\t\t\tmax = end;\n+\t\t}\n+\t\tfree(line);\n+\t\tfclose(fd);\n+\n+\t\tif (max == 0) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Failed to find valid \\\"System RAM\\\" \"\n+\t\t\t\t\"entry in file %s\\n\", proc_iomem);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tspapr_dma_win_len = rte_align64pow2(max + 1);\n \t\treturn 0;\n+\t} else if (rte_eal_iova_mode() == RTE_IOVA_VA) {\n+\t\tRTE_LOG(DEBUG, EAL, \"Highest VA address in memseg list is 0x%\"\n+\t\t\tPRIx64 \"\\n\", param->max_va);\n+\t\tspapr_dma_win_len = rte_align64pow2(param->max_va);\n+\t\treturn 0;\n+\t}\n \n-\t/* skip any segments with invalid IOVA addresses */\n-\tif (ms->iova == RTE_BAD_IOVA)\n+\tspapr_dma_win_len = 0;\n+\tRTE_LOG(ERR, EAL, \"Unsupported IOVA mode\\n\");\n+\treturn -1;\n+}\n+\n+\n+/*\n+ * The SPAPRv2 IOMMU supports 2 DMA windows with starting\n+ * address at 0 or 1<<59.  By default, a DMA window is set\n+ * at address 0, 2GB long, with a 4KB page.  For DPDK we\n+ * must remove the default window and setup a new DMA window\n+ * based on the hugepage size and memory requirements of\n+ * the application before we can map memory for DMA.\n+ */\n+static int\n+spapr_dma_win_size(void)\n+{\n+\tstruct spapr_size_walk_param param;\n+\n+\t/* only create DMA window once */\n+\tif (spapr_dma_win_len > 0)\n \t\treturn 0;\n \n-\tif (max > param->window_size) {\n-\t\tparam->hugepage_sz = ms->hugepage_sz;\n-\t\tparam->window_size = max;\n+\t/* walk the memseg list to find the page size/max VA address */\n+\tmemset(&param, 0, sizeof(param));\n+\tif (rte_memseg_list_walk(vfio_spapr_size_walk, &param) < 0) {\n+\t\tRTE_LOG(ERR, EAL, \"Failed to walk memseg list for DMA window size\\n\");\n+\t\treturn -1;\n \t}\n \n+\t/* we can't be sure if DMA window covers external memory */\n+\tif (param.is_user_managed)\n+\t\tRTE_LOG(WARNING, EAL, \"Detected user managed external memory which may not be managed by the IOMMU\\n\");\n+\n+\t/* check physical/virtual memory size */\n+\tif (find_highest_mem_addr(&param) < 0)\n+\t\treturn -1;\n+\tRTE_LOG(DEBUG, EAL, \"Setting DMA window size to 0x%\" PRIx64 \"\\n\",\n+\t\tspapr_dma_win_len);\n+\tspapr_dma_win_page_sz = param.page_sz;\n+\trte_mem_set_dma_mask(__builtin_ctzll(spapr_dma_win_len));\n \treturn 0;\n }\n \n static int\n-vfio_spapr_create_new_dma_window(int vfio_container_fd,\n-\t\tstruct vfio_iommu_spapr_tce_create *create) {\n+vfio_spapr_create_dma_window(int vfio_container_fd)\n+{\n+\tstruct vfio_iommu_spapr_tce_create create = {\n+\t\t.argsz = sizeof(create), };\n \tstruct vfio_iommu_spapr_tce_remove remove = {\n-\t\t.argsz = sizeof(remove),\n-\t};\n+\t\t.argsz = sizeof(remove), };\n \tstruct vfio_iommu_spapr_tce_info info = {\n-\t\t.argsz = sizeof(info),\n-\t};\n+\t\t.argsz = sizeof(info), };\n \tint ret;\n \n-\t/* query spapr iommu info */\n+\tret = spapr_dma_win_size();\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n \tret = ioctl(vfio_container_fd, 
VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);\n \tif (ret) {\n-\t\tRTE_LOG(ERR, EAL, \"  cannot get iommu info, \"\n-\t\t\t\t\"error %i (%s)\\n\", errno, strerror(errno));\n+\t\tRTE_LOG(ERR, EAL, \"  can't get iommu info, error %i (%s)\\n\",\n+\t\t\terrno, strerror(errno));\n \t\treturn -1;\n \t}\n \n-\t/* remove default DMA of 32 bit window */\n+\t/*\n+\t * sPAPR v1/v2 IOMMU always has a default 1G DMA window set.  The window\n+\t * can't be changed for v1 but it can be changed for v2. Since DPDK only\n+\t * supports v2, remove the default DMA window so it can be resized.\n+\t */\n \tremove.start_addr = info.dma32_window_start;\n \tret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);\n-\tif (ret) {\n-\t\tRTE_LOG(ERR, EAL, \"  cannot remove default DMA window, \"\n-\t\t\t\t\"error %i (%s)\\n\", errno, strerror(errno));\n+\tif (ret)\n \t\treturn -1;\n-\t}\n \n-\t/* create new DMA window */\n-\tret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, create);\n-\tif (ret) {\n+\t/* create a new DMA window (start address is not selectable) */\n+\tcreate.window_size = spapr_dma_win_len;\n+\tcreate.page_shift  = __builtin_ctzll(spapr_dma_win_page_sz);\n+\tcreate.levels = 1;\n+\tret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);\n #ifdef VFIO_IOMMU_SPAPR_INFO_DDW\n-\t\t/* try possible page_shift and levels for workaround */\n+\t/*\n+\t * The vfio_iommu_spapr_tce_info structure was modified in\n+\t * Linux kernel 4.2.0 to add support for the\n+\t * vfio_iommu_spapr_tce_ddw_info structure needed to try\n+\t * multiple table levels.  Skip the attempt if running with\n+\t * an older kernel.\n+\t */\n+\tif (ret) {\n+\t\t/* if at first we don't succeed, try more levels */\n \t\tuint32_t levels;\n \n-\t\tfor (levels = create->levels + 1;\n+\t\tfor (levels = create.levels + 1;\n \t\t\tret && levels <= info.ddw.levels; levels++) {\n-\t\t\tcreate->levels = levels;\n+\t\t\tcreate.levels = levels;\n \t\t\tret = ioctl(vfio_container_fd,\n-\t\t\t\tVFIO_IOMMU_SPAPR_TCE_CREATE, create);\n-\t\t}\n-#endif\n-\t\tif (ret) {\n-\t\t\tRTE_LOG(ERR, EAL, \"  cannot create new DMA window, \"\n-\t\t\t\t\t\"error %i (%s)\\n\", errno, strerror(errno));\n-\t\t\treturn -1;\n+\t\t\t\tVFIO_IOMMU_SPAPR_TCE_CREATE, &create);\n \t\t}\n \t}\n-\n-\tif (create->start_addr != 0) {\n-\t\tRTE_LOG(ERR, EAL, \"  DMA window start address != 0\\n\");\n+#endif /* VFIO_IOMMU_SPAPR_INFO_DDW */\n+\tif (ret) {\n+\t\tRTE_LOG(ERR, EAL, \"  cannot create new DMA window, error %i (%s)\\n\",\n+\t\t\terrno, strerror(errno));\n+\t\tRTE_LOG(ERR, EAL, \"  consider using a larger hugepage size \"\n+\t\t\t\"if supported by the system\\n\");\n \t\treturn -1;\n \t}\n \n-\treturn 0;\n+\t/* verify the start address  */\n+\tif (create.start_addr != 0) {\n+\t\tRTE_LOG(ERR, EAL, \"  received unsupported start address 0x%\"\n+\t\t\tPRIx64 \"\\n\", (uint64_t)create.start_addr);\n+\t\treturn -1;\n+\t}\n+\treturn ret;\n }\n \n static int\n-vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,\n-\t\tuint64_t len, int do_map)\n+vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr,\n+\t\tuint64_t iova, uint64_t len, int do_map)\n {\n-\tstruct spapr_walk_param param;\n-\tstruct vfio_iommu_spapr_tce_create create = {\n-\t\t.argsz = sizeof(create),\n-\t};\n-\tstruct vfio_config *vfio_cfg;\n-\tstruct user_mem_maps *user_mem_maps;\n-\tint i, ret = 0;\n-\n-\tvfio_cfg = get_vfio_cfg_by_container_fd(vfio_container_fd);\n-\tif (vfio_cfg == NULL) {\n-\t\tRTE_LOG(ERR, EAL, \"  invalid container fd!\\n\");\n-\t\treturn 
-1;\n-\t}\n-\n-\tuser_mem_maps = &vfio_cfg->mem_maps;\n-\trte_spinlock_recursive_lock(&user_mem_maps->lock);\n-\n-\t/* check if window size needs to be adjusted */\n-\tmemset(&param, 0, sizeof(param));\n-\n-\t/* we're inside a callback so use thread-unsafe version */\n-\tif (rte_memseg_walk_thread_unsafe(vfio_spapr_window_size_walk,\n-\t\t\t\t&param) < 0) {\n-\t\tRTE_LOG(ERR, EAL, \"Could not get window size\\n\");\n-\t\tret = -1;\n-\t\tgoto out;\n-\t}\n-\n-\t/* also check user maps */\n-\tfor (i = 0; i < user_mem_maps->n_maps; i++) {\n-\t\tuint64_t max = user_mem_maps->maps[i].iova +\n-\t\t\t\tuser_mem_maps->maps[i].len;\n-\t\tparam.window_size = RTE_MAX(param.window_size, max);\n-\t}\n-\n-\t/* sPAPR requires window size to be a power of 2 */\n-\tcreate.window_size = rte_align64pow2(param.window_size);\n-\tcreate.page_shift = __builtin_ctzll(param.hugepage_sz);\n-\tcreate.levels = 1;\n+\tint ret = 0;\n \n \tif (do_map) {\n-\t\t/* re-create window and remap the entire memory */\n-\t\tif (iova + len > create.window_size) {\n-\t\t\t/* release all maps before recreating the window */\n-\t\t\tif (rte_memseg_walk_thread_unsafe(vfio_spapr_unmap_walk,\n-\t\t\t\t\t&vfio_container_fd) < 0) {\n-\t\t\t\tRTE_LOG(ERR, EAL, \"Could not release DMA maps\\n\");\n-\t\t\t\tret = -1;\n-\t\t\t\tgoto out;\n-\t\t\t}\n-\t\t\t/* release all user maps */\n-\t\t\tfor (i = 0; i < user_mem_maps->n_maps; i++) {\n-\t\t\t\tstruct user_mem_map *map =\n-\t\t\t\t\t\t&user_mem_maps->maps[i];\n-\t\t\t\tif (vfio_spapr_dma_do_map(vfio_container_fd,\n-\t\t\t\t\t\tmap->addr, map->iova, map->len,\n-\t\t\t\t\t\t0)) {\n-\t\t\t\t\tRTE_LOG(ERR, EAL, \"Could not release user DMA maps\\n\");\n-\t\t\t\t\tret = -1;\n-\t\t\t\t\tgoto out;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\tcreate.window_size = rte_align64pow2(iova + len);\n-\t\t\tif (vfio_spapr_create_new_dma_window(vfio_container_fd,\n-\t\t\t\t\t&create) < 0) {\n-\t\t\t\tRTE_LOG(ERR, EAL, \"Could not create new DMA window\\n\");\n-\t\t\t\tret = -1;\n-\t\t\t\tgoto out;\n-\t\t\t}\n-\t\t\t/* we're inside a callback, so use thread-unsafe version\n-\t\t\t */\n-\t\t\tif (rte_memseg_walk_thread_unsafe(vfio_spapr_map_walk,\n-\t\t\t\t\t&vfio_container_fd) < 0) {\n-\t\t\t\tRTE_LOG(ERR, EAL, \"Could not recreate DMA maps\\n\");\n-\t\t\t\tret = -1;\n-\t\t\t\tgoto out;\n-\t\t\t}\n-\t\t\t/* remap all user maps */\n-\t\t\tfor (i = 0; i < user_mem_maps->n_maps; i++) {\n-\t\t\t\tstruct user_mem_map *map =\n-\t\t\t\t\t\t&user_mem_maps->maps[i];\n-\t\t\t\tif (vfio_spapr_dma_do_map(vfio_container_fd,\n-\t\t\t\t\t\tmap->addr, map->iova, map->len,\n-\t\t\t\t\t\t1)) {\n-\t\t\t\t\tRTE_LOG(ERR, EAL, \"Could not recreate user DMA maps\\n\");\n-\t\t\t\t\tret = -1;\n-\t\t\t\t\tgoto out;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t\tif (vfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 1)) {\n+\t\tif (vfio_spapr_dma_do_map(vfio_container_fd,\n+\t\t\tvaddr, iova, len, 1)) {\n \t\t\tRTE_LOG(ERR, EAL, \"Failed to map DMA\\n\");\n \t\t\tret = -1;\n-\t\t\tgoto out;\n \t\t}\n \t} else {\n-\t\t/* for unmap, check if iova within DMA window */\n-\t\tif (iova > create.window_size) {\n-\t\t\tRTE_LOG(ERR, EAL, \"iova beyond DMA window for unmap\");\n+\t\tif (vfio_spapr_dma_do_map(vfio_container_fd,\n+\t\t\tvaddr, iova, len, 0)) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Failed to unmap DMA\\n\");\n \t\t\tret = -1;\n-\t\t\tgoto out;\n \t\t}\n-\n-\t\tvfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 0);\n \t}\n-out:\n-\trte_spinlock_recursive_unlock(&user_mem_maps->lock);\n+\n \treturn ret;\n }\n \n static int\n vfio_spapr_dma_map(int 
vfio_container_fd)\n {\n-\tstruct vfio_iommu_spapr_tce_create create = {\n-\t\t.argsz = sizeof(create),\n-\t};\n-\tstruct spapr_walk_param param;\n-\n-\tmemset(&param, 0, sizeof(param));\n-\n-\t/* create DMA window from 0 to max(phys_addr + len) */\n-\trte_memseg_walk(vfio_spapr_window_size_walk, &param);\n-\n-\t/* sPAPR requires window size to be a power of 2 */\n-\tcreate.window_size = rte_align64pow2(param.window_size);\n-\tcreate.page_shift = __builtin_ctzll(param.hugepage_sz);\n-\tcreate.levels = 1;\n-\n-\tif (vfio_spapr_create_new_dma_window(vfio_container_fd, &create) < 0) {\n-\t\tRTE_LOG(ERR, EAL, \"Could not create new DMA window\\n\");\n+\tif (vfio_spapr_create_dma_window(vfio_container_fd) < 0) {\n+\t\tRTE_LOG(ERR, EAL, \"Could not create new DMA window!\\n\");\n \t\treturn -1;\n \t}\n \n-\t/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */\n+\t/* map all existing DPDK segments for DMA */\n \tif (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)\n \t\treturn -1;\n \n",
    "prefixes": [
        "v6",
        "1/1"
    ]
}
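
For the PUT and PATCH methods listed in the Allow header, write access requires authentication. The sketch below is an illustration under stated assumptions, not a documented workflow for this instance: it assumes a Patchwork API token (issued from the user's profile page) with maintainer or delegate permissions on the project, and it sends only the single writable field being changed.

# Hedged sketch: partially update a patch via HTTP PATCH.
# Assumes token authentication and sufficient permissions on this instance;
# the token value below is a placeholder, not a real credential.
import requests

PATCH_URL = "http://patches.dpdk.org/api/patches/83860/"
API_TOKEN = "0123456789abcdef"   # placeholder - obtain a token from your Patchwork profile

resp = requests.patch(
    PATCH_URL,
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted"},  # PATCH sends only the fields being changed
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])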