get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/62468/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 62468,
    "url": "http://patches.dpdk.org/api/patches/62468/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/5dd669557fc499df5e345a14c9252c095eff6c07.1572966906.git.anatoly.burakov@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<5dd669557fc499df5e345a14c9252c095eff6c07.1572966906.git.anatoly.burakov@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/5dd669557fc499df5e345a14c9252c095eff6c07.1572966906.git.anatoly.burakov@intel.com",
    "date": "2019-11-05T15:15:12",
    "name": "[19.11] vfio: fix DMA mapping of externally allocated heaps",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "bb10d04ac7686c55bbd789778d2c6f970724a39c",
    "submitter": {
        "id": 4,
        "url": "http://patches.dpdk.org/api/people/4/?format=api",
        "name": "Anatoly Burakov",
        "email": "anatoly.burakov@intel.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/5dd669557fc499df5e345a14c9252c095eff6c07.1572966906.git.anatoly.burakov@intel.com/mbox/",
    "series": [
        {
            "id": 7254,
            "url": "http://patches.dpdk.org/api/series/7254/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=7254",
            "date": "2019-11-05T15:15:12",
            "name": "[19.11] vfio: fix DMA mapping of externally allocated heaps",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/7254/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/62468/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/62468/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CA057A04A2;\n\tTue,  5 Nov 2019 16:15:15 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 205FB1BF0A;\n\tTue,  5 Nov 2019 16:15:15 +0100 (CET)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by dpdk.org (Postfix) with ESMTP id D65D71BF07\n for <dev@dpdk.org>; Tue,  5 Nov 2019 16:15:13 +0100 (CET)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by orsmga103.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n 05 Nov 2019 07:15:12 -0800",
            "from silpixa00399498.ir.intel.com (HELO\n silpixa00399498.ger.corp.intel.com) ([10.237.223.151])\n by fmsmga004.fm.intel.com with ESMTP; 05 Nov 2019 07:15:10 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.68,271,1569308400\"; d=\"scan'208\";a=\"227128882\"",
        "From": "Anatoly Burakov <anatoly.burakov@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Bruce Richardson <bruce.richardson@intel.com>, rajesh.ravi@broadcom.com,\n ajit.khaparde@broadcom.com, jonathan.richardson@broadcom.com,\n scott.branden@broadcom.com, vikram.prakash@broadcom.com,\n srinath.mannam@broadcom.com, david.marchand@redhat.com, thomas@monjalon.net",
        "Date": "Tue,  5 Nov 2019 15:15:12 +0000",
        "Message-Id": "\n <5dd669557fc499df5e345a14c9252c095eff6c07.1572966906.git.anatoly.burakov@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "Subject": "[dpdk-dev] [PATCH 19.11] vfio: fix DMA mapping of externally\n\tallocated heaps",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Currently, externally created heaps are supposed to be automatically\nmapped for VFIO DMA by EAL, however they only do so if, at the time of\nheap creation, VFIO is initialized and has at least one device\navailable. If no devices are available at the time of heap creation (or\nif devices were available, but were since hot-unplugged, thus dropping\nall VFIO container mappings), then VFIO mapping code would have skipped\nover externally allocated heaps.\n\nThe fix is two-fold. First, we allow externally allocated memory\nsegments to be marked as \"heap\" segments. This allows us to distinguish\nbetween external memory segments that were created via heap API, from\nthose that were created via rte_extmem_register() API.\n\nThen, we fix the VFIO code to only skip non-heap external segments.\nAlso, since external heaps are not guaranteed to have valid IOVA\naddresses, we will skip those which have invalid IOVA addresses as well.\n\nFixes: 0f526d674f8e (\"malloc: separate creating memseg list and malloc heap\")\n\nSigned-off-by: Anatoly Burakov <anatoly.burakov@intel.com>\n---\n\nNotes:\n    This cannot be backported to older releases as it breaks the\n    API and ABI. A separate fix is in the works for stable.\n\n lib/librte_eal/common/include/rte_memory.h |  1 +\n lib/librte_eal/common/rte_malloc.c         |  1 +\n lib/librte_eal/freebsd/eal/eal_memory.c    |  1 +\n lib/librte_eal/linux/eal/eal_memory.c      |  3 ++\n lib/librte_eal/linux/eal/eal_vfio.c        | 46 +++++++++++++++++++---\n 5 files changed, 47 insertions(+), 5 deletions(-)",
    "diff": "diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h\nindex 38e00e382c..bf81a2faa8 100644\n--- a/lib/librte_eal/common/include/rte_memory.h\n+++ b/lib/librte_eal/common/include/rte_memory.h\n@@ -81,6 +81,7 @@ struct rte_memseg_list {\n \tvolatile uint32_t version; /**< version number for multiprocess sync. */\n \tsize_t len; /**< Length of memory area covered by this memseg list. */\n \tunsigned int external; /**< 1 if this list points to external memory */\n+\tunsigned int heap; /**< 1 if this list points to a heap */\n \tstruct rte_fbarray memseg_arr;\n };\n \ndiff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c\nindex 044d3a9078..413e4aa004 100644\n--- a/lib/librte_eal/common/rte_malloc.c\n+++ b/lib/librte_eal/common/rte_malloc.c\n@@ -396,6 +396,7 @@ rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,\n \n \trte_spinlock_lock(&heap->lock);\n \tret = malloc_heap_add_external_memory(heap, msl);\n+\tmsl->heap = 1; /* mark it as heap segment */\n \trte_spinlock_unlock(&heap->lock);\n \n unlock:\ndiff --git a/lib/librte_eal/freebsd/eal/eal_memory.c b/lib/librte_eal/freebsd/eal/eal_memory.c\nindex 7fe3178898..a97d8f0f0c 100644\n--- a/lib/librte_eal/freebsd/eal/eal_memory.c\n+++ b/lib/librte_eal/freebsd/eal/eal_memory.c\n@@ -93,6 +93,7 @@ rte_eal_hugepage_init(void)\n \t\tmsl->page_sz = page_sz;\n \t\tmsl->len = internal_config.memory;\n \t\tmsl->socket_id = 0;\n+\t\tmsl->heap = 1;\n \n \t\t/* populate memsegs. 
each memseg is 1 page long */\n \t\tfor (cur_seg = 0; cur_seg < n_segs; cur_seg++) {\ndiff --git a/lib/librte_eal/linux/eal/eal_memory.c b/lib/librte_eal/linux/eal/eal_memory.c\nindex accfd2e232..43e4ffc757 100644\n--- a/lib/librte_eal/linux/eal/eal_memory.c\n+++ b/lib/librte_eal/linux/eal/eal_memory.c\n@@ -831,6 +831,7 @@ alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,\n \tmsl->page_sz = page_sz;\n \tmsl->socket_id = socket_id;\n \tmsl->base_va = NULL;\n+\tmsl->heap = 1; /* mark it as a heap segment */\n \n \tRTE_LOG(DEBUG, EAL, \"Memseg list allocated: 0x%zxkB at socket %i\\n\",\n \t\t\t(size_t)page_sz >> 10, socket_id);\n@@ -1405,6 +1406,7 @@ eal_legacy_hugepage_init(void)\n \t\tmsl->page_sz = page_sz;\n \t\tmsl->socket_id = 0;\n \t\tmsl->len = internal_config.memory;\n+\t\tmsl->heap = 1;\n \n \t\t/* we're in single-file segments mode, so only the segment list\n \t\t * fd needs to be set up.\n@@ -1677,6 +1679,7 @@ eal_legacy_hugepage_init(void)\n \t\tmem_sz = msl->len;\n \t\tmunmap(msl->base_va, mem_sz);\n \t\tmsl->base_va = NULL;\n+\t\tmsl->heap = 0;\n \n \t\t/* destroy backing fbarray */\n \t\trte_fbarray_destroy(&msl->memseg_arr);\ndiff --git a/lib/librte_eal/linux/eal/eal_vfio.c b/lib/librte_eal/linux/eal/eal_vfio.c\nindex d9541b1220..d5a2bbea0d 100644\n--- a/lib/librte_eal/linux/eal/eal_vfio.c\n+++ b/lib/librte_eal/linux/eal/eal_vfio.c\n@@ -1250,7 +1250,16 @@ type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,\n {\n \tint *vfio_container_fd = arg;\n \n-\tif (msl->external)\n+\t/* skip external memory that isn't a heap */\n+\tif (msl->external && !msl->heap)\n+\t\treturn 0;\n+\n+\t/* skip any segments with invalid IOVA addresses */\n+\tif (ms->iova == RTE_BAD_IOVA)\n+\t\treturn 0;\n+\n+\t/* if IOVA mode is VA, we've already mapped the internal segments */\n+\tif (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)\n \t\treturn 0;\n \n \treturn vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,\n@@ 
-1313,12 +1322,18 @@ vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,\n static int\n vfio_type1_dma_map(int vfio_container_fd)\n {\n+\tint ret;\n \tif (rte_eal_iova_mode() == RTE_IOVA_VA) {\n \t\t/* with IOVA as VA mode, we can get away with mapping contiguous\n \t\t * chunks rather than going page-by-page.\n \t\t */\n-\t\treturn rte_memseg_contig_walk(type1_map_contig,\n+\t\tret = rte_memseg_contig_walk(type1_map_contig,\n \t\t\t\t&vfio_container_fd);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\t/* we have to continue the walk because we've skipped the\n+\t\t * external segments during the config walk.\n+\t\t */\n \t}\n \treturn rte_memseg_walk(type1_map, &vfio_container_fd);\n }\n@@ -1410,7 +1425,15 @@ vfio_spapr_map_walk(const struct rte_memseg_list *msl,\n {\n \tstruct spapr_remap_walk_param *param = arg;\n \n-\tif (msl->external || ms->addr_64 == param->addr_64)\n+\t/* skip external memory that isn't a heap */\n+\tif (msl->external && !msl->heap)\n+\t\treturn 0;\n+\n+\t/* skip any segments with invalid IOVA addresses */\n+\tif (ms->iova == RTE_BAD_IOVA)\n+\t\treturn 0;\n+\n+\tif (ms->addr_64 == param->addr_64)\n \t\treturn 0;\n \n \treturn vfio_spapr_dma_do_map(param->vfio_container_fd, ms->addr_64, ms->iova,\n@@ -1423,7 +1446,15 @@ vfio_spapr_unmap_walk(const struct rte_memseg_list *msl,\n {\n \tstruct spapr_remap_walk_param *param = arg;\n \n-\tif (msl->external || ms->addr_64 == param->addr_64)\n+\t/* skip external memory that isn't a heap */\n+\tif (msl->external && !msl->heap)\n+\t\treturn 0;\n+\n+\t/* skip any segments with invalid IOVA addresses */\n+\tif (ms->iova == RTE_BAD_IOVA)\n+\t\treturn 0;\n+\n+\tif (ms->addr_64 == param->addr_64)\n \t\treturn 0;\n \n \treturn vfio_spapr_dma_do_map(param->vfio_container_fd, ms->addr_64, ms->iova,\n@@ -1443,7 +1474,12 @@ vfio_spapr_window_size_walk(const struct rte_memseg_list *msl,\n \tstruct spapr_walk_param *param = arg;\n \tuint64_t max = ms->iova + ms->len;\n \n-\tif 
(msl->external)\n+\t/* skip external memory that isn't a heap */\n+\tif (msl->external && !msl->heap)\n+\t\treturn 0;\n+\n+\t/* skip any segments with invalid IOVA addresses */\n+\tif (ms->iova == RTE_BAD_IOVA)\n \t\treturn 0;\n \n \t/* do not iterate ms we haven't mapped yet  */\n",
    "prefixes": [
        "19.11"
    ]
}