get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (a full update of the patch's writable fields).

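As a rough illustration of these methods, the sketch below uses Python's requests library against this endpoint. Read access needs no credentials, while PATCH/PUT require a Patchwork API token belonging to a maintainer of the project; the token value and the new state used here are placeholders, not values taken from this page.

import requests

URL = "http://patches.dpdk.org/api/patches/84661/"

# GET: show the patch (no authentication needed for read access)
patch = requests.get(URL, params={"format": "json"}).json()
print(patch["name"], patch["state"])

# PATCH: partially update the patch, e.g. change its state.
# "<api-token>" and "accepted" are placeholders for illustration only.
resp = requests.patch(
    URL,
    headers={"Authorization": "Token <api-token>"},
    json={"state": "accepted"},
)
resp.raise_for_status()

The raw exchange recorded below is the GET case.
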
GET /api/patches/84661/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 84661,
    "url": "http://patches.dpdk.org/api/patches/84661/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201201193302.28131-2-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201201193302.28131-2-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201201193302.28131-2-ndabilpuram@marvell.com",
    "date": "2020-12-01T19:32:59",
    "name": "[v3,1/4] vfio: revert changes for map contiguous areas in one go",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "1f4487178d0679a5f5fc8718deb876a122545bf1",
    "submitter": {
        "id": 1202,
        "url": "http://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201201193302.28131-2-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 14163,
            "url": "http://patches.dpdk.org/api/series/14163/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=14163",
            "date": "2020-12-01T19:32:58",
            "name": "fix issue with partial DMA unmap",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/14163/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/84661/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/84661/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8E41EA04DB;\n\tTue,  1 Dec 2020 20:33:50 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id ACD58C9A8;\n\tTue,  1 Dec 2020 20:33:33 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173]) by dpdk.org (Postfix) with ESMTP id 04A57C9A6;\n Tue,  1 Dec 2020 20:33:31 +0100 (CET)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 0B1JTwgR010909; Tue, 1 Dec 2020 11:33:30 -0800",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com with ESMTP id 353pxshe8u-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Tue, 01 Dec 2020 11:33:29 -0800",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2;\n Tue, 1 Dec 2020 11:33:27 -0800",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Tue, 1 Dec 2020 11:33:27 -0800",
            "from hyd1588t430.marvell.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id 5FBF73F7045;\n Tue,  1 Dec 2020 11:33:25 -0800 (PST)"
        ],
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "<anatoly.burakov@intel.com>, David Christensen <drc@linux.vnet.ibm.com>,\n <david.marchand@redhat.com>",
        "CC": "<jerinj@marvell.com>, <dev@dpdk.org>, Nithin Dabilpuram\n <ndabilpuram@marvell.com>, <stable@dpdk.org>",
        "Date": "Wed, 2 Dec 2020 01:02:59 +0530",
        "Message-ID": "<20201201193302.28131-2-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20201201193302.28131-1-ndabilpuram@marvell.com>",
        "References": "<20201012081106.10610-1-ndabilpuram@marvell.com>\n <20201201193302.28131-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.312, 18.0.737\n definitions=2020-12-01_09:2020-11-30,\n 2020-12-01 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v3 1/4] vfio: revert changes for map contiguous\n\tareas in one go",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "In order to save DMA entries limited by kernel both for externel\nmemory and hugepage memory, an attempt was made to map physically\ncontiguous memory in one go. This cannot be done as VFIO IOMMU type1\ndoes not support partially unmapping a previously mapped memory\nregion while Heap can request for multi page mapping and\npartial unmapping.\nHence for going back to old method of mapping/unmapping at\nmemseg granularity, this commit reverts\ncommit d1c7c0cdf7ba (\"vfio: map contiguous areas in one go\")\n\nAlso add documentation on what module parameter needs to be used\nto increase the per-container dma map limit for VFIO.\n\nFixes: d1c7c0cdf7ba (\"vfio: map contiguous areas in one go\")\nCc: anatoly.burakov@intel.com\nCc: stable@dpdk.org\n\nSigned-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\nAcked-by: Anatoly Burakov <anatoly.burakov@intel.com>\n---\n doc/guides/linux_gsg/linux_drivers.rst | 10 ++++++\n lib/librte_eal/linux/eal_vfio.c        | 59 +++++-----------------------------\n 2 files changed, 18 insertions(+), 51 deletions(-)",
    "diff": "diff --git a/doc/guides/linux_gsg/linux_drivers.rst b/doc/guides/linux_gsg/linux_drivers.rst\nindex 90635a4..9a662a7 100644\n--- a/doc/guides/linux_gsg/linux_drivers.rst\n+++ b/doc/guides/linux_gsg/linux_drivers.rst\n@@ -25,6 +25,16 @@ To make use of VFIO, the ``vfio-pci`` module must be loaded:\n VFIO kernel is usually present by default in all distributions,\n however please consult your distributions documentation to make sure that is the case.\n \n+For DMA mapping of either external memory or hugepages, VFIO interface is used.\n+VFIO does not support partial unmap of once mapped memory. Hence DPDK's memory is\n+mapped in hugepage granularity or system page granularity. Number of DMA\n+mappings is limited by kernel with user locked memory limit of a process(rlimit)\n+for system/hugepage memory. Another per-container overall limit applicable both\n+for external memory and system memory was added in kernel 5.1 defined by\n+VFIO module parameter ``dma_entry_limit`` with a default value of 64K.\n+When application is out of DMA entries, these limits need to be adjusted to\n+increase the allowed limit.\n+\n Since Linux version 5.7,\n the ``vfio-pci`` module supports the creation of virtual functions.\n After the PF is bound to ``vfio-pci`` module,\ndiff --git a/lib/librte_eal/linux/eal_vfio.c b/lib/librte_eal/linux/eal_vfio.c\nindex 0500824..64b134d 100644\n--- a/lib/librte_eal/linux/eal_vfio.c\n+++ b/lib/librte_eal/linux/eal_vfio.c\n@@ -517,11 +517,9 @@ static void\n vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,\n \t\tvoid *arg __rte_unused)\n {\n-\trte_iova_t iova_start, iova_expected;\n \tstruct rte_memseg_list *msl;\n \tstruct rte_memseg *ms;\n \tsize_t cur_len = 0;\n-\tuint64_t va_start;\n \n \tmsl = rte_mem_virt2memseg_list(addr);\n \n@@ -539,63 +537,22 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,\n \n \t/* memsegs are contiguous in memory */\n \tms = rte_mem_virt2memseg(addr, msl);\n-\n-\t/*\n-\t * This memory is not guaranteed to be contiguous, but it still could\n-\t * be, or it could have some small contiguous chunks. 
Since the number\n-\t * of VFIO mappings is limited, and VFIO appears to not concatenate\n-\t * adjacent mappings, we have to do this ourselves.\n-\t *\n-\t * So, find contiguous chunks, then map them.\n-\t */\n-\tva_start = ms->addr_64;\n-\tiova_start = iova_expected = ms->iova;\n \twhile (cur_len < len) {\n-\t\tbool new_contig_area = ms->iova != iova_expected;\n-\t\tbool last_seg = (len - cur_len) == ms->len;\n-\t\tbool skip_last = false;\n-\n-\t\t/* only do mappings when current contiguous area ends */\n-\t\tif (new_contig_area) {\n-\t\t\tif (type == RTE_MEM_EVENT_ALLOC)\n-\t\t\t\tvfio_dma_mem_map(default_vfio_cfg, va_start,\n-\t\t\t\t\t\tiova_start,\n-\t\t\t\t\t\tiova_expected - iova_start, 1);\n-\t\t\telse\n-\t\t\t\tvfio_dma_mem_map(default_vfio_cfg, va_start,\n-\t\t\t\t\t\tiova_start,\n-\t\t\t\t\t\tiova_expected - iova_start, 0);\n-\t\t\tva_start = ms->addr_64;\n-\t\t\tiova_start = ms->iova;\n-\t\t}\n \t\t/* some memory segments may have invalid IOVA */\n \t\tif (ms->iova == RTE_BAD_IOVA) {\n \t\t\tRTE_LOG(DEBUG, EAL, \"Memory segment at %p has bad IOVA, skipping\\n\",\n \t\t\t\t\tms->addr);\n-\t\t\tskip_last = true;\n+\t\t\tgoto next;\n \t\t}\n-\t\tiova_expected = ms->iova + ms->len;\n+\t\tif (type == RTE_MEM_EVENT_ALLOC)\n+\t\t\tvfio_dma_mem_map(default_vfio_cfg, ms->addr_64,\n+\t\t\t\t\tms->iova, ms->len, 1);\n+\t\telse\n+\t\t\tvfio_dma_mem_map(default_vfio_cfg, ms->addr_64,\n+\t\t\t\t\tms->iova, ms->len, 0);\n+next:\n \t\tcur_len += ms->len;\n \t\t++ms;\n-\n-\t\t/*\n-\t\t * don't count previous segment, and don't attempt to\n-\t\t * dereference a potentially invalid pointer.\n-\t\t */\n-\t\tif (skip_last && !last_seg) {\n-\t\t\tiova_expected = iova_start = ms->iova;\n-\t\t\tva_start = ms->addr_64;\n-\t\t} else if (!skip_last && last_seg) {\n-\t\t\t/* this is the last segment and we're not skipping */\n-\t\t\tif (type == RTE_MEM_EVENT_ALLOC)\n-\t\t\t\tvfio_dma_mem_map(default_vfio_cfg, va_start,\n-\t\t\t\t\t\tiova_start,\n-\t\t\t\t\t\tiova_expected - iova_start, 1);\n-\t\t\telse\n-\t\t\t\tvfio_dma_mem_map(default_vfio_cfg, va_start,\n-\t\t\t\t\t\tiova_start,\n-\t\t\t\t\t\tiova_expected - iova_start, 0);\n-\t\t}\n \t}\n }\n \n",
    "prefixes": [
        "v3",
        "1/4"
    ]
}
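
The response links to related resources (series, comments, checks and the raw mbox). Below is a minimal sketch of following those links, assuming the linked series detail endpoint lists its patches as in current Patchwork; the output filename is an arbitrary choice.

import requests

patch = requests.get(
    "http://patches.dpdk.org/api/patches/84661/", params={"format": "json"}
).json()

# Save the raw patch as an mbox file, suitable for applying with `git am`
with open("patch-84661.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)

# Walk the series this patch belongs to and list its sibling patches
for series in patch["series"]:
    detail = requests.get(series["url"], params={"format": "json"}).json()
    for p in detail.get("patches", []):
        print(p["id"], p["name"])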