[18.11] vfio: map contiguous areas in one go

Message ID 4275aaf7d248f0e2a05ac21c05d682ce7ea4ad56.1600255570.git.anatoly.burakov@intel.com (mailing list archive)
State Not Applicable, archived
Series [18.11] vfio: map contiguous areas in one go

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  fail      apply issues

Commit Message

Burakov, Anatoly Sept. 16, 2020, 11:26 a.m. UTC
  Currently, when we are creating DMA mappings for memory that is
either external or backed by hugepages in IOVA-as-PA mode, we
assume that each page is discontiguous from the next. This may
not actually be the case, especially for external memory, where
the user is able to create their own IOVA table and make it
contiguous. This is a problem because VFIO has a limited number
of DMA mappings, and it does not appear to concatenate adjacent
mappings: each mapping is treated as separate even when it covers
an area adjacent to an existing one.

Fix this so that we always map contiguous memory in a single
chunk, as opposed to mapping each segment separately.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---

Notes:
    Resend for stable as original patch [1] no longer applies
    
    [1] http://patches.dpdk.org/patch/66041/

 lib/librte_eal/linuxapp/eal/eal_vfio.c | 59 ++++++++++++++++++++++----
 1 file changed, 51 insertions(+), 8 deletions(-)
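As a rough illustration of the mapping-count pressure described in the
commit message (a standalone sketch with hypothetical page and region
sizes, not DPDK code): mapping an IOVA-contiguous region page by page
consumes one VFIO DMA entry per page, whereas mapping it as one chunk
consumes a single entry. The kernel's vfio_iommu_type1 driver caps
concurrent mappings via its dma_entry_limit module parameter (65535 by
default), so per-page mapping of large contiguous areas can exhaust
that budget quickly.

/*
 * Standalone sketch (not DPDK code): compare how many VFIO DMA entries
 * a contiguous region consumes when mapped page by page versus as a
 * single coalesced chunk. Sizes below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SZ   (2ULL << 20)   /* hypothetical 2M hugepage */
#define REGION_SZ (1ULL << 30)   /* hypothetical 1G IOVA-contiguous region */

int main(void)
{
	uint64_t per_page = REGION_SZ / PAGE_SZ;  /* one entry per page */
	uint64_t coalesced = 1;                   /* one entry per chunk */

	printf("per-page mappings: %" PRIu64 ", coalesced mappings: %" PRIu64 "\n",
			per_page, coalesced);
	return 0;
}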
  

Comments

Mei, JianweiX Sept. 17, 2020, 2:48 a.m. UTC | #1
Tested-by: Mei, JianweiX <jianweix.mei@intel.com>

-----Original Message-----
From: dev <dev-bounces@dpdk.org> On Behalf Of Anatoly Burakov
Sent: Wednesday, September 16, 2020 7:26 PM
To: dev@dpdk.org
Cc: stable@dpdk.org
Subject: [dpdk-dev] [PATCH 18.11] vfio: map contiguous areas in one go


Patch

diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 2f84e0f215..be969bbaac 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -510,9 +510,11 @@ static void
 vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
 		void *arg __rte_unused)
 {
+	rte_iova_t iova_start, iova_expected;
 	struct rte_memseg_list *msl;
 	struct rte_memseg *ms;
 	size_t cur_len = 0;
+	uint64_t va_start;
 
 	msl = rte_mem_virt2memseg_list(addr);
 
@@ -530,22 +532,63 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
 
 	/* memsegs are contiguous in memory */
 	ms = rte_mem_virt2memseg(addr, msl);
+
+	/*
+	 * This memory is not guaranteed to be contiguous, but it still could
+	 * be, or it could have some small contiguous chunks. Since the number
+	 * of VFIO mappings is limited, and VFIO appears to not concatenate
+	 * adjacent mappings, we have to do this ourselves.
+	 *
+	 * So, find contiguous chunks, then map them.
+	 */
+	va_start = ms->addr_64;
+	iova_start = iova_expected = ms->iova;
 	while (cur_len < len) {
+		bool new_contig_area = ms->iova != iova_expected;
+		bool last_seg = (len - cur_len) == ms->len;
+		bool skip_last = false;
+
+		/* only do mappings when current contiguous area ends */
+		if (new_contig_area) {
+			if (type == RTE_MEM_EVENT_ALLOC)
+				vfio_dma_mem_map(default_vfio_cfg, va_start,
+						iova_start,
+						iova_expected - iova_start, 1);
+			else
+				vfio_dma_mem_map(default_vfio_cfg, va_start,
+						iova_start,
+						iova_expected - iova_start, 0);
+			va_start = ms->addr_64;
+			iova_start = ms->iova;
+		}
 		/* some memory segments may have invalid IOVA */
 		if (ms->iova == RTE_BAD_IOVA) {
 			RTE_LOG(DEBUG, EAL, "Memory segment at %p has bad IOVA, skipping\n",
 					ms->addr);
-			goto next;
+			skip_last = true;
 		}
-		if (type == RTE_MEM_EVENT_ALLOC)
-			vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
-					ms->iova, ms->len, 1);
-		else
-			vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
-					ms->iova, ms->len, 0);
-next:
+		iova_expected = ms->iova + ms->len;
 		cur_len += ms->len;
 		++ms;
+
+		/*
+		 * don't count previous segment, and don't attempt to
+		 * dereference a potentially invalid pointer.
+		 */
+		if (skip_last && !last_seg) {
+			iova_expected = iova_start = ms->iova;
+			va_start = ms->addr_64;
+		} else if (!skip_last && last_seg) {
+			/* this is the last segment and we're not skipping */
+			if (type == RTE_MEM_EVENT_ALLOC)
+				vfio_dma_mem_map(default_vfio_cfg, va_start,
+						iova_start,
+						iova_expected - iova_start, 1);
+			else
+				vfio_dma_mem_map(default_vfio_cfg, va_start,
+						iova_start,
+						iova_expected - iova_start, 0);
+		}
 	}
 }
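
For readers following the loop above, here is a standalone sketch of the
same coalescing idea, using made-up segment data rather than real memsegs
and a printf stand-in for the real map/unmap call (not the DPDK
implementation): grow the current chunk while each segment's IOVA matches
the expected value, flush the chunk on a gap or a bad IOVA, and flush once
more after the last segment.

/*
 * Standalone sketch of the coalescing idea (hypothetical data and a
 * printf stand-in for the real map call; not the DPDK implementation).
 * IOVA-adjacent segments are merged into a single mapping request.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>

#define BAD_IOVA UINT64_MAX

struct seg { uint64_t va, iova, len; };

static void map_chunk(uint64_t va, uint64_t iova, uint64_t len)
{
	printf("map va=0x%" PRIx64 " iova=0x%" PRIx64 " len=0x%" PRIx64 "\n",
			va, iova, len);
}

int main(void)
{
	/* three IOVA-contiguous 2M segments, one bad-IOVA segment, one more */
	struct seg segs[] = {
		{ 0x100000000, 0x10000000, 0x200000 },
		{ 0x100200000, 0x10200000, 0x200000 },
		{ 0x100400000, 0x10400000, 0x200000 },
		{ 0x100600000, BAD_IOVA,   0x200000 }, /* skipped */
		{ 0x100800000, 0x20000000, 0x200000 },
	};
	size_t i, n = sizeof(segs) / sizeof(segs[0]);
	uint64_t va_start = 0, iova_start = 0, iova_expected = 0;
	bool have_chunk = false;

	for (i = 0; i < n; i++) {
		struct seg *s = &segs[i];

		/* a gap or a bad IOVA ends the current contiguous chunk */
		if (have_chunk && s->iova != iova_expected) {
			map_chunk(va_start, iova_start,
					iova_expected - iova_start);
			have_chunk = false;
		}
		if (s->iova == BAD_IOVA)
			continue;
		if (!have_chunk) {
			va_start = s->va;
			iova_start = s->iova;
			have_chunk = true;
		}
		iova_expected = s->iova + s->len;
	}
	/* flush whatever chunk is left after the last segment */
	if (have_chunk)
		map_chunk(va_start, iova_start, iova_expected - iova_start);
	return 0;
}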