[21.02,v1] net/virtio: fix memory init with vDPA backend

Message ID 20201124124557.140048-1-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series [21.02,v1] net/virtio: fix memory init with vDPA backend

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/travis-robot success Travis build: passed

Commit Message

Maxime Coquelin Nov. 24, 2020, 12:45 p.m. UTC
This patch fixes an overhead seen with the mlx5-vdpa kernel
driver, where all of the memory tables get updated for every
page in the mapped area. For example, with 2MB hugepages, a
single IOTLB_UPDATE for a 1GB region causes 512 memory-table
updates on the mlx5-vdpa side.

In batching mode, the mlx5 driver triggers a single memory
update for all the IOTLB updates that happen between the
batch-begin and batch-end commands.

Fixes: 6b901437056e ("net/virtio: introduce vhost-vDPA backend")
Cc: stable@dpdk.org

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_user/vhost_vdpa.c | 90 +++++++++++++++++++--
 1 file changed, 85 insertions(+), 5 deletions(-)
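To illustrate the batching described above, the message flow on
the vhost-vdpa fd looks roughly like this (a minimal sketch, not
part of the patch, assuming kernel headers recent enough to
expose struct vhost_msg and the VHOST_IOTLB_BATCH_* types; the
function name is hypothetical and error handling is elided):

#include <stdint.h>
#include <unistd.h>
#include <linux/vhost.h>	/* struct vhost_msg, VHOST_IOTLB_* */

/* Map one region inside a batch: the kernel driver rebuilds its
 * memory tables once, at BATCH_END, instead of once per
 * IOTLB_UPDATE. write() return values are ignored for brevity.
 */
static void
iotlb_batch_sketch(int vhostfd, uint64_t iova, uint64_t uaddr,
			uint64_t len)
{
	struct vhost_msg msg = { .type = VHOST_IOTLB_MSG_V2 };

	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
	write(vhostfd, &msg, sizeof(msg));

	msg.iotlb.type = VHOST_IOTLB_UPDATE;	/* repeated per region */
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = uaddr;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;
	write(vhostfd, &msg, sizeof(msg));

	msg.iotlb.type = VHOST_IOTLB_BATCH_END;	/* single commit here */
	write(vhostfd, &msg, sizeof(msg));
}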
  

Comments

Jason Wang Nov. 25, 2020, 2:18 a.m. UTC | #1
On 2020/11/24 8:45 p.m., Maxime Coquelin wrote:
> This patch fixes an overhead seen with the mlx5-vdpa kernel
> driver, where all of the memory tables get updated for every
> page in the mapped area. For example, with 2MB hugepages, a
> single IOTLB_UPDATE for a 1GB region causes 512 memory-table
> updates on the mlx5-vdpa side.
>
> In batching mode, the mlx5 driver triggers a single memory
> update for all the IOTLB updates that happen between the
> batch-begin and batch-end commands.
>
> Fixes: 6b901437056e ("net/virtio: introduce vhost-vDPA backend")
> Cc: stable@dpdk.org
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>   drivers/net/virtio/virtio_user/vhost_vdpa.c | 90 +++++++++++++++++++--
>   1 file changed, 85 insertions(+), 5 deletions(-)


Hi Maxime:

To be safe, it's better to check whether or not the kernel
supports the batching flag.

This can be done by querying VHOST_GET_BACKEND_FEATURES and
testing whether VHOST_BACKEND_F_IOTLB_BATCH is set.

Thanks
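
A minimal sketch of the suggested probe (assuming the
VHOST_GET_BACKEND_FEATURES ioctl and the
VHOST_BACKEND_F_IOTLB_BATCH bit from the Linux uapi headers;
the helper name is hypothetical):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>	/* VHOST_GET_BACKEND_FEATURES,
				 * VHOST_BACKEND_F_IOTLB_BATCH */

/* Returns 1 if the vhost-vdpa backend advertises IOTLB batching,
 * 0 otherwise (including when the probe itself fails). */
static int
vhost_vdpa_iotlb_batch_supported(int vhostfd)
{
	uint64_t features;

	if (ioctl(vhostfd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
		return 0;

	return !!(features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH));
}

The batched dma_map/dma_unmap callbacks could then fall back to
the plain vhost_vdpa_dma_map/vhost_vdpa_dma_unmap when the probe
returns 0.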


>
> diff --git a/drivers/net/virtio/virtio_user/vhost_vdpa.c b/drivers/net/virtio/virtio_user/vhost_vdpa.c
> index c7b9349fc8..6d0200516d 100644
> --- a/drivers/net/virtio/virtio_user/vhost_vdpa.c
> +++ b/drivers/net/virtio/virtio_user/vhost_vdpa.c
> @@ -66,6 +66,8 @@ struct vhost_iotlb_msg {
>   #define VHOST_IOTLB_UPDATE         2
>   #define VHOST_IOTLB_INVALIDATE     3
>   #define VHOST_IOTLB_ACCESS_FAIL    4
> +#define VHOST_IOTLB_BATCH_BEGIN    5
> +#define VHOST_IOTLB_BATCH_END      6
>   	uint8_t type;
>   };
>   
> @@ -80,6 +82,40 @@ struct vhost_msg {
>   	};
>   };
>   
> +static int
> +vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
> +{
> +	struct vhost_msg msg = {};
> +
> +	msg.type = VHOST_IOTLB_MSG_V2;
> +	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
> +
> +	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
> +		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
> +				strerror(errno));
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
> +{
> +	struct vhost_msg msg = {};
> +
> +	msg.type = VHOST_IOTLB_MSG_V2;
> +	msg.iotlb.type = VHOST_IOTLB_BATCH_END;
> +
> +	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
> +		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
> +				strerror(errno));
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
>   static int
>   vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
>   				  uint64_t iova, size_t len)
> @@ -122,6 +158,39 @@ vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
>   	return 0;
>   }
>   
> +static int
> +vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
> +				  uint64_t iova, size_t len)
> +{
> +	int ret;
> +
> +	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
> +		return -1;
> +
> +	ret = vhost_vdpa_dma_map(dev, addr, iova, len);
> +
> +	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
> +		return -1;
> +
> +	return ret;
> +}
> +
> +static int
> +vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
> +				  uint64_t iova, size_t len)
> +{
> +	int ret;
> +
> +	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
> +		return -1;
> +
> +	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);
> +
> +	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
> +		return -1;
> +
> +	return ret;
> +}
>   
>   static int
>   vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
> @@ -159,21 +228,32 @@ vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
>   static int
>   vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
>   {
> +	int ret;
> +
> +	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
> +		return -1;
> +
>   	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);
>   
>   	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
>   		/* with IOVA as VA mode, we can get away with mapping contiguous
>   		 * chunks rather than going page-by-page.
>   		 */
> -		int ret = rte_memseg_contig_walk_thread_unsafe(
> +		ret = rte_memseg_contig_walk_thread_unsafe(
>   				vhost_vdpa_map_contig, dev);
>   		if (ret)
> -			return ret;
> +			goto batch_end;
>   		/* we have to continue the walk because we've skipped the
>   		 * external segments during the config walk.
>   		 */
>   	}
> -	return rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
> +	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
> +
> +batch_end:
> +	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
> +		return -1;
> +
> +	return ret;
>   }
>   
>   /* with below features, vhost vdpa does not need to do the checksum and TSO,
> @@ -293,6 +373,6 @@ struct virtio_user_backend_ops virtio_ops_vdpa = {
>   	.setup = vhost_vdpa_setup,
>   	.send_request = vhost_vdpa_ioctl,
>   	.enable_qp = vhost_vdpa_enable_queue_pair,
> -	.dma_map = vhost_vdpa_dma_map,
> -	.dma_unmap = vhost_vdpa_dma_unmap,
> +	.dma_map = vhost_vdpa_dma_map_batch,
> +	.dma_unmap = vhost_vdpa_dma_unmap_batch,
>   };
  

Patch

diff --git a/drivers/net/virtio/virtio_user/vhost_vdpa.c b/drivers/net/virtio/virtio_user/vhost_vdpa.c
index c7b9349fc8..6d0200516d 100644
--- a/drivers/net/virtio/virtio_user/vhost_vdpa.c
+++ b/drivers/net/virtio/virtio_user/vhost_vdpa.c
@@ -66,6 +66,8 @@  struct vhost_iotlb_msg {
 #define VHOST_IOTLB_UPDATE         2
 #define VHOST_IOTLB_INVALIDATE     3
 #define VHOST_IOTLB_ACCESS_FAIL    4
+#define VHOST_IOTLB_BATCH_BEGIN    5
+#define VHOST_IOTLB_BATCH_END      6
 	uint8_t type;
 };
 
@@ -80,6 +82,40 @@  struct vhost_msg {
 	};
 };
 
+static int
+vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
+{
+	struct vhost_msg msg = {};
+
+	msg.type = VHOST_IOTLB_MSG_V2;
+	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
+
+	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
+		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
+				strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
+{
+	struct vhost_msg msg = {};
+
+	msg.type = VHOST_IOTLB_MSG_V2;
+	msg.iotlb.type = VHOST_IOTLB_BATCH_END;
+
+	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
+		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
+				strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
 static int
 vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
 				  uint64_t iova, size_t len)
@@ -122,6 +158,39 @@  vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
 	return 0;
 }
 
+static int
+vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
+				  uint64_t iova, size_t len)
+{
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
+	ret = vhost_vdpa_dma_map(dev, addr, iova, len);
+
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
+}
+
+static int
+vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
+				  uint64_t iova, size_t len)
+{
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
+	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);
+
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
+}
 
 static int
 vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
@@ -159,21 +228,32 @@  vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 static int
 vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
 {
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
 	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);
 
 	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
 		/* with IOVA as VA mode, we can get away with mapping contiguous
 		 * chunks rather than going page-by-page.
 		 */
-		int ret = rte_memseg_contig_walk_thread_unsafe(
+		ret = rte_memseg_contig_walk_thread_unsafe(
 				vhost_vdpa_map_contig, dev);
 		if (ret)
-			return ret;
+			goto batch_end;
 		/* we have to continue the walk because we've skipped the
 		 * external segments during the config walk.
 		 */
 	}
-	return rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
+	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
+
+batch_end:
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
 }
 
 /* with below features, vhost vdpa does not need to do the checksum and TSO,
@@ -293,6 +373,6 @@  struct virtio_user_backend_ops virtio_ops_vdpa = {
 	.setup = vhost_vdpa_setup,
 	.send_request = vhost_vdpa_ioctl,
 	.enable_qp = vhost_vdpa_enable_queue_pair,
-	.dma_map = vhost_vdpa_dma_map,
-	.dma_unmap = vhost_vdpa_dma_unmap,
+	.dma_map = vhost_vdpa_dma_map_batch,
+	.dma_unmap = vhost_vdpa_dma_unmap_batch,
 };