[dpdk-dev,08/21] vhost: iotlb: add pending miss request list and helpers

Message ID 20170831095023.21037-9-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Yuanhan Liu

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Maxime Coquelin Aug. 31, 2017, 9:50 a.m. UTC
  In order to be able to handle other ports or queues while waiting
for an IOTLB miss reply, a pending list is created so that the waiter
can return and retry later by sending the miss request again.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/iotlb.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++--
 lib/librte_vhost/iotlb.h |  4 +++
 lib/librte_vhost/vhost.h |  1 +
 3 files changed, 91 insertions(+), 2 deletions(-)
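
To make the intended flow a bit more concrete, below is a minimal caller-side
sketch (not part of the patch), assuming it lives inside lib/librte_vhost so
the internal headers are available: on a cache miss, the translation path
first checks whether a miss request for this IOVA/permission is already
pending, and only records a new pending entry and sends a miss request when
it is not. The send_iotlb_miss() helper is hypothetical here; the actual miss
request would go out over the vhost-user channel handled elsewhere in the
series.

	/*
	 * Caller-side sketch (not part of this patch). The helper
	 * send_iotlb_miss() is hypothetical.
	 */
	#include <stdint.h>

	#include "iotlb.h"
	#include "vhost.h"

	/* Hypothetical: posts an IOTLB miss request for @iova to the master. */
	int send_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm);

	static uint64_t
	sketch_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t *size, uint8_t perm)
	{
		uint64_t vva;

		/* Fast path: the translation is already in the IOTLB cache. */
		vva = vhost_user_iotlb_cache_find(vq, iova, size, perm);
		if (vva)
			return vva;

		/*
		 * Slow path: send a miss request only if no identical request
		 * is already pending, and record it in the pending list.
		 */
		if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
			vhost_user_iotlb_pending_insert(vq, iova, perm);
			send_iotlb_miss(dev, iova, perm);
		}

		/*
		 * Return 0 so the caller can back off and retry later, once
		 * the IOTLB update has arrived; the pending entry is removed
		 * when the matching entry is inserted into the cache.
		 */
		return 0;
	}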
  

Comments

Tiwei Bie Sept. 5, 2017, 7:11 a.m. UTC | #1
On Thu, Aug 31, 2017 at 11:50:10AM +0200, Maxime Coquelin wrote:
> In order to be able to handle other ports or queues while waiting
> for an IOTLB miss reply, a pending list is created so that the waiter
> can return and retry later by sending the miss request again.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/librte_vhost/iotlb.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++--
>  lib/librte_vhost/iotlb.h |  4 +++
>  lib/librte_vhost/vhost.h |  1 +
>  3 files changed, 91 insertions(+), 2 deletions(-)
> 
> diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
> index 1b739dae5..d014bfe98 100644
> --- a/lib/librte_vhost/iotlb.c
> +++ b/lib/librte_vhost/iotlb.c
> @@ -49,7 +49,86 @@ struct vhost_iotlb_entry {
>  	uint8_t perm;
>  };
>  
> -#define IOTLB_CACHE_SIZE 1024
> +#define IOTLB_CACHE_SIZE 2048
> +
> +static void vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
> +{
> +	struct vhost_iotlb_entry *node, *temp_node;
> +
> +	rte_rwlock_write_lock(&vq->iotlb_lock);
> +
> +	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
> +		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
> +		rte_mempool_put(vq->iotlb_pool, node);
> +	}
> +
> +	rte_rwlock_write_unlock(&vq->iotlb_lock);
> +}
> +
> +int vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
> +				uint8_t perm)
> +{
> +	struct vhost_iotlb_entry *node;
> +	int found = 0;
> +

The return value of this function is boolean. So it's better
to return bool instead of int.

> +	rte_rwlock_read_lock(&vq->iotlb_lock);
> +
> +	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
> +		if ((node->iova == iova) && (node->perm == perm)) {
> +			found = 1;
> +			break;
> +		}
> +	}
> +
> +	rte_rwlock_read_unlock(&vq->iotlb_lock);
> +
> +	return found;
> +}
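
For reference, a minimal sketch of the change suggested above, as the function
could look in iotlb.c, assuming <stdbool.h> is acceptable in this file; only
the return type and the local flag change, and the declaration in iotlb.h
would be updated accordingly:

	#include <stdbool.h>

	bool
	vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
					uint8_t perm)
	{
		struct vhost_iotlb_entry *node;
		bool found = false;

		rte_rwlock_read_lock(&vq->iotlb_lock);

		TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
			if ((node->iova == iova) && (node->perm == perm)) {
				found = true;
				break;
			}
		}

		rte_rwlock_read_unlock(&vq->iotlb_lock);

		return found;
	}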
> +
> +void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
> +				uint64_t iova, uint8_t perm)
> +{
> +	struct vhost_iotlb_entry *node;
> +	int ret;
> +
> +	ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
> +	if (ret) {
> +		RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool empty, invalidate cache\n");

I think the log level should be INFO or the like, not ERR.

> +		vhost_user_iotlb_pending_remove_all(vq);
> +		ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
> +		if (ret) {
> +			RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
> +			return;
> +		}
> +	}
> +
> +	node->iova = iova;
> +	node->perm = perm;
> +
> +	rte_rwlock_write_lock(&vq->iotlb_lock);
> +
> +	TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
> +
> +	rte_rwlock_write_unlock(&vq->iotlb_lock);
> +}
> +
> +static void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
> +				uint64_t iova, uint64_t size, uint8_t perm)
> +{
> +	struct vhost_iotlb_entry *node, *temp_node;
> +
> +	/* .iotlb_lock already locked by the caller */
> +	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
> +		if (node->iova < iova)
> +			continue;
> +		if (node->iova >= iova + size)
> +			continue;
> +		if ((node->perm & perm) != node->perm)
> +			continue;
> +		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
> +		rte_mempool_put(vq->iotlb_pool, node);
> +	}
> +}
>  
>  static void vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
>  {
> @@ -106,7 +185,10 @@ void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
>  	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
>  
>  unlock:
> +	vhost_user_iotlb_pending_remove(vq, iova, size, perm);
> +
>  	rte_rwlock_write_unlock(&vq->iotlb_lock);
> +

This empty line should be removed.

Best regards,
Tiwei Bie

>  }
>  
>  void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
  
Maxime Coquelin Sept. 5, 2017, 3:18 p.m. UTC | #2
On 09/05/2017 09:11 AM, Tiwei Bie wrote:
> On Thu, Aug 31, 2017 at 11:50:10AM +0200, Maxime Coquelin wrote:
>> In order to be able to handle other ports or queues while waiting
>> for an IOTLB miss reply, a pending list is created so that the waiter
>> can return and retry later by sending the miss request again.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>> ---
>>   lib/librte_vhost/iotlb.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++--
>>   lib/librte_vhost/iotlb.h |  4 +++
>>   lib/librte_vhost/vhost.h |  1 +
>>   3 files changed, 91 insertions(+), 2 deletions(-)
>>
>> diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
>> index 1b739dae5..d014bfe98 100644
>> --- a/lib/librte_vhost/iotlb.c
>> +++ b/lib/librte_vhost/iotlb.c
>> @@ -49,7 +49,86 @@ struct vhost_iotlb_entry {
>>   	uint8_t perm;
>>   };
>>   
>> -#define IOTLB_CACHE_SIZE 1024
>> +#define IOTLB_CACHE_SIZE 2048
>> +
>> +static void vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
>> +{
>> +	struct vhost_iotlb_entry *node, *temp_node;
>> +
>> +	rte_rwlock_write_lock(&vq->iotlb_lock);
>> +
>> +	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
>> +		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
>> +		rte_mempool_put(vq->iotlb_pool, node);
>> +	}
>> +
>> +	rte_rwlock_write_unlock(&vq->iotlb_lock);
>> +}
>> +
>> +int vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
>> +				uint8_t perm)
>> +{
>> +	struct vhost_iotlb_entry *node;
>> +	int found = 0;
>> +
> 
> The return value of this function is boolean. So it's better
> to return bool instead of int.

Fixed.

>> +	rte_rwlock_read_lock(&vq->iotlb_lock);
>> +
>> +	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
>> +		if ((node->iova == iova) && (node->perm == perm)) {
>> +			found = 1;
>> +			break;
>> +		}
>> +	}
>> +
>> +	rte_rwlock_read_unlock(&vq->iotlb_lock);
>> +
>> +	return found;
>> +}
>> +
>> +void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
>> +				uint64_t iova, uint8_t perm)
>> +{
>> +	struct vhost_iotlb_entry *node;
>> +	int ret;
>> +
>> +	ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
>> +	if (ret) {
>> +		RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool empty, invalidate cache\n");
> 
> I think the log level should be INFO or the like, not ERR.

Fixed.
> 
>> +		vhost_user_iotlb_pending_remove_all(vq);
>> +		ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
>> +		if (ret) {
>> +			RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
>> +			return;
>> +		}
>> +	}
>> +
>> +	node->iova = iova;
>> +	node->perm = perm;
>> +
>> +	rte_rwlock_write_lock(&vq->iotlb_lock);
>> +
>> +	TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
>> +
>> +	rte_rwlock_write_unlock(&vq->iotlb_lock);
>> +}
>> +
>> +static void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
>> +				uint64_t iova, uint64_t size, uint8_t perm)
>> +{
>> +	struct vhost_iotlb_entry *node, *temp_node;
>> +
>> +	/* .iotlb_lock already locked by the caller */
>> +	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
>> +		if (node->iova < iova)
>> +			continue;
>> +		if (node->iova >= iova + size)
>> +			continue;
>> +		if ((node->perm & perm) != node->perm)
>> +			continue;
>> +		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
>> +		rte_mempool_put(vq->iotlb_pool, node);
>> +	}
>> +}
>>   
>>   static void vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
>>   {
>> @@ -106,7 +185,10 @@ void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
>>   	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
>>   
>>   unlock:
>> +	vhost_user_iotlb_pending_remove(vq, iova, size, perm);
>> +
>>   	rte_rwlock_write_unlock(&vq->iotlb_lock);
>> +
> 
> This empty line should be removed.

Yes, this part disappears in the next version, as I squashed patch 21
into patches 7 & 8.

Thanks,
Maxime
> Best regards,
> Tiwei Bie
> 
>>   }
>>   
>>   void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
  

Patch

diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
index 1b739dae5..d014bfe98 100644
--- a/lib/librte_vhost/iotlb.c
+++ b/lib/librte_vhost/iotlb.c
@@ -49,7 +49,86 @@  struct vhost_iotlb_entry {
 	uint8_t perm;
 };
 
-#define IOTLB_CACHE_SIZE 1024
+#define IOTLB_CACHE_SIZE 2048
+
+static void vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
+{
+	struct vhost_iotlb_entry *node, *temp_node;
+
+	rte_rwlock_write_lock(&vq->iotlb_lock);
+
+	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+		rte_mempool_put(vq->iotlb_pool, node);
+	}
+
+	rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+int vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+				uint8_t perm)
+{
+	struct vhost_iotlb_entry *node;
+	int found = 0;
+
+	rte_rwlock_read_lock(&vq->iotlb_lock);
+
+	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
+		if ((node->iova == iova) && (node->perm == perm)) {
+			found = 1;
+			break;
+		}
+	}
+
+	rte_rwlock_read_unlock(&vq->iotlb_lock);
+
+	return found;
+}
+
+void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
+				uint64_t iova, uint8_t perm)
+{
+	struct vhost_iotlb_entry *node;
+	int ret;
+
+	ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+	if (ret) {
+		RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool empty, invalidate cache\n");
+		vhost_user_iotlb_pending_remove_all(vq);
+		ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+		if (ret) {
+			RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
+			return;
+		}
+	}
+
+	node->iova = iova;
+	node->perm = perm;
+
+	rte_rwlock_write_lock(&vq->iotlb_lock);
+
+	TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
+
+	rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+static void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
+				uint64_t iova, uint64_t size, uint8_t perm)
+{
+	struct vhost_iotlb_entry *node, *temp_node;
+
+	/* .iotlb_lock already locked by the caller */
+	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+		if (node->iova < iova)
+			continue;
+		if (node->iova >= iova + size)
+			continue;
+		if ((node->perm & perm) != node->perm)
+			continue;
+		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+		rte_mempool_put(vq->iotlb_pool, node);
+	}
+}
 
 static void vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
 {
@@ -106,7 +185,10 @@  void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
 	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
 
 unlock:
+	vhost_user_iotlb_pending_remove(vq, iova, size, perm);
+
 	rte_rwlock_write_unlock(&vq->iotlb_lock);
+
 }
 
 void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
@@ -189,9 +271,10 @@  int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
 	if (vq->iotlb_pool) {
 		/*
 		 * The cache has already been initialized,
-		 * just drop all entries
+		 * just drop all cached and pending entries.
 		 */
 		vhost_user_iotlb_cache_remove_all(vq);
+		vhost_user_iotlb_pending_remove_all(vq);
 		return 0;
 	}
 
@@ -204,6 +287,7 @@  int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
 	rte_rwlock_init(&vq->iotlb_lock);
 
 	TAILQ_INIT(&vq->iotlb_list);
+	TAILQ_INIT(&vq->iotlb_pending_list);
 
 	snprintf(pool_name, sizeof(pool_name), "iotlb_cache_%d_%d",
 			dev->vid, vq_index);
diff --git a/lib/librte_vhost/iotlb.h b/lib/librte_vhost/iotlb.h
index 459820762..4be1f7e85 100644
--- a/lib/librte_vhost/iotlb.h
+++ b/lib/librte_vhost/iotlb.h
@@ -41,6 +41,10 @@  void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
 					uint64_t iova, uint64_t size);
 uint64_t vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
 					uint64_t *size, uint8_t perm);
+int vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+						uint8_t perm);
+void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq, uint64_t iova,
+						uint8_t perm);
 int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);
 
 #endif /* _VHOST_IOTLB_H_ */
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 7816a92b5..a41bacea7 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -119,6 +119,7 @@  struct vhost_virtqueue {
 	rte_rwlock_t	iotlb_lock;
 	struct rte_mempool *iotlb_pool;
 	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
+	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
 } __rte_cache_aligned;
 
 /* Old kernels have no such macros defined */