[dpdk-dev,05/21] vhost: add support to slave requests channel
Checks
Commit Message
Currently, only QEMU sends requests; the backend sends
replies. In some cases, the backend may need to send
requests to QEMU, such as IOTLB miss events when IOMMU is
supported.
This patch introduces a new channel for such requests.
QEMU sends a file descriptor of a new socket using
VHOST_USER_SET_SLAVE_REQ_FD.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/librte_vhost/vhost.h | 2 ++
lib/librte_vhost/vhost_user.c | 27 +++++++++++++++++++++++++++
lib/librte_vhost/vhost_user.h | 10 +++++++++-
3 files changed, 38 insertions(+), 1 deletion(-)
Comments
On Thu, Aug 31, 2017 at 11:50:07AM +0200, Maxime Coquelin wrote:
> Currently, only QEMU sends requests, the backend sends
> replies. In some cases, the backend may need to send
> requests to QEMU, like IOTLB miss events when IOMMU is
> supported.
>
> This patch introduces a new channel for such requests.
> QEMU sends a file descriptor of a new socket using
> VHOST_USER_SET_SLAVE_REQ_FD.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> lib/librte_vhost/vhost.h | 2 ++
> lib/librte_vhost/vhost_user.c | 27 +++++++++++++++++++++++++++
> lib/librte_vhost/vhost_user.h | 10 +++++++++-
> 3 files changed, 38 insertions(+), 1 deletion(-)
>
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 18ad69c85..2340b0c2a 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -196,6 +196,8 @@ struct virtio_net {
> uint32_t nr_guest_pages;
> uint32_t max_guest_pages;
> struct guest_page *guest_pages;
> +
> + int slave_req_fd;
> } __rte_cache_aligned;
>
>
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index 8984dcb6a..7b3c2f32a 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -76,6 +76,7 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
> [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
> [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
> [VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
> + [VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD",
> };
>
> static uint64_t
> @@ -122,6 +123,11 @@ vhost_backend_cleanup(struct virtio_net *dev)
> munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
> dev->log_addr = 0;
> }
> +
> + if (dev->slave_req_fd >= 0) {
> + close(dev->slave_req_fd);
> + dev->slave_req_fd = -1;
The slave_req_fd should also be initialized to -1 when allocating
the virtio_net structure. Currently, it's missing.
Best regards,
Tiwei Bie
On 09/05/2017 06:19 AM, Tiwei Bie wrote:
> On Thu, Aug 31, 2017 at 11:50:07AM +0200, Maxime Coquelin wrote:
>> Currently, only QEMU sends requests, the backend sends
>> replies. In some cases, the backend may need to send
>> requests to QEMU, like IOTLB miss events when IOMMU is
>> supported.
>>
>> This patch introduces a new channel for such requests.
>> QEMU sends a file descriptor of a new socket using
>> VHOST_USER_SET_SLAVE_REQ_FD.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>> ---
>> lib/librte_vhost/vhost.h | 2 ++
>> lib/librte_vhost/vhost_user.c | 27 +++++++++++++++++++++++++++
>> lib/librte_vhost/vhost_user.h | 10 +++++++++-
>> 3 files changed, 38 insertions(+), 1 deletion(-)
>>
>> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
>> index 18ad69c85..2340b0c2a 100644
>> --- a/lib/librte_vhost/vhost.h
>> +++ b/lib/librte_vhost/vhost.h
>> @@ -196,6 +196,8 @@ struct virtio_net {
>> uint32_t nr_guest_pages;
>> uint32_t max_guest_pages;
>> struct guest_page *guest_pages;
>> +
>> + int slave_req_fd;
>> } __rte_cache_aligned;
>>
>>
>> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
>> index 8984dcb6a..7b3c2f32a 100644
>> --- a/lib/librte_vhost/vhost_user.c
>> +++ b/lib/librte_vhost/vhost_user.c
>> @@ -76,6 +76,7 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
>> [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
>> [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
>> [VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
>> + [VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD",
>> };
>>
>> static uint64_t
>> @@ -122,6 +123,11 @@ vhost_backend_cleanup(struct virtio_net *dev)
>> munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
>> dev->log_addr = 0;
>> }
>> +
>> + if (dev->slave_req_fd >= 0) {
>> + close(dev->slave_req_fd);
>> + dev->slave_req_fd = -1;
>
> The slave_req_fd should also be initialized to -1 when allocating
> the virtio_net structure. Currently, it's missing.
Good catch, thanks for spotting this.
Maxime
> Best regards,
> Tiwei Bie
>
@@ -196,6 +196,8 @@ struct virtio_net {
uint32_t nr_guest_pages;
uint32_t max_guest_pages;
struct guest_page *guest_pages;
+
+ int slave_req_fd;
} __rte_cache_aligned;
@@ -76,6 +76,7 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
[VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
[VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
+ [VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD",
};
static uint64_t
@@ -122,6 +123,11 @@ vhost_backend_cleanup(struct virtio_net *dev)
munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
dev->log_addr = 0;
}
+
+ if (dev->slave_req_fd >= 0) {
+ close(dev->slave_req_fd);
+ dev->slave_req_fd = -1;
+ }
}
/*
@@ -844,6 +850,23 @@ vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
return 0;
}
+static int
+vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
+{
+ int fd = msg->fds[0];
+
+ if (fd < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Invalid file descriptor for slave channel (%d)\n",
+ fd);
+ return -1;
+ }
+
+ dev->slave_req_fd = fd;
+
+ return 0;
+}
+
/* return bytes# of read on success or negative val on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
@@ -1073,6 +1096,10 @@ vhost_user_msg_handler(int vid, int fd)
ret = vhost_user_net_set_mtu(dev, &msg);
break;
+ case VHOST_USER_SET_SLAVE_REQ_FD:
+ ret = vhost_user_set_req_fd(dev, &msg);
+ break;
+
default:
ret = -1;
break;
@@ -48,12 +48,14 @@
#define VHOST_USER_PROTOCOL_F_RARP 2
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#define VHOST_USER_PROTOCOL_F_NET_MTU 4
+#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
#define VHOST_USER_PROTOCOL_FEATURES ((1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
(1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |\
(1ULL << VHOST_USER_PROTOCOL_F_RARP) | \
(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
- (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))
+ (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ))
typedef enum VhostUserRequest {
VHOST_USER_NONE = 0,
@@ -77,9 +79,15 @@ typedef enum VhostUserRequest {
VHOST_USER_SET_VRING_ENABLE = 18,
VHOST_USER_SEND_RARP = 19,
VHOST_USER_NET_SET_MTU = 20,
+ VHOST_USER_SET_SLAVE_REQ_FD = 21,
VHOST_USER_MAX
} VhostUserRequest;
+typedef enum VhostUserSlaveRequest {
+ VHOST_USER_SLAVE_NONE = 0,
+ VHOST_USER_SLAVE_MAX
+} VhostUserSlaveRequest;
+
typedef struct VhostUserMemoryRegion {
uint64_t guest_phys_addr;
uint64_t memory_size;