[12/28] vhost: move slave request fd and lock

Message ID: 1560957293-17294-13-git-send-email-ndragazis@arrikto.com (mailing list archive)
State: RFC, archived
Delegated to: Maxime Coquelin
Series: vhost: add virtio-vhost-user transport

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  fail      Compilation issues

Commit Message

Nikos Dragazis June 19, 2019, 3:14 p.m. UTC
The slave request file descriptor is specific to the AF_UNIX transport.
Move this field, along with its spinlock, out of struct virtio_net and
into the transport-private struct vhost_user_connection in
trans_af_unix.c. As a consequence, the associated functions
send_vhost_slave_message() and process_slave_message_reply() also move
out of vhost_user.c and into trans_af_unix.c. The spinlock
initialization likewise moves out of vhost_new_device() and into
vhost_user_add_connection() in trans_af_unix.c.

This change will allow future transports to implement the slave request
fd without relying on socket I/O.
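
The move leans on the embedded-struct pattern the transport already
uses: struct virtio_net is the first field of struct
vhost_user_connection, so a transport op handed the generic device
pointer can recover its private state with container_of(). Below is a
minimal self-contained sketch of that pattern; the stand-in struct
layouts and the container_of() definition are simplified illustrations,
not the library's actual ones.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-in for the transport-agnostic device state. */
struct virtio_net {
	int vid;
};

/* Simplified stand-in for the AF_UNIX transport-private state. */
struct vhost_user_connection {
	struct virtio_net device; /* must be the first field! */
	int slave_req_fd;         /* now lives here, not in virtio_net */
};

/* A transport op receives the generic dev pointer and recovers the
 * connection that embeds it. */
static int
example_slave_req_fd(struct virtio_net *dev)
{
	struct vhost_user_connection *conn =
		container_of(dev, struct vhost_user_connection, device);

	return conn->slave_req_fd;
}

int main(void)
{
	struct vhost_user_connection conn = {
		.device = { .vid = 0 },
		.slave_req_fd = 42,
	};

	printf("slave_req_fd = %d\n", example_slave_req_fd(&conn.device));
	return 0;
}

container_of() works for any member, but keeping the device as the
first field makes the two pointers coincide, which keeps allocation and
freeing of the connection object simple.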

Signed-off-by: Nikos Dragazis <ndragazis@arrikto.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 lib/librte_vhost/trans_af_unix.c | 87 +++++++++++++++++++++++++++++++++++++++-
 lib/librte_vhost/vhost.c         |  4 +-
 lib/librte_vhost/vhost.h         | 41 +++++++++++++++++--
 lib/librte_vhost/vhost_user.c    | 67 ++++---------------------------
 4 files changed, 132 insertions(+), 67 deletions(-)

Patch

diff --git a/lib/librte_vhost/trans_af_unix.c b/lib/librte_vhost/trans_af_unix.c
index c0ba8df..5f9ef5a 100644
--- a/lib/librte_vhost/trans_af_unix.c
+++ b/lib/librte_vhost/trans_af_unix.c
@@ -29,6 +29,8 @@  struct vhost_user_connection {
 	struct virtio_net device; /* must be the first field! */
 	struct vhost_user_socket *vsocket;
 	int connfd;
+	int slave_req_fd;
+	rte_spinlock_t slave_req_lock;
 
 	TAILQ_ENTRY(vhost_user_connection) next;
 };
@@ -41,6 +43,7 @@  struct af_unix_socket {
 	struct sockaddr_un un;
 };
 
+int read_vhost_message(int sockfd, struct VhostUserMsg *msg);
 static int create_unix_socket(struct vhost_user_socket *vsocket);
 static int vhost_user_start_server(struct vhost_user_socket *vsocket);
 static int vhost_user_start_client(struct vhost_user_socket *vsocket);
@@ -161,8 +164,71 @@  af_unix_send_reply(struct virtio_net *dev, struct VhostUserMsg *msg)
 static int
 af_unix_send_slave_req(struct virtio_net *dev, struct VhostUserMsg *msg)
 {
-	return send_fd_message(dev->slave_req_fd, msg,
-			       VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
+	struct vhost_user_connection *conn =
+		container_of(dev, struct vhost_user_connection, device);
+	int ret;
+
+	if (msg->flags & VHOST_USER_NEED_REPLY)
+		rte_spinlock_lock(&conn->slave_req_lock);
+
+	ret = send_fd_message(conn->slave_req_fd, msg,
+			VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
+
+	if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
+		rte_spinlock_unlock(&conn->slave_req_lock);
+
+	return ret;
+}
+
+static int
+af_unix_process_slave_message_reply(struct virtio_net *dev,
+				    const struct VhostUserMsg *msg)
+{
+	struct vhost_user_connection *conn =
+		container_of(dev, struct vhost_user_connection, device);
+	struct VhostUserMsg msg_reply;
+	int ret;
+
+	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
+		return 0;
+
+	if (read_vhost_message(conn->slave_req_fd, &msg_reply) < 0) {
+		ret = -1;
+		goto out;
+	}
+
+	if (msg_reply.request.slave != msg->request.slave) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"Received unexpected msg type (%u), expected %u\n",
+			msg_reply.request.slave, msg->request.slave);
+		ret = -1;
+		goto out;
+	}
+
+	ret = msg_reply.payload.u64 ? -1 : 0;
+
+out:
+	rte_spinlock_unlock(&conn->slave_req_lock);
+	return ret;
+}
+
+static int
+af_unix_set_slave_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
+{
+	struct vhost_user_connection *conn =
+		container_of(dev, struct vhost_user_connection, device);
+	int fd = msg->fds[0];
+
+	if (fd < 0) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+				"Invalid file descriptor for slave channel (%d)\n",
+				fd);
+		return -1;
+	}
+
+	conn->slave_req_fd = fd;
+
+	return 0;
 }
 
 static void
@@ -185,7 +251,9 @@  vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 
 	conn = container_of(dev, struct vhost_user_connection, device);
 	conn->connfd = fd;
+	conn->slave_req_fd = -1;
 	conn->vsocket = vsocket;
+	rte_spinlock_init(&conn->slave_req_lock);
 
 	size = strnlen(vsocket->path, PATH_MAX);
 	vhost_set_ifname(dev->vid, vsocket->path, size);
@@ -682,6 +750,18 @@  af_unix_socket_start(struct vhost_user_socket *vsocket)
 		return vhost_user_start_client(vsocket);
 }
 
+static void
+af_unix_cleanup_device(struct virtio_net *dev, int destroy __rte_unused)
+{
+	struct vhost_user_connection *conn =
+		container_of(dev, struct vhost_user_connection, device);
+
+	if (conn->slave_req_fd >= 0) {
+		close(conn->slave_req_fd);
+		conn->slave_req_fd = -1;
+	}
+}
+
 static int
 af_unix_vring_call(struct virtio_net *dev __rte_unused,
 		   struct vhost_virtqueue *vq)
@@ -697,7 +777,10 @@  const struct vhost_transport_ops af_unix_trans_ops = {
 	.socket_init = af_unix_socket_init,
 	.socket_cleanup = af_unix_socket_cleanup,
 	.socket_start = af_unix_socket_start,
+	.cleanup_device = af_unix_cleanup_device,
 	.vring_call = af_unix_vring_call,
 	.send_reply = af_unix_send_reply,
 	.send_slave_req = af_unix_send_slave_req,
+	.process_slave_message_reply = af_unix_process_slave_message_reply,
+	.set_slave_req_fd = af_unix_set_slave_req_fd,
 };
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 0fdc54f..5b16390 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -256,6 +256,8 @@  cleanup_device(struct virtio_net *dev, int destroy)
 
 	for (i = 0; i < dev->nr_vring; i++)
 		cleanup_vq(dev->virtqueue[i], destroy);
+
+	dev->trans_ops->cleanup_device(dev, destroy);
 }
 
 void
@@ -508,11 +510,9 @@  vhost_new_device(const struct vhost_transport_ops *trans_ops)
 	vhost_devices[i] = dev;
 	dev->vid = i;
 	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
-	dev->slave_req_fd = -1;
 	dev->trans_ops = trans_ops;
 	dev->vdpa_dev_id = -1;
 	dev->postcopy_ufd = -1;
-	rte_spinlock_init(&dev->slave_req_lock);
 
 	return dev;
 }
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index b20773c..2213fbe 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -340,6 +340,16 @@  struct vhost_transport_ops {
 	int (*socket_start)(struct vhost_user_socket *vsocket);
 
 	/**
+	 * Free resources associated with this device.
+	 *
+	 * @param dev
+	 *  vhost device
+	 * @param destroy
+	 *  0 on device reset, 1 on full cleanup.
+	 */
+	void (*cleanup_device)(struct virtio_net *dev, int destroy);
+
+	/**
 	 * Notify the guest that used descriptors have been added to the vring.
 	 * The VRING_AVAIL_F_NO_INTERRUPT flag and event idx have already been checked
 	 * so this function just needs to perform the notification.
@@ -377,6 +387,34 @@  struct vhost_transport_ops {
 	 */
 	int (*send_slave_req)(struct virtio_net *dev,
 			      struct VhostUserMsg *req);
+
+	/**
+	 * Process the master's reply to a slave request.
+	 *
+	 * @param dev
+	 *  vhost device
+	 * @param msg
+	 *  slave request message
+	 * @return
+	 *  0 on success, -1 on failure
+	 */
+	int (*process_slave_message_reply)(struct virtio_net *dev,
+					   const struct VhostUserMsg *msg);
+
+	/**
+	 * Process the VHOST_USER_SET_SLAVE_REQ_FD message. After this
+	 * function succeeds, send_slave_req() may be called to submit
+	 * requests to the master.
+	 *
+	 * @param dev
+	 *  vhost device
+	 * @param msg
+	 *  message
+	 * @return
+	 *  0 on success, -1 on failure
+	 */
+	int (*set_slave_req_fd)(struct virtio_net *dev,
+				struct VhostUserMsg *msg);
 };
 
 /** The traditional AF_UNIX vhost-user protocol transport. */
@@ -419,9 +457,6 @@  struct virtio_net {
 	uint32_t		max_guest_pages;
 	struct guest_page       *guest_pages;
 
-	int			slave_req_fd;
-	rte_spinlock_t		slave_req_lock;
-
 	int			postcopy_ufd;
 	int			postcopy_listening;
 
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 5c12435..a4dcba1 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -160,11 +160,6 @@  vhost_backend_cleanup(struct virtio_net *dev)
 		dev->log_addr = 0;
 	}
 
-	if (dev->slave_req_fd >= 0) {
-		close(dev->slave_req_fd);
-		dev->slave_req_fd = -1;
-	}
-
 	if (dev->postcopy_ufd >= 0) {
 		close(dev->postcopy_ufd);
 		dev->postcopy_ufd = -1;
@@ -1549,17 +1544,13 @@  static int
 vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			int main_fd __rte_unused)
 {
+	int ret;
 	struct virtio_net *dev = *pdev;
-	int fd = msg->fds[0];
 
-	if (fd < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-				"Invalid file descriptor for slave channel (%d)\n",
-				fd);
-		return RTE_VHOST_MSG_RESULT_ERR;
-	}
+	ret = dev->trans_ops->set_slave_req_fd(dev, msg);
 
-	dev->slave_req_fd = fd;
+	if (ret < 0)
+		return RTE_VHOST_MSG_RESULT_ERR;
 
 	return RTE_VHOST_MSG_RESULT_OK;
 }
@@ -1778,21 +1769,6 @@  send_vhost_reply(struct virtio_net *dev, struct VhostUserMsg *msg)
 	return dev->trans_ops->send_reply(dev, msg);
 }
 
-static int
-send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
-{
-	int ret;
-
-	if (msg->flags & VHOST_USER_NEED_REPLY)
-		rte_spinlock_lock(&dev->slave_req_lock);
-
-	ret = dev->trans_ops->send_slave_req(dev, msg);
-	if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
-		rte_spinlock_unlock(&dev->slave_req_lock);
-
-	return ret;
-}
-
 /*
  * Allocate a queue pair if it hasn't been allocated yet
  */
@@ -2069,35 +2045,6 @@  vhost_user_msg_handler(int vid, int fd, const struct VhostUserMsg *msg_)
 	return 0;
 }
 
-static int process_slave_message_reply(struct virtio_net *dev,
-				       const struct VhostUserMsg *msg)
-{
-	struct VhostUserMsg msg_reply;
-	int ret;
-
-	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
-		return 0;
-
-	if (read_vhost_message(dev->slave_req_fd, &msg_reply) < 0) {
-		ret = -1;
-		goto out;
-	}
-
-	if (msg_reply.request.slave != msg->request.slave) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"Received unexpected msg type (%u), expected %u\n",
-			msg_reply.request.slave, msg->request.slave);
-		ret = -1;
-		goto out;
-	}
-
-	ret = msg_reply.payload.u64 ? -1 : 0;
-
-out:
-	rte_spinlock_unlock(&dev->slave_req_lock);
-	return ret;
-}
-
 int
 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
 {
@@ -2113,7 +2060,7 @@  vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
 		},
 	};
 
-	ret = send_vhost_slave_req(dev, &msg);
+	ret = dev->trans_ops->send_slave_req(dev, &msg);
 	if (ret < 0) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 				"Failed to send IOTLB miss message (%d)\n",
@@ -2148,14 +2095,14 @@  static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
 		msg.fd_num = 1;
 	}
 
-	ret = send_vhost_slave_message(dev, &msg);
+	ret = dev->trans_ops->send_slave_req(dev, &msg);
 	if (ret < 0) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"Failed to set host notifier (%d)\n", ret);
 		return ret;
 	}
 
-	return process_slave_message_reply(dev, &msg);
+	return dev->trans_ops->process_slave_message_reply(dev, &msg);
 }
 
 int rte_vhost_host_notifier_ctrl(int vid, bool enable)
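
A detail that survives the move intact is the lock handoff between
af_unix_send_slave_req() and af_unix_process_slave_message_reply(): the
send path takes slave_req_lock only when VHOST_USER_NEED_REPLY is set,
releases it again if the send fails, and otherwise leaves it held until
the reply path has validated the master's answer. Here is a
self-contained sketch of that protocol, assuming a pthread mutex in
place of rte_spinlock_t and with the actual message I/O elided.

#include <pthread.h>
#include <stdio.h>

#define VHOST_USER_NEED_REPLY (1u << 3) /* vhost-user "need_reply" flag */

struct conn {
	pthread_mutex_t slave_req_lock;
};

/* Send path: take the lock only when a reply is expected; if the send
 * itself fails, release the lock immediately so the channel is not
 * left locked forever. */
static int
send_slave_req(struct conn *c, unsigned int flags, int send_result)
{
	if (flags & VHOST_USER_NEED_REPLY)
		pthread_mutex_lock(&c->slave_req_lock);

	if (send_result < 0 && (flags & VHOST_USER_NEED_REPLY))
		pthread_mutex_unlock(&c->slave_req_lock);

	return send_result;
}

/* Reply path: a no-op unless a reply was requested; otherwise read and
 * validate the reply (elided here), then release the lock taken by
 * send_slave_req(). */
static int
process_slave_message_reply(struct conn *c, unsigned int flags)
{
	if ((flags & VHOST_USER_NEED_REPLY) == 0)
		return 0;

	/* ... read_vhost_message() and the type check would go here ... */

	pthread_mutex_unlock(&c->slave_req_lock);
	return 0;
}

int main(void)
{
	struct conn c = { .slave_req_lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned int flags = VHOST_USER_NEED_REPLY;

	if (send_slave_req(&c, flags, 0) == 0)
		process_slave_message_reply(&c, flags);

	puts("slave request lock handed off and released");
	return 0;
}

Holding the lock across the request/reply round trip serializes
slave-channel requests that expect replies: a second such request
cannot be sent until the first reply has been consumed. Moving the lock
into struct vhost_user_connection keeps it next to the fd it protects.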