From patchwork Wed Jul 29 13:36:24 2020
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, matan@mellanox.com, chenbo.xia@intel.com, yong.liu@intel.com, yinan.wang@intel.com
Cc: thomas@monjalon.net, ferruh.yigit@intel.com, david.marchand@redhat.com, Maxime Coquelin
Date: Wed, 29 Jul 2020 15:36:24 +0200
Message-Id: <20200729133626.237098-2-maxime.coquelin@redhat.com>
In-Reply-To: <20200729133626.237098-1-maxime.coquelin@redhat.com>
References: <20200729133626.237098-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v4 1/3] vhost: fix guest notification setting

If rte_vhost_enable_guest_notification() is called before the virtqueue
is ready, the configuration is lost. This patch fixes it by saving the
guest notification enablement value requested by the application, and
applying it before the virtqueue is made ready to the application.
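For context, a minimal sketch of how an application might drive this API
from its vring_state_changed callback. The callback and ops names below are
hypothetical; rte_vhost_enable_guest_notification() and struct
vhost_device_ops are the real library interfaces:

#include <stdint.h>
#include <rte_vhost.h>

/* Hypothetical application callback: request that guest notifications
 * be disabled whenever a vring comes up; an Rx interrupt path would
 * re-enable them on demand. With this fix, a request made while the
 * virtqueue is not yet ready is cached (vq->notif_enable) and applied
 * once the ring is made ready, instead of being silently dropped.
 */
static int
app_vring_state_changed(int vid, uint16_t queue_id, int enable)
{
	if (!enable)
		return 0;

	return rte_vhost_enable_guest_notification(vid, queue_id, 0);
}

static const struct vhost_device_ops app_ops = {
	.vring_state_changed = app_vring_state_changed,
	/* .new_device, .destroy_device, ... omitted */
};

The ops structure would then be registered with
rte_vhost_driver_callback_register(); error handling and the remaining
callbacks are omitted from this sketch.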
Fixes: 604052ae5395 ("net/vhost: support queue update")

Reported-by: Yinan Wang
Tested-by: Yinan Wang
Signed-off-by: Maxime Coquelin
Reviewed-by: Chenbo Xia
---
 lib/librte_vhost/vhost.c      | 24 ++++++++++++++++++++----
 lib/librte_vhost/vhost.h      |  5 +++++
 lib/librte_vhost/vhost_user.c | 11 ++++++++---
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 14b3e253e8..8f20a0818f 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -534,6 +534,7 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+	vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;
 
 	vhost_user_iotlb_init(dev, vring_idx);
 
 	/* Backends are set to -1 indicating an inactive device. */
@@ -1311,6 +1312,23 @@ vhost_enable_notify_packed(struct virtio_net *dev,
 	return 0;
 }
 
+int
+vhost_enable_guest_notification(struct virtio_net *dev,
+		struct vhost_virtqueue *vq, int enable)
+{
+	/*
+	 * If the virtqueue is not ready yet, it will be applied
+	 * when it will become ready.
+	 */
+	if (!vq->ready)
+		return 0;
+
+	if (vq_is_packed(dev))
+		return vhost_enable_notify_packed(dev, vq, enable);
+	else
+		return vhost_enable_notify_split(dev, vq, enable);
+}
+
 int
 rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 {
@@ -1325,10 +1343,8 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 
 	rte_spinlock_lock(&vq->access_lock);
 
-	if (vq_is_packed(dev))
-		ret = vhost_enable_notify_packed(dev, vq, enable);
-	else
-		ret = vhost_enable_notify_split(dev, vq, enable);
+	vq->notif_enable = enable;
+	ret = vhost_enable_guest_notification(dev, vq, enable);
 
 	rte_spinlock_unlock(&vq->access_lock);
 
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 0f7212f888..a29c6638e2 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -164,6 +164,9 @@ struct vhost_virtqueue {
 	int			enabled;
 	int			access_ok;
 	int			ready;
+	int			notif_enable;
+#define VIRTIO_UNINITIALIZED_NOTIF	(-1)
+
 	rte_spinlock_t		access_lock;
 
 	/* Used to notify the guest (trigger interrupt) */
@@ -668,6 +671,8 @@ void vhost_enable_dequeue_zero_copy(int vid);
 void vhost_set_builtin_virtio_net(int vid, bool enable);
 void vhost_enable_extbuf(int vid);
 void vhost_enable_linearbuf(int vid);
+int vhost_enable_guest_notification(struct virtio_net *dev,
+		struct vhost_virtqueue *vq, int enable);
 
 struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
 
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 9ddeae3622..c3c924faec 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -235,6 +235,11 @@ vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
 			  int enable)
 {
 	struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
+	struct vhost_virtqueue *vq = dev->virtqueue[index];
+
+	/* Configure guest notifications on enable */
+	if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
+		vhost_enable_guest_notification(dev, vq, vq->notif_enable);
 
 	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
 		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
@@ -1640,8 +1645,8 @@ vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	vq = dev->virtqueue[file.index];
 
 	if (vq->ready) {
-		vhost_user_notify_queue_state(dev, file.index, 0);
 		vq->ready = 0;
+		vhost_user_notify_queue_state(dev, file.index, 0);
 	}
 
 	if (vq->callfd >= 0)
@@ -1903,8 +1908,8 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	}
 
 	if (vq->ready) {
-		vhost_user_notify_queue_state(dev, file.index, 0);
 		vq->ready = 0;
+		vhost_user_notify_queue_state(dev, file.index, 0);
 	}
 
 	if (vq->kickfd >= 0)
@@ -2917,8 +2922,8 @@ vhost_user_msg_handler(int vid, int fd)
 			bool cur_ready = vq_is_ready(dev, vq);
 
 			if (cur_ready != (vq && vq->ready)) {
-				vhost_user_notify_queue_state(dev, i, cur_ready);
 				vq->ready = cur_ready;
+				vhost_user_notify_queue_state(dev, i, cur_ready);
 			}
 		}
 

From patchwork Wed Jul 29 13:36:25 2020
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, matan@mellanox.com, chenbo.xia@intel.com, yong.liu@intel.com, yinan.wang@intel.com
Cc: thomas@monjalon.net, ferruh.yigit@intel.com, david.marchand@redhat.com, Maxime Coquelin
Date: Wed, 29 Jul 2020 15:36:25 +0200
Message-Id: <20200729133626.237098-3-maxime.coquelin@redhat.com>
In-Reply-To: <20200729133626.237098-1-maxime.coquelin@redhat.com>
References: <20200729133626.237098-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v4 2/3] net/vhost: fix queue update

Now that the vhost library saves the guest notification enablement
value in its virtqueue metadata, it is no longer necessary to do it in
the vring_state_changed callback. As a side effect, the patch also
prevents a possible deadlock in the vhost library.
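As a rough illustration of the application-side view after this change, the
helper below is hypothetical and assumes a started net/vhost port configured
with intr_conf.rxq set; rte_eth_dev_rx_intr_enable() and
rte_eth_dev_rx_intr_disable() are the real ethdev calls. The point is that
toggling Rx interrupts is all the application does; the vhost library now
remembers the requested notification state, so the PMD no longer replays it
on queue updates:

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Hypothetical helper: toggle Rx interrupts on a net/vhost port.
 * Internally this reaches eth_rxq_intr_enable()/eth_rxq_intr_disable(),
 * which call rte_vhost_enable_guest_notification(); the library caches
 * that value, so it no longer needs to be re-applied from the
 * vring_state_changed callback when a vring is reconfigured.
 */
static int
app_set_rxq_interrupt(uint16_t port_id, uint16_t rx_queue_id, bool on)
{
	if (on)
		return rte_eth_dev_rx_intr_enable(port_id, rx_queue_id);

	return rte_eth_dev_rx_intr_disable(port_id, rx_queue_id);
}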
Fixes: 604052ae5395 ("net/vhost: support queue update")

Reported-by: Yinan Wang
Tested-by: Yinan Wang
Reviewed-by: Chenbo Xia
Signed-off-by: Maxime Coquelin
---
 drivers/net/vhost/rte_eth_vhost.c | 28 +++++++++-------------------
 1 file changed, 9 insertions(+), 19 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index bbf79b2c0e..951929c663 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -94,7 +94,6 @@ struct vhost_queue {
 	struct rte_mempool *mb_pool;
 	uint16_t port;
 	uint16_t virtqueue_id;
-	bool intr_en;
 	struct vhost_stats stats;
 };
 
@@ -547,8 +546,6 @@ eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
 	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
 	rte_wmb();
 
-	vq->intr_en = true;
-
 	return ret;
 }
 
@@ -574,8 +571,6 @@ eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
 	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
 	rte_wmb();
 
-	vq->intr_en = false;
-
 	return 0;
 }
 
@@ -841,7 +836,6 @@ vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 	struct pmd_internal *internal = eth_dev->data->dev_private;
 	struct rte_vhost_vring vring;
-	struct vhost_queue *vq;
 	int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
 	int ret = 0;
 
@@ -853,21 +847,17 @@ vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
 	    rte_atomic32_read(&internal->dev_attached) &&
 	    rte_atomic32_read(&internal->started) &&
 	    dev_conf->intr_conf.rxq) {
-		vq = eth_dev->data->rx_queues[rx_idx];
 		ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
-		if (!ret) {
-			if (vring.kickfd !=
-					eth_dev->intr_handle->efds[rx_idx]) {
-				VHOST_LOG(INFO,
-					"kickfd for rxq-%d was changed.\n",
-					rx_idx);
-				eth_dev->intr_handle->efds[rx_idx] =
-					vring.kickfd;
-			}
+		if (ret) {
+			VHOST_LOG(ERR, "Failed to get vring %d information.\n",
+					vring_id);
+			return ret;
+		}
 
-			rte_vhost_enable_guest_notification(vid, vring_id,
-					vq->intr_en);
-			rte_wmb();
+		if (vring.kickfd != eth_dev->intr_handle->efds[rx_idx]) {
+			VHOST_LOG(INFO, "kickfd for rxq-%d was changed.\n",
+					rx_idx);
+			eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;
 		}
 	}
From patchwork Wed Jul 29 13:36:26 2020
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, matan@mellanox.com, chenbo.xia@intel.com, yong.liu@intel.com, yinan.wang@intel.com
Cc: thomas@monjalon.net, ferruh.yigit@intel.com, david.marchand@redhat.com, Maxime Coquelin
Date: Wed, 29 Jul 2020 15:36:26 +0200
Message-Id: <20200729133626.237098-4-maxime.coquelin@redhat.com>
In-Reply-To: <20200729133626.237098-1-maxime.coquelin@redhat.com>
References: <20200729133626.237098-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v4 3/3] net/vhost: fix interrupt mode

At .new_device() time, only the first vring pair is now ready; the
other vrings are configured later. The problem is that when the
application sets up and enables interrupts, only the first queue pair's
Rx interrupt gets enabled.

This patch fixes the issue by setting the maximum number of interrupts
to the number of Rx queues that will be initialized later. Then, as
soon as an Rx vring is ready and its interrupt has been enabled by the
application, the corresponding uninitialized epoll event is removed and
a new one is installed with the valid FD.
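The epoll-event swap described above is the heart of the fix. Below is a
condensed, self-contained sketch of that technique, assuming the DPDK
20.08-era struct rte_intr_handle whose efds[] and elist[] arrays are
directly accessible (as the patch itself relies on); the function name is
illustrative, the real code lives in eth_vhost_update_intr() in the diff:

#include <sys/epoll.h>
#include <rte_interrupts.h>

/* Illustrative sketch of the technique: drop the epoll event that was
 * registered with a placeholder fd (-1) at interrupt-install time, then
 * register the now-valid kickfd so the application keeps waking up on
 * rte_epoll_wait().
 */
static int
update_rxq_epoll_event(struct rte_intr_handle *handle, uint16_t rxq_idx)
{
	struct rte_epoll_event *ev = &handle->elist[rxq_idx];
	int epfd = ev->epfd;
	int ret;

	if (handle->efds[rxq_idx] == ev->fd)
		return 0; /* kickfd unchanged, nothing to do */

	/* Remove the stale/placeholder event first... */
	ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, ev->fd, ev);
	if (ret)
		return ret;

	/* ...then install the valid kickfd in its place. */
	ev->fd = handle->efds[rxq_idx];
	return rte_epoll_ctl(epfd, EPOLL_CTL_ADD, ev->fd, ev);
}

The delete-then-add sequence mirrors the patch's own comment that a
dedicated update API could replace it in the future.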
Fixes: 604052ae5395 ("net/vhost: support queue update")

Signed-off-by: Maxime Coquelin
Reviewed-by: Chenbo Xia
Tested-by: Xiao Qimai
---
 drivers/net/vhost/rte_eth_vhost.c | 91 +++++++++++++++++++++++++++----
 1 file changed, 81 insertions(+), 10 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 951929c663..e55278af69 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -95,6 +96,8 @@ struct vhost_queue {
 	uint16_t port;
 	uint16_t virtqueue_id;
 	struct vhost_stats stats;
+	int intr_enable;
+	rte_spinlock_t intr_lock;
 };
 
 struct pmd_internal {
@@ -524,12 +527,58 @@ find_internal_resource(char *ifname)
 	return list;
 }
 
+static int
+eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
+{
+	struct rte_intr_handle *handle = eth_dev->intr_handle;
+	struct rte_epoll_event rev;
+	int epfd, ret;
+
+	if (!handle)
+		return 0;
+
+	if (handle->efds[rxq_idx] == handle->elist[rxq_idx].fd)
+		return 0;
+
+	VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n",
+			rxq_idx);
+
+	if (handle->elist[rxq_idx].fd != -1)
+		VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n",
+				handle->elist[rxq_idx].fd);
+
+	/*
+	 * First remove invalid epoll event, and then install
+	 * the new one. May be solved with a proper API in the
+	 * future.
+	 */
+	epfd = handle->elist[rxq_idx].epfd;
+	rev = handle->elist[rxq_idx];
+	ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd,
+			&handle->elist[rxq_idx]);
+	if (ret) {
+		VHOST_LOG(ERR, "Delete epoll event failed.\n");
+		return ret;
+	}
+
+	rev.fd = handle->efds[rxq_idx];
+	handle->elist[rxq_idx] = rev;
+	ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd,
+			&handle->elist[rxq_idx]);
+	if (ret) {
+		VHOST_LOG(ERR, "Add epoll event failed.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int
 eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
 {
 	struct rte_vhost_vring vring;
 	struct vhost_queue *vq;
-	int ret = 0;
+	int old_intr_enable, ret = 0;
 
 	vq = dev->data->rx_queues[qid];
 	if (!vq) {
@@ -537,6 +586,18 @@ eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
 		return -1;
 	}
 
+	rte_spinlock_lock(&vq->intr_lock);
+	old_intr_enable = vq->intr_enable;
+	vq->intr_enable = 1;
+	ret = eth_vhost_update_intr(dev, qid);
+	rte_spinlock_unlock(&vq->intr_lock);
+
+	if (ret < 0) {
+		VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid);
+		vq->intr_enable = old_intr_enable;
+		return ret;
+	}
+
 	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
 	if (ret < 0) {
 		VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
@@ -571,6 +632,8 @@ eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
 	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
 	rte_wmb();
 
+	vq->intr_enable = 0;
+
 	return 0;
 }
 
@@ -593,7 +656,6 @@ eth_vhost_install_intr(struct rte_eth_dev *dev)
 {
 	struct rte_vhost_vring vring;
 	struct vhost_queue *vq;
-	int count = 0;
 	int nb_rxq = dev->data->nb_rx_queues;
 	int i;
 	int ret;
@@ -623,6 +685,8 @@ eth_vhost_install_intr(struct rte_eth_dev *dev)
 	VHOST_LOG(INFO, "Prepare intr vec\n");
 
 	for (i = 0; i < nb_rxq; i++) {
+		dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+		dev->intr_handle->efds[i] = -1;
 		vq = dev->data->rx_queues[i];
 		if (!vq) {
 			VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
@@ -641,14 +705,12 @@ eth_vhost_install_intr(struct rte_eth_dev *dev)
 				"rxq-%d's kickfd is invalid, skip!\n", i);
 			continue;
 		}
-		dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
 		dev->intr_handle->efds[i] = vring.kickfd;
-		count++;
 		VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
 	}
 
-	dev->intr_handle->nb_efd = count;
-	dev->intr_handle->max_intr = count + 1;
+	dev->intr_handle->nb_efd = nb_rxq;
+	dev->intr_handle->max_intr = nb_rxq + 1;
 	dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
 
 	return 0;
 
@@ -835,6 +897,7 @@ vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
 {
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 	struct pmd_internal *internal = eth_dev->data->dev_private;
+	struct vhost_queue *vq;
 	struct rte_vhost_vring vring;
 	int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
 	int ret = 0;
 
@@ -853,12 +916,18 @@ vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
 					vring_id);
 			return ret;
 		}
+		eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;
 
-		if (vring.kickfd != eth_dev->intr_handle->efds[rx_idx]) {
-			VHOST_LOG(INFO, "kickfd for rxq-%d was changed.\n",
-					rx_idx);
-			eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;
+		vq = eth_dev->data->rx_queues[rx_idx];
+		if (!vq) {
+			VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx);
+			return -1;
 		}
+
+		rte_spinlock_lock(&vq->intr_lock);
+		if (vq->intr_enable)
+			ret = eth_vhost_update_intr(eth_dev, rx_idx);
+		rte_spinlock_unlock(&vq->intr_lock);
 	}
 
 	return ret;
@@ -1152,6 +1221,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	vq->mb_pool = mb_pool;
 	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
+	rte_spinlock_init(&vq->intr_lock);
 	dev->data->rx_queues[rx_queue_id] = vq;
 
 	return 0;
@@ -1173,6 +1243,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	}
 
 	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
+	rte_spinlock_init(&vq->intr_lock);
 	dev->data->tx_queues[tx_queue_id] = vq;
 
 	return 0;