From patchwork Sat Apr 1 07:22:47 2017
X-Patchwork-Submitter: Yuanhan Liu
X-Patchwork-Id: 23108
X-Patchwork-Delegate: yuanhan.liu@linux.intel.com
From: Yuanhan Liu
To: dev@dpdk.org
Cc: Maxime Coquelin, Harris James R, Liu Changpeng, Yuanhan Liu
Date: Sat, 1 Apr 2017 15:22:47 +0800
Message-Id: <1491031380-1499-10-git-send-email-yuanhan.liu@linux.intel.com>
X-Mailer: git-send-email 1.9.0
In-Reply-To: <1491031380-1499-1-git-send-email-yuanhan.liu@linux.intel.com>
References: <1490705142-893-1-git-send-email-yuanhan.liu@linux.intel.com>
 <1491031380-1499-1-git-send-email-yuanhan.liu@linux.intel.com>
Subject: [dpdk-dev] [PATCH v4 09/22] vhost: turn queue pair to vring

The queue pair concept is very virtio-net specific; other devices do not
have such a concept. To make the code generic, we should track the number
of vrings instead of the number of queue pairs. This patch only does the
simple conversion; a later patch will export the number of vrings to
applications.
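
To make the conversion easier to follow, here is a small standalone sketch
(illustrative only, not part of the patch; the helper names qp_to_rx_vring(),
qp_to_tx_vring() and nr_vring_to_nr_qp() are made up) of how the old
queue-pair indexing maps onto the new per-vring bookkeeping:

/*
 * Illustrative sketch only. VIRTIO_RXQ, VIRTIO_TXQ and VIRTIO_QNUM mirror
 * the values used by librte_vhost (0, 1 and 2 respectively).
 */
#include <stdint.h>

#define VIRTIO_RXQ	0
#define VIRTIO_TXQ	1
#define VIRTIO_QNUM	2

/* Old scheme: everything is indexed through the queue-pair index. */
static inline uint32_t
qp_to_rx_vring(uint32_t qp_idx)
{
	return qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
}

static inline uint32_t
qp_to_tx_vring(uint32_t qp_idx)
{
	return qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;
}

/*
 * New scheme: vrings are tracked directly; the virtio-net queue-pair count
 * can still be derived, which is what the get-queue-num path now returns.
 */
static inline uint32_t
nr_vring_to_nr_qp(uint32_t nr_vring)
{
	return nr_vring / 2;
}

With that mapping, allocation and reset can be done per vring, which is why
the queue-pair wrappers removed below are no longer needed.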
Signed-off-by: Yuanhan Liu
Reviewed-by: Maxime Coquelin
---
v2: enable all vrings unconditionally
---
 lib/librte_vhost/vhost.c      | 80 +++++++++++++++----------------------------
 lib/librte_vhost/vhost.h      |  4 +--
 lib/librte_vhost/vhost_user.c | 28 +++++----------
 lib/librte_vhost/virtio_net.c | 10 +++---
 4 files changed, 42 insertions(+), 80 deletions(-)

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 4b9d781..f0ed729 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -84,10 +84,8 @@ struct virtio_net *
 	vhost_backend_cleanup(dev);
 
-	for (i = 0; i < dev->virt_qp_nb; i++) {
-		cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ], destroy);
-		cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ], destroy);
-	}
+	for (i = 0; i < dev->nr_vring; i++)
+		cleanup_vq(dev->virtqueue[i], destroy);
 }
 
 /*
@@ -97,24 +95,21 @@ struct virtio_net *
 free_device(struct virtio_net *dev)
 {
 	uint32_t i;
-	struct vhost_virtqueue *rxq, *txq;
+	struct vhost_virtqueue *vq;
 
-	for (i = 0; i < dev->virt_qp_nb; i++) {
-		rxq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
-		txq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];
+	for (i = 0; i < dev->nr_vring; i++) {
+		vq = dev->virtqueue[i];
 
-		rte_free(rxq->shadow_used_ring);
-		rte_free(txq->shadow_used_ring);
+		rte_free(vq->shadow_used_ring);
 
-		/* rxq and txq are allocated together as queue-pair */
-		rte_free(rxq);
+		rte_free(vq);
 	}
 
 	rte_free(dev);
 }
 
 static void
-init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
+init_vring_queue(struct vhost_virtqueue *vq)
 {
 	memset(vq, 0, sizeof(struct vhost_virtqueue));
 
@@ -124,69 +119,48 @@ struct virtio_net *
 	/* Backends are set to -1 indicating an inactive device. */
 	vq->backend = -1;
 
-	/* always set the default vq pair to enabled */
-	if (qp_idx == 0)
-		vq->enabled = 1;
+	/*
+	 * always set the vq to enabled; this is to keep compatibility
+	 * with the old QEMU, whereas there is no SET_VRING_ENABLE message.
+	 */
+	vq->enabled = 1;
 
 	TAILQ_INIT(&vq->zmbuf_list);
 }
 
 static void
-init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
-{
-	uint32_t base_idx = qp_idx * VIRTIO_QNUM;
-
-	init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
-	init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
-}
-
-static void
-reset_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
+reset_vring_queue(struct vhost_virtqueue *vq)
 {
 	int callfd;
 
 	callfd = vq->callfd;
-	init_vring_queue(vq, qp_idx);
+	init_vring_queue(vq);
 	vq->callfd = callfd;
 }
 
-static void
-reset_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
-{
-	uint32_t base_idx = qp_idx * VIRTIO_QNUM;
-
-	reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
-	reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
-}
-
 int
-alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
+alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 {
-	struct vhost_virtqueue *virtqueue = NULL;
-	uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
-	uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+	struct vhost_virtqueue *vq;
 
-	virtqueue = rte_malloc(NULL,
-			sizeof(struct vhost_virtqueue) * VIRTIO_QNUM, 0);
-	if (virtqueue == NULL) {
+	vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
+	if (vq == NULL) {
 		RTE_LOG(ERR, VHOST_CONFIG,
-			"Failed to allocate memory for virt qp:%d.\n", qp_idx);
+			"Failed to allocate memory for vring:%u.\n", vring_idx);
 		return -1;
 	}
 
-	dev->virtqueue[virt_rx_q_idx] = virtqueue;
-	dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;
-
-	init_vring_queue_pair(dev, qp_idx);
+	dev->virtqueue[vring_idx] = vq;
+	init_vring_queue(vq);
 
-	dev->virt_qp_nb += 1;
+	dev->nr_vring += 1;
 
 	return 0;
 }
 
 /*
  * Reset some variables in device structure, while keeping few
- * others untouched, such as vid, ifname, virt_qp_nb: they
+ * others untouched, such as vid, ifname, nr_vring: they
  * should be same unless the device is removed.
  */
 void
@@ -198,8 +172,8 @@ struct virtio_net *
 	dev->protocol_features = 0;
 	dev->flags = 0;
 
-	for (i = 0; i < dev->virt_qp_nb; i++)
-		reset_vring_queue_pair(dev, i);
+	for (i = 0; i < dev->nr_vring; i++)
+		reset_vring_queue(dev->virtqueue[i]);
 }
 
 /*
@@ -340,7 +314,7 @@ struct virtio_net *
 	if (dev == NULL)
 		return 0;
 
-	return dev->virt_qp_nb;
+	return dev->nr_vring / 2;
 }
 
 int
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index b5c5046..84e379a 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -176,7 +176,7 @@ struct virtio_net {
 	uint16_t		vhost_hlen;
 	/* to tell if we need broadcast rarp packet */
 	rte_atomic16_t		broadcast_rarp;
-	uint32_t		virt_qp_nb;
+	uint32_t		nr_vring;
 	int			dequeue_zero_copy;
 	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
@@ -256,7 +256,7 @@ static inline phys_addr_t __attribute__((always_inline))
 void reset_device(struct virtio_net *dev);
 void vhost_destroy_device(int);
 
-int alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx);
+int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
 
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
 void vhost_enable_dequeue_zero_copy(int vid);
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 30cf8f8..4337ce7 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -233,13 +233,6 @@
 	struct vhost_virtqueue *old_vq, *vq;
 	int ret;
 
-	/*
-	 * vq is allocated on pairs, we should try to do realloc
-	 * on first queue of one queue pair only.
-	 */
-	if (index % VIRTIO_QNUM != 0)
-		return dev;
-
 	old_dev = dev;
 	vq = old_vq = dev->virtqueue[index];
 
@@ -257,8 +250,7 @@
 	if (oldnode != newnode) {
 		RTE_LOG(INFO, VHOST_CONFIG,
 			"reallocate vq from %d to %d node\n", oldnode, newnode);
-		vq = rte_malloc_socket(NULL, sizeof(*vq) * VIRTIO_QNUM, 0,
-			newnode);
+		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
 		if (!vq)
 			return dev;
 
@@ -290,7 +282,6 @@
 out:
 	dev->virtqueue[index] = vq;
-	dev->virtqueue[index + 1] = vq + 1;
 	vhost_devices[dev->vid] = dev;
 
 	return dev;
@@ -621,14 +612,13 @@
 static int
 virtio_is_ready(struct virtio_net *dev)
 {
-	struct vhost_virtqueue *rvq, *tvq;
+	struct vhost_virtqueue *vq;
 	uint32_t i;
 
-	for (i = 0; i < dev->virt_qp_nb; i++) {
-		rvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
-		tvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];
+	for (i = 0; i < dev->nr_vring; i++) {
+		vq = dev->virtqueue[i];
 
-		if (!vq_is_ready(rvq) || !vq_is_ready(tvq)) {
+		if (!vq_is_ready(vq)) {
 			RTE_LOG(INFO, VHOST_CONFIG,
 				"virtio is not ready for processing.\n");
 			return 0;
@@ -940,7 +930,6 @@
 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
 {
 	uint16_t vring_idx;
-	uint16_t qp_idx;
 
 	switch (msg->request) {
 	case VHOST_USER_SET_VRING_KICK:
@@ -960,17 +949,16 @@
 		return 0;
 	}
 
-	qp_idx = vring_idx / VIRTIO_QNUM;
-	if (qp_idx >= VHOST_MAX_QUEUE_PAIRS) {
+	if (vring_idx >= VHOST_MAX_VRING) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"invalid vring index: %u\n", vring_idx);
 		return -1;
 	}
 
-	if (dev->virtqueue[qp_idx])
+	if (dev->virtqueue[vring_idx])
 		return 0;
 
-	return alloc_vring_queue_pair(dev, qp_idx);
+	return alloc_vring_queue(dev, vring_idx);
 }
 
 int
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index c1187d4..28542cf 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -86,9 +86,9 @@ static inline void __attribute__((always_inline))
 }
 
 static bool
-is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
+is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
-	return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
+	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
 }
 
 static inline void __attribute__((always_inline))
@@ -283,7 +283,7 @@ static inline uint32_t __attribute__((always_inline))
 	uint32_t i, sz;
 
 	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
 		return 0;
@@ -554,7 +554,7 @@ static inline uint32_t __attribute__((always_inline))
 	uint16_t avail_head;
 
 	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
 		return 0;
@@ -1019,7 +1019,7 @@ static inline bool __attribute__((always_inline))
 	if (!dev)
 		return 0;
 
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
 		return 0;
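
As a closing usage note, the externally visible behaviour should not change.
Below is a sketch of how an application keeps addressing queues by pair while
the library now counts vrings internally; the poll_queue_pairs() helper and
its parameters are hypothetical, and the rte_vhost_get_queue_num() /
rte_vhost_enqueue_burst() / rte_vhost_dequeue_burst() prototypes and the
rte_virtio_net.h header are assumed to be those of this release:

/*
 * Hedged sketch, not part of the patch. rte_vhost_get_queue_num() still
 * reports queue pairs (now computed as nr_vring / 2), so callers that loop
 * over queue pairs are unaffected by the internal per-vring bookkeeping.
 */
#include <stdint.h>
#include <rte_virtio_net.h>

static void
poll_queue_pairs(int vid, struct rte_mempool *mbuf_pool,
		 struct rte_mbuf **tx_pkts, uint16_t nb_tx,
		 struct rte_mbuf **rx_pkts, uint16_t nb_rx)
{
	uint16_t qp;
	uint16_t nr_qp = rte_vhost_get_queue_num(vid);

	for (qp = 0; qp < nr_qp; qp++) {
		/* Even vring index: the guest RX ring, the host enqueues into it. */
		rte_vhost_enqueue_burst(vid, qp * VIRTIO_QNUM + VIRTIO_RXQ,
					tx_pkts, nb_tx);
		/* Odd vring index: the guest TX ring, the host dequeues from it. */
		rte_vhost_dequeue_burst(vid, qp * VIRTIO_QNUM + VIRTIO_TXQ,
					mbuf_pool, rx_pkts, nb_rx);
	}
}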