From patchwork Thu Apr 19 07:07:38 2018
X-Patchwork-Submitter: Jens Freimann
X-Patchwork-Id: 38471
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Jens Freimann <jfreimann@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, yliu@fridaylinux.org, maxime.coquelin@redhat.com,
	mst@redhat.com, jens@freimann.org
Date: Thu, 19 Apr 2018 09:07:38 +0200
Message-Id: <20180419070751.8933-8-jfreimann@redhat.com>
In-Reply-To: <20180419070751.8933-1-jfreimann@redhat.com>
References: <20180419070751.8933-1-jfreimann@redhat.com>
Subject: [dpdk-dev] [PATCH v4 07/20] net/virtio: implement transmit path for
	packed queues

This implements the transmit path for devices that support Virtio 1.1.
Add the Virtio 1.1 feature bit and enable the code that adds buffers to
the vring and marks descriptors as available.

This is based on a patch by Yuanhan Liu.
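
For reviewers new to the packed ring: availability is signalled in the
descriptor flags themselves rather than via a separate avail ring. The
sketch below illustrates the protocol that the desc_is_used() and
_set_desc_avail() helpers used in this patch rely on. The flag values
come from the Virtio 1.1 draft spec and the descriptor layout matches
this series; the helper bodies here are illustrative assumptions, not
code from this patch. Note that the rte_smp_wmb() in the patch orders
the addr/len/index writes before the flags update, so the device never
sees an available descriptor with stale contents.

/* Illustrative sketch only, not part of this patch. */
#include <stdint.h>

#define VRING_DESC_F_AVAIL	(1 << 7)	/* toggled by the driver */
#define VRING_DESC_F_USED	(1 << 15)	/* toggled by the device */

/* Packed-ring descriptor layout as used by this series. */
struct vring_desc_packed {
	uint64_t addr;
	uint32_t len;
	uint16_t index;
	uint16_t flags;
};

/*
 * Driver side: make a descriptor available. With wrap counter 1 the
 * driver sets AVAIL and clears USED; once the ring wraps, the counter
 * flips to 0 and the encoding inverts, so descriptors left over from
 * the previous lap are never mistaken for freshly posted ones.
 */
static inline void
_set_desc_avail(struct vring_desc_packed *desc, int wrap_counter)
{
	if (wrap_counter)
		desc->flags = (desc->flags | VRING_DESC_F_AVAIL) &
			      ~VRING_DESC_F_USED;
	else
		desc->flags = (desc->flags | VRING_DESC_F_USED) &
			      ~VRING_DESC_F_AVAIL;
}

/* The device marks a descriptor used by setting USED equal to AVAIL. */
static inline int
desc_is_used(struct vring_desc_packed *desc)
{
	return !!(desc->flags & VRING_DESC_F_USED) ==
	       !!(desc->flags & VRING_DESC_F_AVAIL);
}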
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c |   8 ++-
 drivers/net/virtio/virtio_ethdev.h |   2 +
 drivers/net/virtio/virtio_rxtx.c   | 104 ++++++++++++++++++++++++++++++++++++-
 3 files changed, 112 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 0c9540b89..c5c2a268b 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -383,6 +383,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	vq->hw = hw;
 	vq->vq_queue_index = vtpci_queue_idx;
 	vq->vq_nentries = vq_size;
+	if (vtpci_packed_queue(hw))
+		vq->vq_ring.avail_wrap_counter = 1;
 
 	/*
 	 * Reserve a memzone for vring elements
@@ -1329,7 +1331,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
 	}
 
-	if (hw->use_simple_tx) {
+	if (vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(INFO, "virtio: using virtio 1.1 Tx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+	} else if (hw->use_simple_tx) {
 		PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index bb40064ea..5420d7648 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -85,6 +85,8 @@ uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
 
 uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a8aa87b32..b749babf3 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -38,6 +38,103 @@
 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
 #endif
 
+
+/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup_packed(struct virtqueue *vq)
+{
+	uint16_t idx;
+	uint16_t size = vq->vq_nentries;
+	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
+	struct vq_desc_extra *dxp;
+
+	idx = vq->vq_used_cons_idx;
+	while (desc_is_used(&desc[idx]) &&
+	       vq->vq_free_cnt < size) {
+		dxp = &vq->vq_descx[idx];
+		vq->vq_free_cnt += dxp->ndescs;
+		idx = vq->vq_used_cons_idx + dxp->ndescs;
+		idx = idx >= size ? idx - size : idx;
+	}
+}
+
+uint16_t
+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	struct virtnet_tx *txvq = tx_queue;
+	struct virtqueue *vq = txvq->vq;
+	uint16_t i;
+	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
+	uint16_t idx;
+	struct vq_desc_extra *dxp;
+
+	if (unlikely(nb_pkts < 1))
+		return nb_pkts;
+
+	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+	if (likely(vq->vq_free_cnt < vq->vq_free_thresh))
+		virtio_xmit_cleanup_packed(vq);
+
+	for (i = 0; i < nb_pkts; i++) {
+		struct rte_mbuf *txm = tx_pkts[i];
+		struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+		uint16_t head_idx;
+		int wrap_counter;
+		int descs_used;
+
+		if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+			virtio_xmit_cleanup_packed(vq);
+
+			if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+				PMD_TX_LOG(ERR,
+					   "No free tx descriptors to transmit");
+				break;
+			}
+		}
+
+		txvq->stats.bytes += txm->pkt_len;
+
+		vq->vq_free_cnt -= txm->nb_segs + 1;
+
+		wrap_counter = vq->vq_ring.avail_wrap_counter;
+		idx = update_pq_avail_index(vq);
+		head_idx = idx;
+
+		dxp = &vq->vq_descx[idx];
+		if (dxp->cookie != NULL)
+			rte_pktmbuf_free(dxp->cookie);
+		dxp->cookie = txm;
+
+		desc[idx].addr = txvq->virtio_net_hdr_mem +
+				 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+		desc[idx].len = vq->hw->vtnet_hdr_size;
+		desc[idx].flags = VRING_DESC_F_NEXT;
+		descs_used = 1;
+
+		do {
+			idx = update_pq_avail_index(vq);
+			desc[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(txm, vq);
+			desc[idx].len = txm->data_len;
+			desc[idx].flags = VRING_DESC_F_NEXT;
+			desc[idx].index = head_idx;
+			descs_used++;
+		} while ((txm = txm->next) != NULL);
+
+		desc[idx].flags &= ~VRING_DESC_F_NEXT;
+
+		rte_smp_wmb();
+		_set_desc_avail(&desc[head_idx], wrap_counter);
+		vq->vq_descx[head_idx].ndescs = descs_used;
+	}
+
+	txvq->stats.packets += i;
+	txvq->stats.errors += nb_pkts - i;
+
+	return i;
+}
+
 int
 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
 {
@@ -547,6 +644,10 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
+	if (vtpci_packed_queue(hw)) {
+		vq->vq_ring.avail_wrap_counter = 1;
+	}
+
 	if (hw->use_simple_tx) {
 		for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
 			vq->vq_ring.avail->ring[desc_idx] =
@@ -567,7 +668,8 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 		vq->vq_ring.avail->ring[desc_idx] = desc_idx;
 	}
 
-	VIRTQUEUE_DUMP(vq);
+	if (!vtpci_packed_queue(hw))
+		VIRTQUEUE_DUMP(vq);
 
 	return 0;
 }
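
From an application's point of view nothing changes: set_rxtx_funcs()
installs virtio_xmit_pkts_packed as the tx_pkt_burst callback, so
packets still go out through the generic ethdev burst API. A minimal,
hypothetical caller (the helper name, port and queue ids are assumed
for illustration, not taken from this patch):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/*
 * Hypothetical example: transmit a burst and drop whatever the
 * descriptor ring would not take. virtio_xmit_pkts_packed() returns
 * early when it runs out of free descriptors, so unsent mbufs stay
 * owned by the caller, which may retry or free them.
 */
static void
xmit_or_drop(uint16_t port_id, struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, nb);

	while (sent < nb)
		rte_pktmbuf_free(pkts[sent++]);
}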