From patchwork Mon Dec  3 15:10:35 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Maxime Coquelin
X-Patchwork-Id: 48508
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Maxime Coquelin
To: dev@dpdk.org, jfreimann@redhat.com, tiwei.bie@intel.com,
	zhihong.wang@intel.com
Cc: Maxime Coquelin
Date: Mon, 3 Dec 2018 16:10:35 +0100
Message-Id: <20181203151036.11293-3-maxime.coquelin@redhat.com>
In-Reply-To: <20181203151036.11293-1-maxime.coquelin@redhat.com>
References: <20181203151036.11293-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH 2/3] net/virtio: merge Rx mergeable and non-mergeable paths
List-Id: DPDK patches and discussions

Signed-off-by: Maxime Coquelin
---
 drivers/net/virtio/virtio_ethdev.c |   5 --
 drivers/net/virtio/virtio_ethdev.h |   3 -
 drivers/net/virtio/virtio_rxtx.c   | 115 ++---------------------------
 3 files changed, 7 insertions(+), 116 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d291..9658b179a 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1335,11 +1335,6 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 			"virtio: using inorder mergeable buffer Rx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
-	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-		PMD_INIT_LOG(INFO,
-			"virtio: using mergeable buffer Rx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
 	} else {
 		PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
 			eth_dev->data->port_id);
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index e0f80e5a4..865863300 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -74,9 +74,6 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
-uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
-
 uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
 		struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index e1c270b1c..331c1c56d 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -883,111 +883,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
 }
 
 #define VIRTIO_MBUF_BURST_SZ 64
-#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
-
-uint16_t
-virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
-	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
-	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
-	uint16_t nb_used, num, nb_rx;
-	uint32_t len[VIRTIO_MBUF_BURST_SZ];
-	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
-	int error;
-	uint32_t i, nb_enqueued;
-	uint32_t hdr_size;
-	struct virtio_net_hdr *hdr;
-
-	nb_rx = 0;
-	if (unlikely(hw->started == 0))
-		return nb_rx;
-
-	nb_used = VIRTQUEUE_NUSED(vq);
-
-	virtio_rmb();
-
-	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
-	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
-		num = VIRTIO_MBUF_BURST_SZ;
-	if (likely(num > DESC_PER_CACHELINE))
-		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
-
-	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
-	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
-
-	nb_enqueued = 0;
-	hdr_size = hw->vtnet_hdr_size;
-
-	for (i = 0; i < num ; i++) {
-		rxm = rcv_pkts[i];
-
-		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
-
-		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
-			PMD_RX_LOG(ERR, "Packet drop");
-			nb_enqueued++;
-			virtio_discard_rxbuf(vq, rxm);
-			rxvq->stats.errors++;
-			continue;
-		}
-
-		rxm->port = rxvq->port_id;
-		rxm->data_off = RTE_PKTMBUF_HEADROOM;
-		rxm->ol_flags = 0;
-		rxm->vlan_tci = 0;
-
-		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
-		rxm->data_len = (uint16_t)(len[i] - hdr_size);
-
-		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
-			RTE_PKTMBUF_HEADROOM - hdr_size);
-
-		if (hw->vlan_strip)
-			rte_vlan_strip(rxm);
-
-		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
-			virtio_discard_rxbuf(vq, rxm);
-			rxvq->stats.errors++;
-			continue;
-		}
-
-		virtio_rx_stats_updated(rxvq, rxm);
-
-		rx_pkts[nb_rx++] = rxm;
-	}
-
-	rxvq->stats.packets += nb_rx;
-
-	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
-			struct rte_eth_dev *dev
-				= &rte_eth_devices[rxvq->port_id];
-			dev->data->rx_mbuf_alloc_failed++;
-			break;
-		}
-		error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
-		if (unlikely(error)) {
-			rte_pktmbuf_free(new_mbuf);
-			break;
-		}
-		nb_enqueued++;
-	}
-
-	if (likely(nb_enqueued)) {
-		vq_update_avail_idx(vq);
-
-		if (unlikely(virtqueue_kick_prepare(vq))) {
-			virtqueue_notify(vq);
-			PMD_RX_LOG(DEBUG, "Notified");
-		}
-	}
-
-	return nb_rx;
-}
-
 uint16_t
 virtio_recv_mergeable_pkts_inorder(void *rx_queue,
 			struct rte_mbuf **rx_pkts,
@@ -1176,7 +1071,7 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue,
 }
 
 uint16_t
-virtio_recv_mergeable_pkts(void *rx_queue,
+virtio_recv_pkts(void *rx_queue,
 			struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts)
 {
@@ -1239,10 +1134,14 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
 			RTE_PKTMBUF_HEADROOM - hdr_size);
-		seg_num = header->num_buffers;
-		if (seg_num == 0)
+		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+			seg_num = header->num_buffers;
+			if (seg_num == 0)
+				seg_num = 1;
+		} else {
 			seg_num = 1;
+		}
 
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 		rxm->nb_segs = seg_num;
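
A note for readers outside the diff context: the heart of this merge is the seg_num selection in the last hunk above. The standalone sketch below models just that decision; the struct and the boolean flag are simplified stand-ins for the driver's virtio_net_hdr_mrg_rxbuf and the vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) check, so treat it as an illustration rather than code from the patch. It shows why the dedicated non-mergeable receive function can go away: when MRG_RXBUF is not negotiated, the shared path simply pins seg_num to 1 and behaves like the old single-segment receive.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for the mergeable Rx header: only the field the
 * seg_num decision needs, not the driver's struct virtio_net_hdr_mrg_rxbuf.
 */
struct mock_mrg_rxbuf_hdr {
	uint16_t num_buffers;	/* how many buffers the device chained */
};

/*
 * Mirrors the seg_num selection in the merged receive path: with
 * MRG_RXBUF negotiated the device reports how many buffers it used,
 * otherwise every packet occupies exactly one buffer/segment.
 */
static uint16_t
rx_seg_num(bool mrg_rxbuf_negotiated, const struct mock_mrg_rxbuf_hdr *hdr)
{
	uint16_t seg_num = 1;

	if (mrg_rxbuf_negotiated) {
		seg_num = hdr->num_buffers;
		if (seg_num == 0)
			seg_num = 1;
	}

	return seg_num;
}

int
main(void)
{
	struct mock_mrg_rxbuf_hdr hdr = { .num_buffers = 3 };

	printf("mergeable Rx:     %u segment(s)\n",
		(unsigned int)rx_seg_num(true, &hdr));
	printf("non-mergeable Rx: %u segment(s)\n",
		(unsigned int)rx_seg_num(false, &hdr));
	return 0;
}

Built with a stock C compiler, this prints 3 segments for the mergeable case and 1 for the non-mergeable case, which is the whole behavioral difference the removed virtio_recv_pkts() implementation used to encode separately.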