From patchwork Fri Sep  4 20:58:28 2015
X-Patchwork-Submitter: Stephen Hemminger
X-Patchwork-Id: 6904
From: Stephen Hemminger <stephen@networkplumber.org>
To: huawei.xie@intel.com, changchun.ouyang@intel.com
Cc: dev@dpdk.org
Date: Fri, 4 Sep 2015 13:58:28 -0700
Message-Id: <1441400308-5725-5-git-send-email-stephen@networkplumber.org>
In-Reply-To: <1441400308-5725-1-git-send-email-stephen@networkplumber.org>
References: <1441400308-5725-1-git-send-email-stephen@networkplumber.org>
Subject: [dpdk-dev] [PATCH 4/4] virtio: use any layout on transmit

Virtio has an "any layout" feature (VIRTIO_F_ANY_LAYOUT) that allows the
sender to prepend the transmit header directly to the packet data. Using
it requires that the mbuf is writable and correctly aligned, and that the
feature has been negotiated. When all of these conditions hold, it is the
optimal way to transmit a single-segment packet.
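To make the idea concrete, here is a minimal standalone sketch (not the
driver code; toy_mbuf, toy_push_header and TOY_HDR_SIZE are hypothetical
stand-ins for rte_mbuf, rte_pktmbuf_prepend() and the negotiated header
size). With any layout, the zeroed virtio header is written into the mbuf
headroom, so header and packet share one contiguous buffer and a single
descriptor covers both:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_HDR_SIZE 12	/* stand-in for hw->vtnet_hdr_size */

struct toy_mbuf {
	uint8_t  buf[2048];
	uint16_t data_off;	/* packet data starts here; bytes before it are headroom */
	uint16_t data_len;
};

/* Prepend a zeroed transmit header into the headroom (no offloads),
 * mirroring the can_push branch of virtqueue_enqueue_xmit() below. */
static uint8_t *toy_push_header(struct toy_mbuf *m)
{
	if (m->data_off < TOY_HDR_SIZE)
		return NULL;	/* not enough headroom */
	m->data_off -= TOY_HDR_SIZE;
	m->data_len += TOY_HDR_SIZE;
	memset(m->buf + m->data_off, 0, TOY_HDR_SIZE);
	return m->buf + m->data_off;
}

int main(void)
{
	struct toy_mbuf m = { .data_off = 128, .data_len = 60 };

	if (toy_push_header(&m) != NULL)
		printf("one descriptor covers %u bytes (header + data)\n",
		       (unsigned)m.data_len);
	return 0;
}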
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/virtio/virtio_ethdev.h |  3 +-
 drivers/net/virtio/virtio_rxtx.c   | 67 ++++++++++++++++++++++++++------------
 2 files changed, 49 insertions(+), 21 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 07a9265..f260fbb 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -65,7 +65,8 @@
	 1u << VIRTIO_NET_F_CTRL_RX   |	\
	 1u << VIRTIO_NET_F_CTRL_VLAN |	\
	 1u << VIRTIO_NET_F_MRG_RXBUF |	\
-	 1u << VIRTIO_RING_F_INDIRECT_DESC)
+	 1u << VIRTIO_RING_F_INDIRECT_DESC|	\
+	 1u << VIRTIO_F_ANY_LAYOUT)
 
 /*
  * CQ function prototype
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 8979695..5ec9b29 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -200,13 +200,14 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
 
 static int
 virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
-		       int use_indirect)
+		       int use_indirect, int can_push)
 {
 	struct vq_desc_extra *dxp;
 	struct vring_desc *start_dp;
 	uint16_t seg_num = cookie->nb_segs;
-	uint16_t needed = use_indirect ? 1 : 1 + seg_num;
+	uint16_t needed = use_indirect ? 1 : !can_push + seg_num;
 	uint16_t head_idx, idx;
+	uint16_t head_size = txvq->hw->vtnet_hdr_size;
 	unsigned long offs;
 
 	if (unlikely(txvq->vq_free_cnt == 0))
@@ -236,27 +237,31 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
 		idx = 0;
 	}
 
-	offs = offsetof(struct virtio_tx_region, tx_hdr)
-		+ idx * sizeof(struct virtio_tx_region);
+	if (can_push) {
+		/* put on zero'd transmit header (no offloads) */
+		void *hdr = rte_pktmbuf_prepend(cookie, head_size);
 
-	start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs;
-	start_dp[idx].len = txvq->hw->vtnet_hdr_size;
-	start_dp[idx].flags = VRING_DESC_F_NEXT;
+		memset(hdr, 0, head_size);
+	} else {
+		offs = offsetof(struct virtio_tx_region, tx_hdr)
+			+ idx * sizeof(struct virtio_tx_region);
 
-	for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
+		start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs;
+		start_dp[idx].len = head_size;
+		start_dp[idx].flags = VRING_DESC_F_NEXT;
 		idx = start_dp[idx].next;
+	}
+
+	for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
 		start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
 		start_dp[idx].len = cookie->data_len;
-		start_dp[idx].flags = VRING_DESC_F_NEXT;
 		cookie = cookie->next;
+		start_dp[idx].flags = cookie ? VRING_DESC_F_NEXT : 0;
+		idx = start_dp[idx].next;
 	}
 
-	start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
-
 	if (use_indirect)
 		idx = txvq->vq_ring.desc[head_idx].next;
-	else
-		idx = start_dp[idx].next;
 
 	txvq->vq_desc_head_idx = idx;
 	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
@@ -762,6 +767,26 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	return nb_rx;
 }
 
+/* Evaluate whether the virtio header can just be put in place in the mbuf */
+static int virtio_xmit_push_ok(const struct virtqueue *txvq,
+			       const struct rte_mbuf *m)
+{
+	if (rte_mbuf_refcnt_read(m) != 1)
+		return 0; /* no, mbuf is shared */
+
+	if (rte_pktmbuf_headroom(m) < txvq->hw->vtnet_hdr_size)
+		return 0; /* no space in headroom */
+
+	if (!rte_is_aligned(rte_pktmbuf_mtod(m, char *),
+			    sizeof(struct virtio_net_hdr_mrg_rxbuf)))
+		return 0; /* not aligned */
+
+	if (m->nb_segs > 1)
+		return 0; /* better off using indirect */
+
+	return vtpci_with_feature(txvq->hw, VIRTIO_F_ANY_LAYOUT);
+}
+
 uint16_t
 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -781,14 +806,16 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int use_indirect, slots, need;
-
-		use_indirect = vtpci_with_feature(txvq->hw,
-						  VIRTIO_RING_F_INDIRECT_DESC)
-			&& (txm->nb_segs < VIRTIO_MAX_TX_INDIRECT);
+		int use_indirect = 0, slots, need;
+		int can_push = virtio_xmit_push_ok(txvq, txm);
+
+		if (!can_push &&
+		    txm->nb_segs < VIRTIO_MAX_TX_INDIRECT &&
+		    vtpci_with_feature(txvq->hw, VIRTIO_RING_F_INDIRECT_DESC))
+			use_indirect = 1;
 
 		/* How many ring entries are needed to this Tx? */
-		slots = use_indirect ? 1 : 1 + txm->nb_segs;
+		slots = use_indirect ? 1 : !can_push + txm->nb_segs;
 		need = slots - txvq->vq_free_cnt;
 
 		/* Positive value indicates it need free vring descriptors */
@@ -816,7 +843,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		/* Enqueue Packet buffers */
-		error = virtqueue_enqueue_xmit(txvq, txm, use_indirect);
+		error = virtqueue_enqueue_xmit(txvq, txm, use_indirect, can_push);
 		if (unlikely(error)) {
 			if (error == ENOSPC)
				PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0");
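
As a worked example of the descriptor accounting the patch changes: a
single-segment packet previously always consumed two ring slots (one for
the header, one for the data), while with a pushed header it consumes one.
The hypothetical slots_needed() below mirrors the inline expression in
virtio_xmit_pkts():

#include <stdio.h>

/* Hypothetical mirror of: slots = use_indirect ? 1 : !can_push + txm->nb_segs; */
static int slots_needed(int use_indirect, int can_push, int nb_segs)
{
	return use_indirect ? 1 : !can_push + nb_segs;
}

int main(void)
{
	printf("%d\n", slots_needed(0, 1, 1));	/* pushed header: 1 slot */
	printf("%d\n", slots_needed(0, 0, 1));	/* separate header: 2 slots */
	printf("%d\n", slots_needed(1, 0, 3));	/* indirect: always 1 slot */
	return 0;
}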