From patchwork Mon Sep 28 08:20:52 2020
X-Patchwork-Submitter: Marvin Liu <yong.liu@intel.com>
X-Patchwork-Id: 78978
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, Marvin Liu <yong.liu@intel.com>
Date: Mon, 28 Sep 2020 16:20:52 +0800
Message-Id: <20200928082052.61872-2-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200928082052.61872-1-yong.liu@intel.com>
References: <20200928082052.61872-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH 2/2] net/virtio: use indirect ring in packed datapath

Like split ring, packed ring will utilize indirect ring elements when
the mbufs being queued need multiple descriptors. Thus each packet with
multiple segments will take only one slot in the main ring.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index f915b8a2c..b3b1586a7 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1756,7 +1756,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int can_push = 0, slots, need;
+		int can_push = 0, use_indirect = 0, slots, need;
 
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -1768,12 +1768,15 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
 			can_push = 1;
-
+		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+			use_indirect = 1;
 		/* How many main ring entries are needed to this Tx?
+		 * indirect => 1
 		 * any_layout => number of segments
 		 * default    => number of segments + 1
 		 */
-		slots = txm->nb_segs + !can_push;
+		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
 		need = slots - vq->vq_free_cnt;
 
 		/* Positive value indicates it need free vring descriptors */
@@ -1791,7 +1794,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (can_push)
 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
 		else
-			virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
+			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
+						      use_indirect, 0,
 						      in_order);
 
 		virtio_update_packet_stats(&txvq->stats, txm);
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c b/drivers/net/virtio/virtio_rxtx_packed_avx.c
index 6a8214725..ce035b574 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
@@ -207,19 +207,26 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t slots, can_push;
+	uint16_t slots, can_push = 0, use_indirect = 0;
 	int16_t need;
 
+	/* optimize ring usage */
+	if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+	     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+	    rte_mbuf_refcnt_read(txm) == 1 &&
+	    RTE_MBUF_DIRECT(txm) &&
+	    txm->nb_segs == 1 &&
+	    rte_pktmbuf_headroom(txm) >= hdr_size)
+		can_push = 1;
+	else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+		 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+		use_indirect = 1;
 	/* How many main ring entries are needed to this Tx?
+	 * indirect => 1
 	 * any_layout => number of segments
 	 * default    => number of segments + 1
 	 */
-	can_push = rte_mbuf_refcnt_read(txm) == 1 &&
-		   RTE_MBUF_DIRECT(txm) &&
-		   txm->nb_segs == 1 &&
-		   rte_pktmbuf_headroom(txm) >= hdr_size;
-
-	slots = txm->nb_segs + !can_push;
+	slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
 	need = slots - vq->vq_free_cnt;
 
 	/* Positive value indicates it need free vring descriptors */
@@ -234,7 +241,8 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
 	}
 
 	/* Enqueue Packet buffers */
-	virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push, 1);
+	virtqueue_enqueue_xmit_packed(txvq, txm, slots, use_indirect,
+				      can_push, 1);
 	txvq->stats.bytes += txm->pkt_len;
 
 	return 0;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 7d910a0a1..753dfb85c 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -686,7 +686,8 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
 static inline void
 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
-			      uint16_t needed, int can_push, int in_order)
+			      uint16_t needed, int use_indirect, int can_push,
+			      int in_order)
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
@@ -722,6 +723,25 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (!vq->hw->has_tx_offload)
 			virtqueue_clear_net_hdr(hdr);
+	} else if (use_indirect) {
+		/* setup tx ring slot to point to indirect
+		 * descriptor list stored in reserved region.
+		 *
+		 * the first slot in indirect ring is already preset
+		 * to point to the header in reserved region
+		 */
+		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
+		start_dp[idx].len = (needed + 1) *
+			sizeof(struct vring_packed_desc);
+		/* reset flags for indirect desc */
+		head_flags = VRING_DESC_F_INDIRECT;
+		head_flags |= vq->vq_packed.cached_flags;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
+		/* loop below will fill in rest of the indirect elements */
+		start_dp = txr[idx].tx_packed_indir;
+		idx = 1;
 	} else {
 		/* setup first tx ring slot to point to header
 		 * stored in reserved region.
@@ -767,6 +787,15 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	start_dp[prev].id = id;
 
+	if (use_indirect) {
+		idx = head_idx;
+		if (++idx >= vq->vq_nentries) {
+			idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
+	}
+
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
 	vq->vq_avail_idx = idx;
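
For readers unfamiliar with the slot accounting the patch changes, below is
a minimal, standalone sketch of the idea (not DPDK code; the constant,
struct and helper names are made-up stand-ins for illustration): a
multi-segment packet sent through the indirect path consumes a single
main-ring slot, otherwise it needs one slot per segment, plus one for the
virtio-net header when the header cannot be pushed into the first segment.

/* Standalone sketch of the main-ring slot accounting; the names below
 * are illustrative stand-ins, not the real DPDK definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TX_INDIRECT 8	/* stand-in for VIRTIO_MAX_TX_INDIRECT */

struct pkt_info {
	uint16_t nb_segs;	/* number of mbuf segments */
	bool can_push;		/* header fits in the first segment's headroom */
};

/* Slots needed in the main packed ring:
 *   indirect   => 1 (segments live in the indirect descriptor list)
 *   any_layout => number of segments
 *   default    => number of segments + 1 (separate header descriptor)
 */
static uint16_t
tx_ring_slots(const struct pkt_info *p, bool has_indirect)
{
	bool use_indirect = !p->can_push && has_indirect &&
			    p->nb_segs < MAX_TX_INDIRECT;

	return use_indirect ? 1 : (uint16_t)(p->nb_segs + !p->can_push);
}

int
main(void)
{
	struct pkt_info multi_seg = { .nb_segs = 4, .can_push = false };

	/* 5 slots without indirect descriptors, only 1 with them */
	printf("without indirect: %u slots\n",
	       (unsigned)tx_ring_slots(&multi_seg, false));
	printf("with indirect:    %u slots\n",
	       (unsigned)tx_ring_slots(&multi_seg, true));
	return 0;
}

The nb_segs < VIRTIO_MAX_TX_INDIRECT bound in the patch exists because the
indirect descriptor list lives in a fixed-size, per-slot area of the
reserved tx region; chains that do not fit keep using the default path.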