From patchwork Tue Nov 17 10:06:34 2020
X-Patchwork-Submitter: Joyce Kong <joyce.kong@arm.com>
X-Patchwork-Id: 84261
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Joyce Kong <joyce.kong@arm.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com, jerinj@marvell.com,
	ruifeng.wang@arm.com, honnappa.nagarahalli@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Tue, 17 Nov 2020 18:06:34 +0800
Message-Id: <20201117100635.27690-4-joyce.kong@arm.com>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20201117100635.27690-1-joyce.kong@arm.com>
References: <20200911120906.45995-1-joyce.kong@arm.com>
	<20201117100635.27690-1-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 3/4] net/virtio: add vectorized packed ring Tx NEON path

Optimize the packed ring Tx batch path with NEON instructions.
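For context, the batch path treats each 16-byte packed descriptor as two 64-bit lanes: the buffer address fills the low lane, while len, id and flags are OR-ed into the high lane, and two descriptors at a time are written back with one interleaved NEON store. The worked illustration below is not part of the patch: the struct mirrors the virtio 1.1 packed-ring layout, the continuation of ID_BITS_OFFSET is assumed to subtract offsetof(len) as its "higher 64bits" comment suggests, and pack_desc_hi64() is a made-up helper name.

#include <stdint.h>
#include <stddef.h>

/* Packed-ring descriptor layout per the virtio 1.1 spec:
 * addr at byte 0, len at 8, id at 12, flags at 14 (16 bytes total).
 */
struct example_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

#define EX_BYTE_SIZE 8
/* flags sit 48 bits above len inside the descriptor's high 64-bit word */
#define EX_FLAGS_LEN_BITS_OFFSET ((offsetof(struct example_packed_desc, flags) - \
	offsetof(struct example_packed_desc, len)) * EX_BYTE_SIZE)
/* id sits 32 bits above len */
#define EX_ID_BITS_OFFSET ((offsetof(struct example_packed_desc, id) - \
	offsetof(struct example_packed_desc, len)) * EX_BYTE_SIZE)

_Static_assert(EX_FLAGS_LEN_BITS_OFFSET == 48, "flags start at bit 48");
_Static_assert(EX_ID_BITS_OFFSET == 32, "id starts at bit 32");

/* Hypothetical helper: compose the descriptor's high 64-bit word the same
 * way the NEON path builds tx_desc0/tx_desc1 (flags | id << 32 | len).
 */
static inline uint64_t
pack_desc_hi64(uint32_t len, uint16_t id, uint16_t flags)
{
	return (uint64_t)flags << EX_FLAGS_LEN_BITS_OFFSET |
		(uint64_t)id << EX_ID_BITS_OFFSET |
		len;
}

With that layout, desc[i].val[0] carries two buffer addresses and desc[i].val[1] carries two such packed high words, so a single vst2q_u64() de-interleaves them into two adjacent 16-byte descriptors in the ring.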
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_rxtx_packed.h      |   6 +-
 drivers/net/virtio/virtio_rxtx_packed_neon.h | 143 +++++++++++++++++++
 2 files changed, 148 insertions(+), 1 deletion(-)

diff --git a/drivers/net/virtio/virtio_rxtx_packed.h b/drivers/net/virtio/virtio_rxtx_packed.h
index 8f5198ad7..016b6fb24 100644
--- a/drivers/net/virtio/virtio_rxtx_packed.h
+++ b/drivers/net/virtio/virtio_rxtx_packed.h
@@ -28,6 +28,8 @@
 /* flag bits offset in packed ring desc from ID */
 #define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
 		offsetof(struct vring_packed_desc, id)) * BYTE_SIZE)
+#define FLAGS_LEN_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
+		offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
 #endif
 
 #define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \
@@ -36,13 +38,15 @@
 /* reference count offset in mbuf rearm data */
 #define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \
 	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
+
+#ifdef CC_AVX512_SUPPORT
 /* segment number offset in mbuf rearm data */
 #define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \
 	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
-
 /* default rearm data */
 #define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \
 	1ULL << REFCNT_BITS_OFFSET)
+#endif
 
 /* id bits offset in packed ring desc higher 64bits */
 #define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \
diff --git a/drivers/net/virtio/virtio_rxtx_packed_neon.h b/drivers/net/virtio/virtio_rxtx_packed_neon.h
index fb1e49909..041f771ea 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_neon.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_neon.h
@@ -16,6 +16,149 @@
 #include "virtio_rxtx_packed.h"
 #include "virtqueue.h"
 
+static inline int
+virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
+				   struct rte_mbuf **tx_pkts)
+{
+	struct virtqueue *vq = txvq->vq;
+	uint16_t head_size = vq->hw->vtnet_hdr_size;
+	uint16_t idx = vq->vq_avail_idx;
+	struct virtio_net_hdr *hdr;
+	struct vq_desc_extra *dxp;
+	struct vring_packed_desc *p_desc;
+	uint16_t i;
+
+	if (idx & PACKED_BATCH_MASK)
+		return -1;
+
+	if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
+		return -1;
+
+	/* Map four refcnt and nb_segs from mbufs to one NEON register. */
+	uint8x16_t ref_seg_msk = {
+		2, 3, 4, 5,
+		10, 11, 12, 13,
+		18, 19, 20, 21,
+		26, 27, 28, 29
+	};
+
+	/* Map four data_off from mbufs to one NEON register. */
+	uint8x8_t data_msk = {
+		0, 1,
+		8, 9,
+		16, 17,
+		24, 25
+	};
+
+	uint16x8_t net_hdr_msk = {
+		0xFFFF, 0xFFFF,
+		0, 0, 0, 0
+	};
+
+	uint16x4_t pkts[PACKED_BATCH_SIZE];
+	uint8x16x2_t mbuf;
+	/* Load four mbufs rearm data. */
+	RTE_BUILD_BUG_ON(REFCNT_BITS_OFFSET >= 64);
+	pkts[0] = vld1_u16((uint16_t *)&tx_pkts[0]->rearm_data);
+	pkts[1] = vld1_u16((uint16_t *)&tx_pkts[1]->rearm_data);
+	pkts[2] = vld1_u16((uint16_t *)&tx_pkts[2]->rearm_data);
+	pkts[3] = vld1_u16((uint16_t *)&tx_pkts[3]->rearm_data);
+
+	mbuf.val[0] = vreinterpretq_u8_u16(vcombine_u16(pkts[0], pkts[1]));
+	mbuf.val[1] = vreinterpretq_u8_u16(vcombine_u16(pkts[2], pkts[3]));
+
+	/* refcnt = 1 and nb_segs = 1 */
+	uint32x4_t def_ref_seg = vdupq_n_u32(0x10001);
+	/* Check refcnt and nb_segs. */
+	uint32x4_t ref_seg = vreinterpretq_u32_u8(vqtbl2q_u8(mbuf, ref_seg_msk));
+	poly128_t cmp1 = vreinterpretq_p128_u32(~vceqq_u32(ref_seg, def_ref_seg));
+	if (unlikely(cmp1))
+		return -1;
+
+	/* Check headroom is enough. */
+	uint16x4_t head_rooms = vdup_n_u16(head_size);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
+		offsetof(struct rte_mbuf, rearm_data));
+	uint16x4_t data_offset = vreinterpret_u16_u8(vqtbl2_u8(mbuf, data_msk));
+	uint64x1_t cmp2 = vreinterpret_u64_u16(vclt_u16(data_offset, head_rooms));
+	if (unlikely(vget_lane_u64(cmp2, 0)))
+		return -1;
+
+	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		dxp = &vq->vq_descx[idx + i];
+		dxp->ndescs = 1;
+		dxp->cookie = tx_pkts[i];
+	}
+
+	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		tx_pkts[i]->data_off -= head_size;
+		tx_pkts[i]->data_len += head_size;
+	}
+
+	uint64x2x2_t desc[PACKED_BATCH_SIZE / 2];
+	uint64x2_t base_addr0 = {
+		VIRTIO_MBUF_ADDR(tx_pkts[0], vq) + tx_pkts[0]->data_off,
+		VIRTIO_MBUF_ADDR(tx_pkts[1], vq) + tx_pkts[1]->data_off
+	};
+	uint64x2_t base_addr1 = {
+		VIRTIO_MBUF_ADDR(tx_pkts[2], vq) + tx_pkts[2]->data_off,
+		VIRTIO_MBUF_ADDR(tx_pkts[3], vq) + tx_pkts[3]->data_off
+	};
+
+	desc[0].val[0] = base_addr0;
+	desc[1].val[0] = base_addr1;
+
+	uint64_t flags = (uint64_t)vq->vq_packed.cached_flags << FLAGS_LEN_BITS_OFFSET;
+	uint64x2_t tx_desc0 = {
+		flags | (uint64_t)idx << ID_BITS_OFFSET | tx_pkts[0]->data_len,
+		flags | (uint64_t)(idx + 1) << ID_BITS_OFFSET | tx_pkts[1]->data_len
+	};
+
+	uint64x2_t tx_desc1 = {
+		flags | (uint64_t)(idx + 2) << ID_BITS_OFFSET | tx_pkts[2]->data_len,
+		flags | (uint64_t)(idx + 3) << ID_BITS_OFFSET | tx_pkts[3]->data_len
+	};
+
+	desc[0].val[1] = tx_desc0;
+	desc[1].val[1] = tx_desc1;
+
+	if (!vq->hw->has_tx_offload) {
+		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
+					struct virtio_net_hdr *, -head_size);
+			/* Clear net hdr. */
+			uint16x8_t v_hdr = vld1q_u16((void *)hdr);
+			vst1q_u16((void *)hdr, vandq_u16(v_hdr, net_hdr_msk));
+		}
+	} else {
+		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
+					struct virtio_net_hdr *, -head_size);
+			virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+		}
+	}
+
+	/* Enqueue packet buffers. */
+	p_desc = &vq->vq_packed.ring.desc[idx];
+	vst2q_u64((uint64_t *)p_desc, desc[0]);
+	vst2q_u64((uint64_t *)(p_desc + 2), desc[1]);
+
+	virtio_update_batch_stats(&txvq->stats, tx_pkts[0]->pkt_len,
+			tx_pkts[1]->pkt_len, tx_pkts[2]->pkt_len,
+			tx_pkts[3]->pkt_len);
+
+	vq->vq_avail_idx += PACKED_BATCH_SIZE;
+	vq->vq_free_cnt -= PACKED_BATCH_SIZE;
+
+	if (vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^=
+			VRING_PACKED_DESC_F_AVAIL_USED;
+	}
+
+	return 0;
+}
+
 static inline uint16_t
 virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
 				   struct rte_mbuf **rx_pkts)
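
Note on usage (illustration only, not part of the patch): virtqueue_enqueue_batch_packed_vec() returns -1 whenever the four mbufs cannot be sent as one batch (misaligned avail index, refcnt != 1 or chained segments, or not enough headroom for the virtio-net header), and the caller, which is not shown here, is expected to fall back to the single-descriptor path in that case. The sketch below only shows that calling pattern; the wrapper name example_xmit_pkts_packed_vec() is invented, and it assumes a scalar helper equivalent to the virtqueue_enqueue_single_packed_vec() used by the existing AVX512 path is in scope.

/* Hypothetical caller sketch, assuming it sits next to the functions in
 * virtio_rxtx_packed_neon.h so the virtio types and helpers are visible.
 */
static uint16_t
example_xmit_pkts_packed_vec(struct virtnet_tx *txvq,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;

	while (nb_tx < nb_pkts) {
		/* Try four packets at once while a full batch remains. */
		if (nb_pkts - nb_tx >= PACKED_BATCH_SIZE &&
		    !virtqueue_enqueue_batch_packed_vec(txvq,
						&tx_pkts[nb_tx])) {
			nb_tx += PACKED_BATCH_SIZE;
			continue;
		}
		/* Otherwise fall back to the scalar single-packet enqueue. */
		if (virtqueue_enqueue_single_packed_vec(txvq, tx_pkts[nb_tx]))
			break;
		nb_tx++;
	}

	return nb_tx;
}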