[v4,21/62] net/cnxk: add Rx vector version for cn9k

Message ID 20210623044702.4240-22-ndabilpuram@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Series: Marvell CNXK Ethdev Driver

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Nithin Dabilpuram June 23, 2021, 4:46 a.m. UTC
  From: Jerin Jacob <jerinj@marvell.com>

Add Rx burst vector version for CN9K.

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn9k_rx.c     |  13 ++-
 drivers/net/cnxk/cn9k_rx.h     | 221 +++++++++++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_rx_vec.c |  17 ++++
 drivers/net/cnxk/meson.build   |  11 +-
 4 files changed, 260 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/cnxk/cn9k_rx_vec.c
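
In short: the new path pulls four completion-queue entries per iteration with NEON, then hands any sub-batch remainder (and the whole burst on non-ARM64 builds) to the existing scalar cn9k_nix_recv_pkts(). A worked example of the batching math used in the loop setup, assuming NIX_DESCS_PER_LOOP is 4 as the four-CQE loop body implies:

    uint16_t pkts = 19; /* CQEs reported available */
    uint16_t pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1); /* 3: scalar tail */

    pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);     /* 16: vector loop */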
  

Patch

diff --git a/drivers/net/cnxk/cn9k_rx.c b/drivers/net/cnxk/cn9k_rx.c
index 87a62c9..01eb21f 100644
--- a/drivers/net/cnxk/cn9k_rx.c
+++ b/drivers/net/cnxk/cn9k_rx.c
@@ -50,7 +50,18 @@  cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 #undef R
 	};
 
-	pick_rx_func(eth_dev, nix_eth_rx_burst);
+	const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)					\
+	[f3][f2][f1][f0] = cn9k_nix_recv_pkts_vec_##name,
+
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	if (dev->scalar_ena)
+		pick_rx_func(eth_dev, nix_eth_rx_burst);
+	else
+		pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 
 	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
 		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
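
The vector table above follows the same dispatch pattern as the existing scalar and mseg tables: a [2][2][2][2] array indexed by the individual Rx offload flag bits, so each enabled combination of offloads resolves to a burst function compiled with exactly those flags. A minimal sketch of the selection logic (select_rx_burst() and rx_burst_fn are hypothetical names; the index order f3..f0 = MARK, CKSUM, PTYPE, RSS follows the R() macro):

    #include <rte_mbuf.h>

    #include "cn9k_rx.h" /* NIX_RX_OFFLOAD_* flags */

    typedef uint16_t (*rx_burst_fn)(void *rxq, struct rte_mbuf **pkts,
                                    uint16_t nb);

    static rx_burst_fn
    select_rx_burst(const rx_burst_fn tbl[2][2][2][2], uint64_t flags)
    {
            /* Each offload flag contributes one index bit; !! folds it
             * to 0/1.
             */
            return tbl[!!(flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                      [!!(flags & NIX_RX_OFFLOAD_CHECKSUM_F)]
                      [!!(flags & NIX_RX_OFFLOAD_PTYPE_F)]
                      [!!(flags & NIX_RX_OFFLOAD_RSS_F)];
    }

dev->scalar_ena gives users a runtime escape hatch: when the scalar_enable devargs switch is set (see the cnxk guide), the scalar table is picked even where the vector path is available.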
diff --git a/drivers/net/cnxk/cn9k_rx.h b/drivers/net/cnxk/cn9k_rx.h
index 49f80ce..bc04f5c 100644
--- a/drivers/net/cnxk/cn9k_rx.h
+++ b/drivers/net/cnxk/cn9k_rx.h
@@ -6,6 +6,7 @@ 
 #define __CN9K_RX_H__
 
 #include <rte_ether.h>
+#include <rte_vect.h>
 
 #define NIX_RX_OFFLOAD_NONE	     (0)
 #define NIX_RX_OFFLOAD_RSS_F	     BIT(0)
@@ -266,6 +267,223 @@  cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	return nb_pkts;
 }
 
+#if defined(RTE_ARCH_ARM64)
+
+static __rte_always_inline uint16_t
+cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
+			  uint16_t pkts, const uint16_t flags)
+{
+	struct cn9k_eth_rxq *rxq = rx_queue;
+	uint16_t packets = 0;
+	uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
+	const uint64_t mbuf_initializer = rxq->mbuf_initializer;
+	const uint64x2_t data_off = vdupq_n_u64(rxq->data_off);
+	uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
+	uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
+	uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
+	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
+	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
+	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
+	const uint16_t *lookup_mem = rxq->lookup_mem;
+	const uint32_t qmask = rxq->qmask;
+	const uint64_t wdata = rxq->wdata;
+	const uintptr_t desc = rxq->desc;
+	uint8x16_t f0, f1, f2, f3;
+	uint32_t head = rxq->head;
+	uint16_t pkts_left;
+
+	pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
+	pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
+
+	/* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
+	pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
+
+	while (packets < pkts) {
+		/* Exit loop if head is about to wrap and become unaligned */
+		if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
+		    NIX_DESCS_PER_LOOP) {
+			pkts_left += (pkts - packets);
+			break;
+		}
+
+		const uintptr_t cq0 = desc + CQE_SZ(head);
+
+		/* Prefetch N desc ahead */
+		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(8)));
+		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(9)));
+		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(10)));
+		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(11)));
+
+		/* Get NIX_RX_SG_S for size and buffer pointer */
+		cq0_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(0) + 64));
+		cq1_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(1) + 64));
+		cq2_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(2) + 64));
+		cq3_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(3) + 64));
+
+		/* Extract mbuf from NIX_RX_SG_S */
+		mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
+		mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
+		mbuf01 = vqsubq_u64(mbuf01, data_off);
+		mbuf23 = vqsubq_u64(mbuf23, data_off);
+
+		/* Move mbufs to scalar registers for future use */
+		mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
+		mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
+		mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
+		mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);
+
+		/* Mask to get packet len from NIX_RX_SG_S */
+		const uint8x16_t shuf_msk = {
+			0xFF, 0xFF, /* pkt_type set as unknown */
+			0xFF, 0xFF, /* pkt_type set as unknown */
+			0,    1,    /* octet 1~0, low 16 bits pkt_len */
+			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+			0,    1,    /* octet 1~0, 16 bits data_len */
+			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+		/* Form the rx_descriptor_fields1 with pkt_len and data_len */
+		f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
+		f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
+		f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
+		f3 = vqtbl1q_u8(cq3_w8, shuf_msk);
+
+		/* Load CQE word0 and word1 */
+		uint64_t cq0_w0 = ((uint64_t *)(cq0 + CQE_SZ(0)))[0];
+		uint64_t cq0_w1 = ((uint64_t *)(cq0 + CQE_SZ(0)))[1];
+		uint64_t cq1_w0 = ((uint64_t *)(cq0 + CQE_SZ(1)))[0];
+		uint64_t cq1_w1 = ((uint64_t *)(cq0 + CQE_SZ(1)))[1];
+		uint64_t cq2_w0 = ((uint64_t *)(cq0 + CQE_SZ(2)))[0];
+		uint64_t cq2_w1 = ((uint64_t *)(cq0 + CQE_SZ(2)))[1];
+		uint64_t cq3_w0 = ((uint64_t *)(cq0 + CQE_SZ(3)))[0];
+		uint64_t cq3_w1 = ((uint64_t *)(cq0 + CQE_SZ(3)))[1];
+
+		if (flags & NIX_RX_OFFLOAD_RSS_F) {
+			/* Fill rss in the rx_descriptor_fields1 */
+			f0 = vsetq_lane_u32(cq0_w0, f0, 3);
+			f1 = vsetq_lane_u32(cq1_w0, f1, 3);
+			f2 = vsetq_lane_u32(cq2_w0, f2, 3);
+			f3 = vsetq_lane_u32(cq3_w0, f3, 3);
+			ol_flags0 = PKT_RX_RSS_HASH;
+			ol_flags1 = PKT_RX_RSS_HASH;
+			ol_flags2 = PKT_RX_RSS_HASH;
+			ol_flags3 = PKT_RX_RSS_HASH;
+		} else {
+			ol_flags0 = 0;
+			ol_flags1 = 0;
+			ol_flags2 = 0;
+			ol_flags3 = 0;
+		}
+
+		if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
+			/* Fill packet_type in the rx_descriptor_fields1 */
+			f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
+					    f0, 0);
+			f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
+					    f1, 0);
+			f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
+					    f2, 0);
+			f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
+					    f3, 0);
+		}
+
+		if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
+			ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
+			ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
+			ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
+			ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
+		}
+
+		if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
+			ol_flags0 = nix_update_match_id(
+				*(uint16_t *)(cq0 + CQE_SZ(0) + 38), ol_flags0,
+				mbuf0);
+			ol_flags1 = nix_update_match_id(
+				*(uint16_t *)(cq0 + CQE_SZ(1) + 38), ol_flags1,
+				mbuf1);
+			ol_flags2 = nix_update_match_id(
+				*(uint16_t *)(cq0 + CQE_SZ(2) + 38), ol_flags2,
+				mbuf2);
+			ol_flags3 = nix_update_match_id(
+				*(uint16_t *)(cq0 + CQE_SZ(3) + 38), ol_flags3,
+				mbuf3);
+		}
+
+		/* Form rearm_data with ol_flags */
+		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
+		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
+		rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
+		rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);
+
+		/* Update rx_descriptor_fields1 */
+		vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
+		vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
+		vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
+		vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);
+
+		/* Update rearm_data */
+		vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
+		vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
+		vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
+		vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);
+
+		/* Mark that there are no more segments */
+		mbuf0->next = NULL;
+		mbuf1->next = NULL;
+		mbuf2->next = NULL;
+		mbuf3->next = NULL;
+
+		/* Store the mbufs to rx_pkts */
+		vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
+		vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);
+
+		/* Prefetch mbufs */
+		roc_prefetch_store_keep(mbuf0);
+		roc_prefetch_store_keep(mbuf1);
+		roc_prefetch_store_keep(mbuf2);
+		roc_prefetch_store_keep(mbuf3);
+
+		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
+		__mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
+		__mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
+		__mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
+		__mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+
+		/* Advance head pointer and packets */
+		head += NIX_DESCS_PER_LOOP;
+		head &= qmask;
+		packets += NIX_DESCS_PER_LOOP;
+	}
+
+	rxq->head = head;
+	rxq->available -= packets;
+
+	rte_io_wmb();
+	/* Free all the CQs that we've processed */
+	plt_write64((rxq->wdata | packets), rxq->cq_door);
+
+	if (unlikely(pkts_left))
+		packets += cn9k_nix_recv_pkts(rx_queue, &rx_pkts[packets],
+					      pkts_left, flags);
+
+	return packets;
+}
+
+#else
+
+static inline uint16_t
+cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
+			  uint16_t pkts, const uint16_t flags)
+{
+	RTE_SET_USED(rx_queue);
+	RTE_SET_USED(rx_pkts);
+	RTE_SET_USED(pkts);
+	RTE_SET_USED(flags);
+
+	return 0;
+}
+
+#endif
+
 #define RSS_F	  NIX_RX_OFFLOAD_RSS_F
 #define PTYPE_F	  NIX_RX_OFFLOAD_PTYPE_F
 #define CKSUM_F	  NIX_RX_OFFLOAD_CHECKSUM_F
@@ -295,6 +513,9 @@  R(mark_cksum_ptype_rss,		1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
 									       \
 	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name(      \
+		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
+									       \
+	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name(       \
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);
 
 NIX_RX_FASTPATH_MODES
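
The shuffle above is the core of the descriptor-field assembly: vqtbl1q_u8() performs a byte-level gather in which each mask lane selects one source byte and any out-of-range index (0xFF) reads back as zero, so pkt_len and data_len land at their rx_descriptor_fields1 offsets with everything else cleared in the same instruction. A standalone, strictly-typed sketch of that transform (gather_len_fields() is a hypothetical name; the in-tree code passes the uint64x2_t directly, relying on the lax vector conversions enabled in meson.build below):

    #include <arm_neon.h>

    static inline uint8x16_t
    gather_len_fields(uint64x2_t sg_w8)
    {
            /* 0xFF lanes index out of range and therefore become zero */
            const uint8x16_t shuf_msk = {
                    0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type: unknown */
                    0,    1,    0xFF, 0xFF, /* pkt_len: low 16 bits */
                    0,    1,                /* data_len: 16 bits */
                    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

            return vqtbl1q_u8(vreinterpretq_u8_u64(sg_w8), shuf_msk);
    }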
diff --git a/drivers/net/cnxk/cn9k_rx_vec.c b/drivers/net/cnxk/cn9k_rx_vec.c
new file mode 100644
index 0000000..997177f
--- /dev/null
+++ b/drivers/net/cnxk/cn9k_rx_vec.c
@@ -0,0 +1,17 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_ethdev.h"
+#include "cn9k_rx.h"
+
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name(       \
+		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
+	{                                                                      \
+		return cn9k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,      \
+						 (flags));                     \
+	}
+
+NIX_RX_FASTPATH_MODES
+#undef R
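
Each R() expansion produces one out-of-line wrapper with the offload flags folded in as compile-time constants, so the always-inline cn9k_nix_recv_pkts_vector() body is specialized per mode and untaken offload branches are eliminated. For instance, the mark_cksum_ptype_rss entry visible in the cn9k_rx.h context above expands to:

    uint16_t __rte_noinline __rte_hot
    cn9k_nix_recv_pkts_vec_mark_cksum_ptype_rss(void *rx_queue,
                                                struct rte_mbuf **rx_pkts,
                                                uint16_t pkts)
    {
            return cn9k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,
                                             (MARK_F | CKSUM_F | PTYPE_F |
                                              RSS_F));
    }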
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index 5bfab21..9aba7d4 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -17,9 +17,18 @@  sources = files('cnxk_ethdev.c',
 # CN9K
 sources += files('cn9k_ethdev.c',
 		 'cn9k_rx.c',
-		 'cn9k_rx_mseg.c')
+		 'cn9k_rx_mseg.c',
+		 'cn9k_rx_vec.c')
 # CN10K
 sources += files('cn10k_ethdev.c')
 
 deps += ['bus_pci', 'cryptodev', 'eventdev', 'security']
 deps += ['common_cnxk', 'mempool_cnxk']
+
+# Allow implicit vector conversions
+extra_flags = ['-flax-vector-conversions']
+foreach flag: extra_flags
+	if cc.has_argument(flag)
+		cflags += flag
+	endif
+endforeach
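
The -flax-vector-conversions flag is needed because the Rx vector loop mixes NEON types freely: f0 above is declared uint8x16_t yet is passed straight to vsetq_lane_u32(), which takes and returns uint32x4_t, and the uint64x2_t CQE words go into vqtbl1q_u8() untouched. Under strict typing each such call would need an explicit reinterpret pair; the RSS lane insert, for example, would become roughly (illustrative only):

    f0 = vreinterpretq_u8_u32(vsetq_lane_u32((uint32_t)cq0_w0,
                                             vreinterpretq_u32_u8(f0), 3));

Guarding the flag with cc.has_argument() keeps the build from breaking on compilers that do not recognize it.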