[v3,1/2] net/ice: add Tx AVX2 offload path

Message ID: 1624933761-20884-2-git-send-email-wenzhuo.lu@intel.com
State: Accepted, archived
Delegated to: Qi Zhang
Series: add Rx/Tx offload paths for ICE AVX2

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Wenzhuo Lu June 29, 2021, 2:29 a.m. UTC
Add a dedicated Tx AVX2 path that supports HW offload
features such as checksum insertion and VLAN insertion.
This path is chosen automatically according to the
configured Tx offloads.
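
For illustration only (not part of the patch), a minimal sketch of a
port configuration that requests just the offloads this vector path
handles, so that the Tx device check (tx_check_ret in the first hunk
below) should report ICE_VECTOR_OFFLOAD_PATH. Offload flag names are
the pre-21.11 DPDK ones; the queue counts are placeholders.

#include <rte_ethdev.h>

static int
configure_port_for_vec_offload(uint16_t port_id)
{
	struct rte_eth_conf conf = {0};

	/* checksum and VLAN insertion: offloads the new path supports */
	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_UDP_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM |
			       DEV_TX_OFFLOAD_VLAN_INSERT;

	/* single Rx/Tx queue for brevity */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}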

The shared descriptor-filling routines are marked
'__rte_always_inline' and take a constant 'offload' flag,
so the compiler generates the specialized copies of the
code instead of it being duplicated by hand.
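
A compact sketch of that pattern with hypothetical names, separate
from the driver code: the always-inline body takes a constant bool,
each exported wrapper passes a literal, and the compiler emits two
specialized copies with the dead branch eliminated.

#include <stdbool.h>
#include <stdint.h>
#include <rte_common.h>

static __rte_always_inline uint16_t
xmit_common(uint16_t nb_pkts, bool offload)
{
	if (offload) {
		/* offload-only descriptor setup; folded away when
		 * the constant 'false' is propagated */
	}
	/* common transmit work ... */
	return nb_pkts;
}

uint16_t
xmit_plain(uint16_t nb_pkts)
{
	return xmit_common(nb_pkts, false);
}

uint16_t
xmit_offload(uint16_t nb_pkts)
{
	return xmit_common(nb_pkts, true);
}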

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c          | 37 ++++++++++++++++---------
 drivers/net/ice/ice_rxtx.h          |  2 ++
 drivers/net/ice/ice_rxtx_vec_avx2.c | 54 ++++++++++++++++++++++++++-----------
 3 files changed, 65 insertions(+), 28 deletions(-)

Patch

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index fc9bb5a..5419047 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -3288,7 +3288,7 @@ 
 #ifdef RTE_ARCH_X86
 	struct ice_tx_queue *txq;
 	int i;
-	int tx_check_ret = 0;
+	int tx_check_ret = -1;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		ad->tx_use_avx2 = false;
@@ -3307,13 +3307,14 @@ 
 			PMD_DRV_LOG(NOTICE,
 				"AVX512 is not supported in build env");
 #endif
-			if (!ad->tx_use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&
-			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
-			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+			if (!ad->tx_use_avx512 &&
+				(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+				rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
 				ad->tx_use_avx2 = true;
 
-			if (!ad->tx_use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+			if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
+				tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
 				ad->tx_vec_allowed = false;
 
 			if (ad->tx_vec_allowed) {
@@ -3331,6 +3332,7 @@ 
 	}
 
 	if (ad->tx_vec_allowed) {
+		dev->tx_pkt_prepare = NULL;
 		if (ad->tx_use_avx512) {
 #ifdef CC_AVX512_SUPPORT
 			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
@@ -3339,6 +3341,7 @@ 
 					    dev->data->port_id);
 				dev->tx_pkt_burst =
 					ice_xmit_pkts_vec_avx512_offload;
+				dev->tx_pkt_prepare = ice_prep_pkts;
 			} else {
 				PMD_DRV_LOG(NOTICE,
 					    "Using AVX512 Vector Tx (port %d).",
@@ -3347,14 +3350,22 @@ 
 			}
 #endif
 		} else {
-			PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
-				    ad->tx_use_avx2 ? "avx2 " : "",
-				    dev->data->port_id);
-			dev->tx_pkt_burst = ad->tx_use_avx2 ?
-					    ice_xmit_pkts_vec_avx2 :
-					    ice_xmit_pkts_vec;
+			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX2 OFFLOAD Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst =
+					ice_xmit_pkts_vec_avx2_offload;
+				dev->tx_pkt_prepare = ice_prep_pkts;
+			} else {
+				PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+					    ad->tx_use_avx2 ? "avx2 " : "",
+					    dev->data->port_id);
+				dev->tx_pkt_burst = ad->tx_use_avx2 ?
+						    ice_xmit_pkts_vec_avx2 :
+						    ice_xmit_pkts_vec;
+			}
 		}
-		dev->tx_pkt_prepare = NULL;
 
 		return;
 	}
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 86b6f3d..f0536f7 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -255,6 +255,8 @@  uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
 					  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+					uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 165bc1b..b72946b 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -769,30 +769,32 @@ 
 				rx_pkts + retval, nb_pkts);
 }
 
-static inline void
+static __rte_always_inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp,
-	 struct rte_mbuf *pkt, uint64_t flags)
+	 struct rte_mbuf *pkt, uint64_t flags, bool offload)
 {
 	uint64_t high_qw =
 		(ICE_TX_DESC_DTYPE_DATA |
 		 ((uint64_t)flags  << ICE_TXD_QW1_CMD_S) |
 		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
+	if (offload)
+		ice_txd_enable_offload(pkt, &high_qw);
 
 	__m128i descriptor = _mm_set_epi64x(high_qw,
 				pkt->buf_iova + pkt->data_off);
 	_mm_store_si128((__m128i *)txdp, descriptor);
 }
 
-static inline void
+static __rte_always_inline void
 ice_vtx(volatile struct ice_tx_desc *txdp,
-	struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+	struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags, bool offload)
 {
 	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
 			((uint64_t)flags  << ICE_TXD_QW1_CMD_S));
 
 	/* if unaligned on 32-bit boundary, do one to align */
 	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
-		ice_vtx1(txdp, *pkt, flags);
+		ice_vtx1(txdp, *pkt, flags, offload);
 		nb_pkts--, txdp++, pkt++;
 	}
 
@@ -802,18 +804,26 @@ 
 			hi_qw_tmpl |
 			((uint64_t)pkt[3]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (offload)
+			ice_txd_enable_offload(pkt[3], &hi_qw3);
 		uint64_t hi_qw2 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[2]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (offload)
+			ice_txd_enable_offload(pkt[2], &hi_qw2);
 		uint64_t hi_qw1 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[1]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (offload)
+			ice_txd_enable_offload(pkt[1], &hi_qw1);
 		uint64_t hi_qw0 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[0]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (offload)
+			ice_txd_enable_offload(pkt[0], &hi_qw0);
 
 		__m256i desc2_3 =
 			_mm256_set_epi64x
@@ -833,14 +843,14 @@ 
 
 	/* do any last ones */
 	while (nb_pkts) {
-		ice_vtx1(txdp, *pkt, flags);
+		ice_vtx1(txdp, *pkt, flags, offload);
 		txdp++, pkt++, nb_pkts--;
 	}
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
-			      uint16_t nb_pkts)
+			      uint16_t nb_pkts, bool offload)
 {
 	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
 	volatile struct ice_tx_desc *txdp;
@@ -869,11 +879,11 @@ 
 	if (nb_commit >= n) {
 		ice_tx_backlog_entry(txep, tx_pkts, n);
 
-		ice_vtx(txdp, tx_pkts, n - 1, flags);
+		ice_vtx(txdp, tx_pkts, n - 1, flags, offload);
 		tx_pkts += (n - 1);
 		txdp += (n - 1);
 
-		ice_vtx1(txdp, *tx_pkts++, rs);
+		ice_vtx1(txdp, *tx_pkts++, rs, offload);
 
 		nb_commit = (uint16_t)(nb_commit - n);
 
@@ -887,7 +897,7 @@ 
 
 	ice_tx_backlog_entry(txep, tx_pkts, nb_commit);
 
-	ice_vtx(txdp, tx_pkts, nb_commit, flags);
+	ice_vtx(txdp, tx_pkts, nb_commit, flags, offload);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
@@ -905,9 +915,9 @@ 
 	return nb_pkts;
 }
 
-uint16_t
-ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
-		       uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+ice_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
+			      uint16_t nb_pkts, bool offload)
 {
 	uint16_t nb_tx = 0;
 	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
@@ -917,7 +927,7 @@ 
 
 		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 		ret = ice_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
-						    num);
+						    num, offload);
 		nb_tx += ret;
 		nb_pkts -= ret;
 		if (ret < num)
@@ -926,3 +936,17 @@ 
 
 	return nb_tx;
 }
+
+uint16_t
+ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+		       uint16_t nb_pkts)
+{
+	return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, false);
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+			       uint16_t nb_pkts)
+{
+	return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, true);
+}
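
For completeness, a hypothetical caller-side sketch (not part of the
patch) showing the per-packet mbuf flags the offload path acts on;
ice_txd_enable_offload() translates these into the descriptor command
and offset fields. Flag and field names are the DPDK 21.x mbuf API;
the header lengths and VLAN tag are placeholders.

#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static uint16_t
send_with_offloads(uint16_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; i++) {
		/* header lengths locate the checksum fields */
		pkts[i]->l2_len = sizeof(struct rte_ether_hdr);
		pkts[i]->l3_len = sizeof(struct rte_ipv4_hdr);
		/* placeholder VLAN tag to insert */
		pkts[i]->vlan_tci = 100;
		pkts[i]->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				     PKT_TX_VLAN;
	}

	return rte_eth_tx_burst(port_id, queue_id, pkts, n);
}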