[v8,17/21] net/cpfl: add AVX512 data path for split queue model

Message ID: 20230302103527.931071-18-mingxia.liu@intel.com (mailing list archive)
State: Superseded, archived
Delegated to: Ferruh Yigit
Series: add support for cpfl PMD in DPDK

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Liu, Mingxia March 2, 2023, 10:35 a.m. UTC
  Add AVX512 data path support for the split queue model.
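
  The burst function selection keeps the gate already used for the single
  queue model: the build must provide CC_AVX512_SUPPORT,
  rte_vect_get_max_simd_bitwidth() must allow 512-bit SIMD, and the CPU
  must report AVX512F, AVX512BW and, newly required for Rx by this patch
  (and by the build-time detection in meson.build), AVX512DQ. Below is a
  minimal sketch of that runtime check, condensed from the hunks in this
  patch; the helper name cpfl_rx_avx512_usable() is hypothetical and only
  groups the calls visible in the diff:

      #include <stdbool.h>
      #include <rte_cpuflags.h>
      #include <rte_vect.h>

      /* Hypothetical helper: condenses the runtime gate applied before the
       * split-queue AVX512 Rx burst function is selected. */
      static bool
      cpfl_rx_avx512_usable(void)
      {
      #ifdef CC_AVX512_SUPPORT
              return rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
                     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) == 1;
      #else
              return false;   /* AVX512 support not compiled in. */
      #endif
      }

  On top of this gate, each split Rx queue must also pass
  cpfl_rx_splitq_vec_default(), i.e. bufq2->rx_buf_len must be at least
  max_pkt_len; otherwise the scalar split Rx path is kept.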

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/cpfl/cpfl_rxtx.c            | 56 +++++++++++++++++++++++--
 drivers/net/cpfl/cpfl_rxtx_vec_common.h | 20 ++++++++-
 drivers/net/cpfl/meson.build            |  6 ++-
 3 files changed, 75 insertions(+), 7 deletions(-)
  

Patch

diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index ea28d3978c..dac95579f5 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -758,7 +758,8 @@  cpfl_set_rx_function(struct rte_eth_dev *dev)
 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
 #ifdef CC_AVX512_SUPPORT
 			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
-			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
+			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ))
 				vport->rx_use_avx512 = true;
 #else
 		PMD_DRV_LOG(NOTICE,
@@ -771,6 +772,21 @@  cpfl_set_rx_function(struct rte_eth_dev *dev)
 
 #ifdef RTE_ARCH_X86
 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		if (vport->rx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				(void)idpf_qc_splitq_rx_vec_setup(rxq);
+			}
+#ifdef CC_AVX512_SUPPORT
+			if (vport->rx_use_avx512) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using Split AVX512 Vector Rx (port %d).",
+					    dev->data->port_id);
+				dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts_avx512;
+				return;
+			}
+#endif /* CC_AVX512_SUPPORT */
+		}
 		PMD_DRV_LOG(NOTICE,
 			    "Using Split Scalar Rx (port %d).",
 			    dev->data->port_id);
@@ -826,9 +842,17 @@  cpfl_set_tx_function(struct rte_eth_dev *dev)
 		vport->tx_vec_allowed = true;
 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
 #ifdef CC_AVX512_SUPPORT
+		{
 			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
 			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
 				vport->tx_use_avx512 = true;
+			if (vport->tx_use_avx512) {
+				for (i = 0; i < dev->data->nb_tx_queues; i++) {
+					txq = dev->data->tx_queues[i];
+					idpf_qc_tx_vec_avx512_setup(txq);
+				}
+			}
+		}
 #else
 		PMD_DRV_LOG(NOTICE,
 			    "AVX512 is not supported in build env");
@@ -838,14 +862,26 @@  cpfl_set_tx_function(struct rte_eth_dev *dev)
 	}
 #endif /* RTE_ARCH_X86 */
 
+#ifdef RTE_ARCH_X86
 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		if (vport->tx_vec_allowed) {
+#ifdef CC_AVX512_SUPPORT
+			if (vport->tx_use_avx512) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using Split AVX512 Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts_avx512;
+				dev->tx_pkt_prepare = idpf_dp_prep_pkts;
+				return;
+			}
+#endif /* CC_AVX512_SUPPORT */
+		}
 		PMD_DRV_LOG(NOTICE,
 			    "Using Split Scalar Tx (port %d).",
 			    dev->data->port_id);
 		dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
 		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
 	} else {
-#ifdef RTE_ARCH_X86
 		if (vport->tx_vec_allowed) {
 #ifdef CC_AVX512_SUPPORT
 			if (vport->tx_use_avx512) {
@@ -864,11 +900,25 @@  cpfl_set_tx_function(struct rte_eth_dev *dev)
 			}
 #endif /* CC_AVX512_SUPPORT */
 		}
-#endif /* RTE_ARCH_X86 */
 		PMD_DRV_LOG(NOTICE,
 			    "Using Single Scalar Tx (port %d).",
 			    dev->data->port_id);
 		dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
 		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
 	}
+#else
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		PMD_DRV_LOG(NOTICE,
+			    "Using Split Scalar Tx (port %d).",
+			    dev->data->port_id);
+		dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
+		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
+	} else {
+		PMD_DRV_LOG(NOTICE,
+			    "Using Single Scalar Tx (port %d).",
+			    dev->data->port_id);
+		dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
+		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
+	}
+#endif /* RTE_ARCH_X86 */
 }
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 2d4c6a0ef3..665418d27d 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -64,15 +64,31 @@  cpfl_tx_vec_queue_default(struct idpf_tx_queue *txq)
 	return CPFL_VECTOR_PATH;
 }
 
+static inline int
+cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
+{
+	if (rxq->bufq2->rx_buf_len < rxq->max_pkt_len)
+		return CPFL_SCALAR_PATH;
+
+	return CPFL_VECTOR_PATH;
+}
+
 static inline int
 cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
+	struct idpf_vport *vport = dev->data->dev_private;
 	struct idpf_rx_queue *rxq;
-	int i, ret = 0;
+	int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		ret = (cpfl_rx_vec_queue_default(rxq));
+		default_ret = cpfl_rx_vec_queue_default(rxq);
+		if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+			splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+			ret = splitq_ret && default_ret;
+		} else {
+			ret = default_ret;
+		}
 		if (ret == CPFL_SCALAR_PATH)
 			return CPFL_SCALAR_PATH;
 	}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index fbe6500826..2cf69258e2 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -23,13 +23,15 @@  sources = files(
 if arch_subdir == 'x86'
     cpfl_avx512_cpu_support = (
         cc.get_define('__AVX512F__', args: machine_args) != '' and
-        cc.get_define('__AVX512BW__', args: machine_args) != ''
+        cc.get_define('__AVX512BW__', args: machine_args) != '' and
+        cc.get_define('__AVX512DQ__', args: machine_args) != ''
     )
 
     cpfl_avx512_cc_support = (
         not machine_args.contains('-mno-avx512f') and
         cc.has_argument('-mavx512f') and
-        cc.has_argument('-mavx512bw')
+        cc.has_argument('-mavx512bw') and
+        cc.has_argument('-mavx512dq')
     )
 
     if cpfl_avx512_cpu_support == true or cpfl_avx512_cc_support == true