[v3,5/8] net/ice: support Tx SSE vector

Message ID 1552630975-62900-6-git-send-email-wenzhuo.lu@intel.com
State Superseded, archived
Delegated to: Qi Zhang
Series
  • Support vector instructions on ICE

Checks

Context               Check    Description
ci/Intel-compilation  success  Compilation OK
ci/checkpatch         warning  coding style issues

Commit Message

Wenzhuo Lu March 15, 2019, 6:22 a.m.
Use SSE vector instructions in the ice Tx path: add the
ice_xmit_pkts_vec() burst function together with the vector Tx queue
check, setup and mbuf-release helpers. The vector path is chosen at
device start only when every Tx queue passes the vector check, i.e.
when no queue requests an offload the path cannot handle.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 doc/guides/nics/features/ice_vec.ini  |   2 +
 drivers/net/ice/ice_rxtx.c            |  19 +++++
 drivers/net/ice/ice_rxtx.h            |   4 +
 drivers/net/ice/ice_rxtx_vec_common.h | 133 +++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx_vec_sse.c    | 135 ++++++++++++++++++++++++++++++++++
 5 files changed, 293 insertions(+)

Patch

diff --git a/doc/guides/nics/features/ice_vec.ini b/doc/guides/nics/features/ice_vec.ini
index 1a19788..173c8f2 100644
--- a/doc/guides/nics/features/ice_vec.ini
+++ b/doc/guides/nics/features/ice_vec.ini
@@ -12,6 +12,7 @@  Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
@@ -22,6 +23,7 @@  RSS reta update      = Y
 VLAN filter          = Y
 Packet type parsing  = Y
 Rx descriptor status = Y
+Tx descriptor status = Y
 Basic stats          = Y
 Extended stats       = Y
 FW version           = Y
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 6529ae5..6fbb5a2 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2336,6 +2336,25 @@  void __attribute__((cold))
 {
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+#ifdef RTE_LIBRTE_ICE_INC_VECTOR
+#ifdef RTE_ARCH_X86
+	struct ice_tx_queue *txq;
+	int i;
+
+	if (!ice_tx_vec_dev_check(dev)) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			(void)ice_txq_vec_setup(txq);
+		}
+		PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
+			    dev->data->port_id);
+		dev->tx_pkt_burst = ice_xmit_pkts_vec;
+		dev->tx_pkt_prepare = NULL;
+
+		return;
+	}
+#endif
+#endif
 
 	if (ad->tx_simple_allowed) {
 		PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
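
The vector Tx path above is selected only when ice_tx_vec_dev_check()
passes for every queue. As a sketch outside this patch, a queue
configuration that should satisfy ice_tx_vec_queue_default() (added
later in this series, in ice_rxtx_vec_common.h) could look like the
following; setup_vec_friendly_txq() is a hypothetical helper, and the
thresholds assume ICE_VPMD_TX_BURST = 32 and ICE_TX_MAX_FREE_BUF_SZ = 64
as defined elsewhere in this series:

#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Hypothetical helper, not part of the patch: configure one Tx queue
 * so that ice_tx_vec_queue_default() accepts it. tx_rs_thresh must
 * lie in [ICE_VPMD_TX_BURST, ICE_TX_MAX_FREE_BUF_SZ] and none of the
 * ICE_NO_VECTOR_FLAGS offloads may be requested.
 */
static int
setup_vec_friendly_txq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txconf txconf = {
		.tx_rs_thresh = 32,	/* within the vector-path window */
		.tx_free_thresh = 32,
		.offloads = 0,	/* no multi-seg, VLAN insert or cksum offloads */
	};

	return rte_eth_tx_queue_setup(port_id, queue_id, 1024,
				      rte_socket_id(), &txconf);
}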
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index aab4a3a..02bb57e 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -173,10 +173,14 @@  void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 #ifdef RTE_LIBRTE_ICE_INC_VECTOR
 int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
+int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_rxq_vec_setup(struct ice_rx_queue *rxq);
+int ice_txq_vec_setup(struct ice_tx_queue *txq);
 uint16_t ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 				     uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+			   uint16_t nb_pkts);
 #endif
 #endif /* _ICE_RXTX_H_ */
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 73837f7..8796ecb 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -71,6 +71,73 @@ 
 	return pkt_idx;
 }
 
+static __rte_always_inline int
+ice_tx_free_bufs(struct ice_tx_queue *txq)
+{
+	struct ice_tx_entry *txep;
+	uint32_t n;
+	uint32_t i;
+	int nb_free = 0;
+	struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
+
+	/* check DD bits on threshold descriptor */
+	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
+			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+
+	/* first buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh - 1)
+	 */
+	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	if (likely(m)) {
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (likely(m)) {
+				if (likely(m->pool == free[0]->pool)) {
+					free[nb_free++] = m;
+				} else {
+					rte_mempool_put_bulk(free[0]->pool,
+							     (void *)free,
+							     nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m)
+				rte_mempool_put(m->pool, m);
+		}
+	}
+
+	/* buffers were freed, update counters */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return txq->tx_rs_thresh;
+}
+
+static __rte_always_inline void
+tx_backlog_entry(struct ice_tx_entry *txep,
+		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	int i;
+
+	for (i = 0; i < (int)nb_pkts; ++i)
+		txep[i].mbuf = tx_pkts[i];
+}
+
 static inline void
 _ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
 {
@@ -101,6 +168,34 @@ 
 	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
 }
 
+static inline void
+_ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
+{
+	uint16_t i;
+
+	if (!txq || !txq->sw_ring) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+		return;
+	}
+
+	/**
+	 *  vPMD tx will not set sw_ring's mbuf to NULL after free,
+	 *  so the remaining mbufs need to be freed more carefully.
+	 */
+	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+	if (txq->tx_tail < i) {
+		for (; i < txq->nb_tx_desc; i++) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+			txq->sw_ring[i].mbuf = NULL;
+		}
+		i = 0;
+	}
+	for (; i < txq->tx_tail; i++) {
+		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+		txq->sw_ring[i].mbuf = NULL;
+	}
+}
+
 static inline int
 ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 {
@@ -137,6 +232,29 @@ 
 	return 0;
 }
 
+#define ICE_NO_VECTOR_FLAGS (				 \
+		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
+		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
+		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
+		DEV_TX_OFFLOAD_TCP_CKSUM)
+
+static inline int
+ice_tx_vec_queue_default(struct ice_tx_queue *txq)
+{
+	if (!txq)
+		return -1;
+
+	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
+	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
+		return -1;
+
+	return 0;
+}
+
 static inline int
 ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
@@ -152,4 +270,19 @@ 
 	return 0;
 }
 
+static inline int
+ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
+{
+	int i;
+	struct ice_tx_queue *txq;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (ice_tx_vec_queue_default(txq))
+			return -1;
+	}
+
+	return 0;
+}
+
 #endif
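
To make the ring bookkeeping in ice_tx_free_bufs() concrete:
tx_next_dd starts at tx_rs_thresh - 1 (per the queue reset logic this
series relies on, an assumption here), advances by tx_rs_thresh after
every successful free, and wraps back to tx_rs_thresh - 1 after passing
the end of the ring, so each call frees exactly tx_rs_thresh buffers.
A standalone sketch of just that index arithmetic, with illustrative
sizes:

#include <stdint.h>
#include <stdio.h>

/* Sketch outside this patch: the tx_next_dd advance/wrap rule used by
 * ice_tx_free_bufs(). The descriptor count and threshold are
 * illustrative.
 */
static uint16_t
advance_next_dd(uint16_t tx_next_dd, uint16_t tx_rs_thresh,
		uint16_t nb_tx_desc)
{
	tx_next_dd = (uint16_t)(tx_next_dd + tx_rs_thresh);
	if (tx_next_dd >= nb_tx_desc)
		tx_next_dd = (uint16_t)(tx_rs_thresh - 1);
	return tx_next_dd;
}

int
main(void)
{
	uint16_t dd = 31;	/* initial tx_next_dd for tx_rs_thresh = 32 */
	int i;

	/* 32 frees of 32 buffers cover all 1024 descriptors once */
	for (i = 0; i < 32; i++)
		dd = advance_next_dd(dd, 32, 1024);
	printf("tx_next_dd after one full lap: %u\n", dd);	/* prints 31 */
	return 0;
}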
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 789cf07..6babb8d 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -505,12 +505,131 @@ 
 				      &split_flags[i]);
 }
 
+static inline void
+ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt,
+	 uint64_t flags)
+{
+	uint64_t high_qw =
+		(ICE_TX_DESC_DTYPE_DATA |
+		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
+		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
+
+	__m128i descriptor = _mm_set_epi64x(high_qw,
+					    pkt->buf_iova + pkt->data_off);
+	_mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
+	uint16_t nb_pkts, uint64_t flags)
+{
+	int i;
+
+	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+		ice_vtx1(txdp, *pkt, flags);
+}
+
+static uint16_t
+ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+			 uint16_t nb_pkts)
+{
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+	volatile struct ice_tx_desc *txdp;
+	struct ice_tx_entry *txep;
+	uint16_t n, nb_commit, tx_id;
+	uint64_t flags = ICE_TD_CMD;
+	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
+	int i;
+
+	/* crossing the tx_rs_thresh boundary is not allowed */
+	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		ice_tx_free_bufs(txq);
+
+	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+	nb_commit = nb_pkts;
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	tx_id = txq->tx_tail;
+	txdp = &txq->tx_ring[tx_id];
+	txep = &txq->sw_ring[tx_id];
+
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+	n = (uint16_t)(txq->nb_tx_desc - tx_id);
+	if (nb_commit >= n) {
+		tx_backlog_entry(txep, tx_pkts, n);
+
+		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+			ice_vtx1(txdp, *tx_pkts, flags);
+
+		ice_vtx1(txdp, *tx_pkts++, rs);
+
+		nb_commit = (uint16_t)(nb_commit - n);
+
+		tx_id = 0;
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+		txdp = &txq->tx_ring[tx_id];
+		txep = &txq->sw_ring[tx_id];
+	}
+
+	tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+	ice_vtx(txdp, tx_pkts, nb_commit, flags);
+
+	tx_id = (uint16_t)(tx_id + nb_commit);
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
+					 ICE_TXD_QW1_CMD_S);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+	}
+
+	txq->tx_tail = tx_id;
+
+	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+	return nb_pkts;
+}
+
+uint16_t
+ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+		  uint16_t nb_pkts)
+{
+	uint16_t nb_tx = 0;
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+	while (nb_pkts) {
+		uint16_t ret, num;
+
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = ice_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < num)
+			break;
+	}
+
+	return nb_tx;
+}
+
 static void __attribute__((cold))
 ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
 {
 	_ice_rx_queue_release_mbufs_vec(rxq);
 }
 
+static void __attribute__((cold))
+ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
+{
+	_ice_tx_queue_release_mbufs_vec(txq);
+}
+
 int __attribute__((cold))
 ice_rxq_vec_setup(struct ice_rx_queue *rxq)
 {
@@ -522,7 +641,23 @@  int __attribute__((cold))
 }
 
 int __attribute__((cold))
+ice_txq_vec_setup(struct ice_tx_queue *txq)
+{
+	if (!txq)
+		return -1;
+
+	txq->tx_rel_mbufs = ice_tx_queue_release_mbufs_vec;
+	return 0;
+}
+
+int __attribute__((cold))
 ice_rx_vec_dev_check(struct rte_eth_dev *dev)
 {
 	return ice_rx_vec_dev_check_default(dev);
 }
+
+int __attribute__((cold))
+ice_tx_vec_dev_check(struct rte_eth_dev *dev)
+{
+	return ice_tx_vec_dev_check_default(dev);
+}
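
For completeness, a usage sketch outside this patch: once selected,
ice_xmit_pkts_vec() is reached through the normal rte_eth_tx_burst()
call and slices oversized bursts into tx_rs_thresh-sized chunks
internally, so callers need no special batching. send_burst() below is
a hypothetical helper:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical helper, not part of the patch: push a batch through the
 * configured Tx burst function; with the vector path selected this
 * resolves to ice_xmit_pkts_vec(). Unsent mbufs stay owned by the
 * caller, hence the retry loop.
 */
static void
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 &pkts[sent], nb_pkts - sent);
}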