@@ -848,6 +848,129 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
}
}
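+/* post a single Tx time queue descriptor for one packet */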
+static __rte_always_inline void
+ice_vts1(volatile struct ice_ts_desc *ts, struct rte_mbuf *pkt,
+ uint16_t tx_tail, uint16_t nb_tx_desc, int ts_offset)
+{
+ ts->tx_desc_idx_tstamp = ice_get_ts_queue_desc(pkt,
+ tx_tail, nb_tx_desc, ts_offset);
+}
+
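+/*
+ * Post Tx time queue descriptors four at a time: build each 32-bit
+ * descriptor in scalar code, then write all four with one 128-bit store.
+ */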
+static __rte_always_inline void
+ice_vts4(volatile struct ice_ts_desc *ts, struct rte_mbuf **pkt,
+ uint16_t nb_pkts, uint16_t tx_tail, uint16_t nb_tx_desc,
+ int ts_offset)
+{
+ uint16_t tx_id;
+
+ for (; nb_pkts > 3; ts += 4, pkt += 4, nb_pkts -= 4,
+ tx_tail += 4) {
+ tx_id = tx_tail + 4;
+ uint32_t ts_dsc3 = ice_get_ts_queue_desc(pkt[3],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 3;
+ uint32_t ts_dsc2 = ice_get_ts_queue_desc(pkt[2],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 2;
+ uint32_t ts_dsc1 = ice_get_ts_queue_desc(pkt[1],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 1;
+ uint32_t ts_dsc0 = ice_get_ts_queue_desc(pkt[0],
+ tx_id, nb_tx_desc, ts_offset);
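+		/* _mm_set_epi32() takes elements high-to-low, so ts_dsc0
+		 * lands in the lowest lane and is stored first (ts[0])
+		 */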
+ __m128i desc0_3 = _mm_set_epi32(ts_dsc3, ts_dsc2,
+ ts_dsc1, ts_dsc0);
+		/* unaligned store: ts may sit at any 4-byte ring offset */
+		_mm_storeu_si128(RTE_CAST_PTR(void *, ts), desc0_3);
+ }
+
+ /* do any last ones */
+ while (nb_pkts) {
+ tx_tail++;
+ ice_vts1(ts, *pkt, tx_tail, nb_tx_desc, ts_offset);
+ ts++, pkt++, nb_pkts--;
+ }
+}
+
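+/*
+ * Post Tx time queue descriptors eight at a time using 256-bit stores;
+ * a remainder of fewer than eight packets falls through to ice_vts4().
+ */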
+static __rte_always_inline void
+ice_vts(volatile struct ice_ts_desc *ts, struct rte_mbuf **pkt,
+ uint16_t nb_pkts, uint16_t tx_tail, uint16_t nb_tx_desc,
+ int ts_offset)
+{
+ uint16_t tx_id;
+
+ for (; nb_pkts > 7; ts += 8, pkt += 8, nb_pkts -= 8,
+ tx_tail += 8) {
+ tx_id = tx_tail + 8;
+ uint32_t ts_dsc7 = ice_get_ts_queue_desc(pkt[7],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 7;
+ uint32_t ts_dsc6 = ice_get_ts_queue_desc(pkt[6],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 6;
+ uint32_t ts_dsc5 = ice_get_ts_queue_desc(pkt[5],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 5;
+ uint32_t ts_dsc4 = ice_get_ts_queue_desc(pkt[4],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 4;
+ uint32_t ts_dsc3 = ice_get_ts_queue_desc(pkt[3],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 3;
+ uint32_t ts_dsc2 = ice_get_ts_queue_desc(pkt[2],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 2;
+ uint32_t ts_dsc1 = ice_get_ts_queue_desc(pkt[1],
+ tx_id, nb_tx_desc, ts_offset);
+ tx_id = tx_tail + 1;
+ uint32_t ts_dsc0 = ice_get_ts_queue_desc(pkt[0],
+ tx_id, nb_tx_desc, ts_offset);
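+		/* _mm256_set_epi32() takes elements high-to-low, so ts_dsc0
+		 * lands in the lowest lane and is stored first (ts[0])
+		 */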
+ __m256i desc0_7 = _mm256_set_epi32(ts_dsc7, ts_dsc6,
+ ts_dsc5, ts_dsc4, ts_dsc3, ts_dsc2,
+ ts_dsc1, ts_dsc0);
+ _mm256_storeu_si256(RTE_CAST_PTR(void *, ts), desc0_7);
+ }
+
+ /* do any last ones */
+ if (nb_pkts)
+ ice_vts4(ts, pkt, nb_pkts, tx_tail, nb_tx_desc,
+ ts_offset);
+}
+
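+/*
+ * Fill the Tx time queue for a burst of packets and return the new time
+ * queue tail. The time queue ring is sized larger than the Tx ring, so the
+ * time queue tail wraps before the Tx tail does; on wrap, the leading
+ * (nb_ts_desc - nb_tx_desc) entries are refilled before the remaining
+ * packets are posted.
+ */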
+static __rte_always_inline uint16_t
+ice_xmit_fixed_ts_burst_vec_avx2(struct ci_tx_queue *txq,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts,
+ uint16_t tx_tail)
+{
+ volatile struct ice_ts_desc *ts;
+ uint16_t n;
+ uint16_t ts_id;
+ uint16_t fetch;
+
+ ts_id = txq->tsq.ts_tail;
+ ts = &txq->tsq.ice_ts_ring[ts_id];
+
+ n = (uint16_t)(txq->tsq.nb_ts_desc - ts_id);
+ if (nb_pkts >= n) {
+		ice_vts(ts, tx_pkts, n, tx_tail, txq->nb_tx_desc,
+				txq->tsq.ts_offset);
+		tx_pkts += n;
+ tx_tail += n;
+ nb_pkts = (uint16_t)(nb_pkts - n);
+ ts_id = 0;
+ ts = &txq->tsq.ice_ts_ring[ts_id];
+		/* refill the fetch region at the head of the time queue ring
+		 * (nb_ts_desc - nb_tx_desc entries) with the next pending
+		 * packet's descriptor
+		 */
+		fetch = txq->tsq.nb_ts_desc - txq->nb_tx_desc;
+		for (; ts_id < fetch; ts_id++, ts++)
+			ice_vts1(ts, *tx_pkts, tx_tail + 1,
+					txq->nb_tx_desc, txq->tsq.ts_offset);
+ }
+
+ ice_vts(ts, tx_pkts, nb_pkts, tx_tail, txq->nb_tx_desc,
+ txq->tsq.ts_offset);
+ ts_id = (uint16_t)(ts_id + nb_pkts);
+
+ return ts_id;
+}
+
static __rte_always_inline uint16_t
ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts, bool offload)
@@ -855,7 +978,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile struct ice_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
- uint16_t n, nb_commit, tx_id;
+	uint16_t n, nb_commit, tx_id, ts_id = 0;
uint64_t flags = ICE_TD_CMD;
uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
@@ -875,6 +998,10 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
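+	/* Tx time queueing enabled: post one time queue entry per packet */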
+ if (txq->tsq.ts_flag > 0)
+		ts_id = ice_xmit_fixed_ts_burst_vec_avx2(txq,
+				tx_pkts, nb_commit, tx_id);
+
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
ci_tx_backlog_entry_vec(txep, tx_pkts, n);
@@ -910,7 +1037,12 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_tail = tx_id;
- ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
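+	/*
+	 * With Tx time queueing enabled, the doorbell is written with the
+	 * time queue tail rather than the Tx ring tail.
+	 */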
+ if (txq->tsq.ts_flag > 0) {
+ ICE_PCI_REG_WC_WRITE(txq->qtx_tail, ts_id);
+ txq->tsq.ts_tail = ts_id;
+ } else {
+ ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
+ }
return nb_pkts;
}
@@ -215,4 +215,21 @@ ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
}
+
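+/*
+ * Build the 32-bit Tx time queue descriptor for one packet: the Tx
+ * descriptor index, wrapped to the Tx ring size, merged with the packet's
+ * launch time. The launch time is read from the timestamp dynfield and
+ * reduced to nanoseconds within the current second, in 128 ns units.
+ */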
+static inline uint32_t
+ice_get_ts_queue_desc(struct rte_mbuf *pkt, uint16_t tx_tail,
+ uint16_t nb_tx_desc, int ts_offset)
+{
+ uint64_t txtime;
+ uint32_t tstamp, ts_desc;
+
+	tx_tail = (tx_tail >= nb_tx_desc) ? (tx_tail - nb_tx_desc) :
+			tx_tail;
+ txtime = *RTE_MBUF_DYNFIELD(pkt, ts_offset, uint64_t *);
+ tstamp = (uint32_t)(txtime % NS_PER_S) >>
+ ICE_TXTIME_CTX_RESOLUTION_128NS;
+	ts_desc = rte_cpu_to_le_32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M,
+			tx_tail) | FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));
+ return ts_desc;
+}
#endif