@@ -109,6 +109,13 @@ ci_tx_backlog_entry(struct ci_tx_entry *txep, struct rte_mbuf **tx_pkts, uint16_
 		txep[i].mbuf = tx_pkts[i];
 }
 
+static __rte_always_inline void
+ci_tx_backlog_entry_vec(struct ci_tx_entry_vec *txep, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	for (uint16_t i = 0; i < nb_pkts; ++i)
+		txep[i].mbuf = tx_pkts[i];
+}
+
 #define IETH_VPMD_TX_MAX_FREE_BUF 64
 
 typedef int (*ci_desc_done_fn)(struct ci_tx_queue *txq, uint16_t idx);
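Note on the two entry layouts: the scalar Tx path needs per-slot chaining state, while the vector paths only ever read the mbuf pointer back at free time, which is why the slimmer ci_tx_entry_vec pays off. A minimal standalone sketch; the extra fields on ci_tx_entry are an assumption based on typical Intel sw rings and are not shown in this diff:

#include <stdint.h>

struct rte_mbuf;                    /* opaque for this sketch */

struct ci_tx_entry {                /* scalar path entry */
	struct rte_mbuf *mbuf;
	uint16_t next_id;           /* assumed chaining fields */
	uint16_t last_id;
};

struct ci_tx_entry_vec {            /* vector path entry: pointer only */
	struct rte_mbuf *mbuf;
};

Under these assumptions the vector entry is half the size (8 bytes vs 16 with padding), shrinking the sw-ring cache footprint touched on every burst; that is the motivation for converting the SSE and AVX2 paths below.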
@@ -825,7 +825,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	/* record what kind of descriptor cleanup we need on teardown */
 	txq->vector_tx = ad->tx_vec_allowed;
-	txq->vector_sw_ring = ad->tx_use_avx512;
+	txq->vector_sw_ring = txq->vector_tx;
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
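With all three vector paths (SSE, AVX2, AVX512) now keeping their backlog in the slim entries, vector_sw_ring follows vector_tx directly instead of being an AVX512-only case, and teardown can pick the ring layout off that one flag. A hypothetical release helper, assuming the two layouts overlay the same allocation (as the (void *)txq->sw_ring cast further down suggests); the function name is illustrative, not the driver's actual release routine:

#include <rte_mbuf.h>

static void
tx_queue_release_mbufs_sketch(struct ci_tx_queue *txq)
{
	for (uint16_t i = 0; i < txq->nb_tx_desc; i++) {
		/* same slot index, different entry stride per layout */
		struct rte_mbuf *m = txq->vector_sw_ring ?
				txq->sw_ring_vec[i].mbuf :
				txq->sw_ring[i].mbuf;
		if (m != NULL)
			rte_pktmbuf_free_seg(m);
	}
}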
@@ -858,7 +858,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 {
 	struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 	volatile struct ice_tx_desc *txdp;
-	struct ci_tx_entry *txep;
+	struct ci_tx_entry_vec *txep;
 	uint16_t n, nb_commit, tx_id;
 	uint64_t flags = ICE_TD_CMD;
 	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
@@ -867,7 +867,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs_vec(txq);
+		ci_tx_free_bufs_vec(txq, ice_tx_desc_done, false);
 
 	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
@@ -875,13 +875,13 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	tx_id = txq->tx_tail;
 	txdp = &txq->ice_tx_ring[tx_id];
-	txep = &txq->sw_ring[tx_id];
+	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
 	n = (uint16_t)(txq->nb_tx_desc - tx_id);
 	if (nb_commit >= n) {
-		ci_tx_backlog_entry(txep, tx_pkts, n);
+		ci_tx_backlog_entry_vec(txep, tx_pkts, n);
 
 		ice_vtx(txdp, tx_pkts, n - 1, flags, offload);
 		tx_pkts += (n - 1);
@@ -896,10 +896,10 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* avoid reach the end of ring */
 		txdp = &txq->ice_tx_ring[tx_id];
-		txep = &txq->sw_ring[tx_id];
+		txep = &txq->sw_ring_vec[tx_id];
 	}
 
-	ci_tx_backlog_entry(txep, tx_pkts, nb_commit);
+	ci_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
 
 	ice_vtx(txdp, tx_pkts, nb_commit, flags, offload);
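Both backlog calls above come from the same split: n is the room left before the physical end of the ring, so a burst that crosses it is written in two pieces with the tail wrapped to 0 in between. The index arithmetic in isolation, runnable without any DPDK types:

#include <stdint.h>
#include <stdio.h>

static uint16_t
commit_with_wrap(uint16_t tail, uint16_t ring_size, uint16_t nb_commit)
{
	uint16_t n = (uint16_t)(ring_size - tail);

	if (nb_commit >= n) {	/* burst crosses the ring end */
		nb_commit = (uint16_t)(nb_commit - n);
		tail = 0;	/* wrap, then place the remainder */
	}
	return (uint16_t)(tail + nb_commit);	/* new tx_tail */
}

int main(void)
{
	/* 512-slot ring, tail at 508, committing 8 -> new tail is 4 */
	printf("%u\n", commit_with_wrap(508, 512, 8));
	return 0;
}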
@@ -924,16 +924,6 @@ ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
 	}
 }
 
-static __rte_always_inline void
-ice_tx_backlog_entry_avx512(struct ci_tx_entry_vec *txep,
-			    struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-	int i;
-
-	for (i = 0; i < (int)nb_pkts; ++i)
-		txep[i].mbuf = tx_pkts[i];
-}
-
 static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts, bool do_offload)
@@ -964,7 +954,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	n = (uint16_t)(txq->nb_tx_desc - tx_id);
 	if (nb_commit >= n) {
-		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
+		ci_tx_backlog_entry_vec(txep, tx_pkts, n);
 
 		ice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);
 		tx_pkts += (n - 1);
@@ -982,7 +972,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txep = (void *)txq->sw_ring;
 	}
 
-	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
+	ci_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
 
 	ice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);
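The AVX512 path already used the slim entries, so its private ice_tx_backlog_entry_avx512() is dropped in favour of the common helper with no behavioural change. The do_offload bool it keeps threading through ice_vtx() is worth noting: because these helpers are __rte_always_inline and (in wrappers not shown in this diff, assumed here) called with literal true/false, the compiler emits two specialised bodies with no runtime branch. A standalone model of that pattern:

#include <stdbool.h>
#include <stdint.h>

/* stand-in for the descriptor-building work in ice_vtx() */
static inline __attribute__((always_inline)) uint64_t
vtx_model(uint64_t base, uint64_t ol_flags, bool do_offload)
{
	if (do_offload)	/* folds away at each literal call site */
		base |= ol_flags;
	return base;
}

/* thin entry points pin the flag, yielding branch-free specialisations */
uint64_t xmit_plain(uint64_t d)   { return vtx_model(d, 0xf0, false); }
uint64_t xmit_offload(uint64_t d) { return vtx_model(d, 0xf0, true); }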
@@ -20,12 +20,6 @@ ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
 			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
 }
 
-static __rte_always_inline int
-ice_tx_free_bufs_vec(struct ci_tx_queue *txq)
-{
-	return ci_tx_free_bufs(txq, ice_tx_desc_done);
-}
-
 static inline void
 _ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
 {
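Dropping the one-line ice_tx_free_bufs_vec() wrapper means call sites hand ice_tx_desc_done straight to the common routine, matching the ci_desc_done_fn typedef from the first hunk (the trailing false at those call sites appears to be a no-context-descriptors flag; that reading is an assumption). The body of ci_tx_free_bufs_vec() is not part of this diff, so the following is only an assumed shape, built from the typedef plus fields visible elsewhere in the patch; tx_next_dd is likewise assumed:

#include <rte_mbuf.h>

static inline int
tx_free_bufs_vec_sketch(struct ci_tx_queue *txq, ci_desc_done_fn desc_done)
{
	/* nothing reclaimable until the NIC has written back the DD bit */
	if (!desc_done(txq, txq->tx_next_dd))
		return 0;

	/* free the tx_rs_thresh mbufs behind the completed descriptor */
	struct ci_tx_entry_vec *txep =
		&txq->sw_ring_vec[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
	for (uint16_t i = 0; i < txq->tx_rs_thresh; i++)
		rte_pktmbuf_free_seg(txep[i].mbuf);

	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	return txq->tx_rs_thresh;
}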
@@ -699,7 +699,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 {
 	struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 	volatile struct ice_tx_desc *txdp;
-	struct ci_tx_entry *txep;
+	struct ci_tx_entry_vec *txep;
 	uint16_t n, nb_commit, tx_id;
 	uint64_t flags = ICE_TD_CMD;
 	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
@@ -709,7 +709,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs_vec(txq);
+		ci_tx_free_bufs_vec(txq, ice_tx_desc_done, false);
 
 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	nb_commit = nb_pkts;
@@ -718,13 +718,13 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	tx_id = txq->tx_tail;
 	txdp = &txq->ice_tx_ring[tx_id];
-	txep = &txq->sw_ring[tx_id];
+	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
 	n = (uint16_t)(txq->nb_tx_desc - tx_id);
 	if (nb_commit >= n) {
-		ci_tx_backlog_entry(txep, tx_pkts, n);
+		ci_tx_backlog_entry_vec(txep, tx_pkts, n);
 
 		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
 			ice_vtx1(txdp, *tx_pkts, flags);
@@ -738,10 +738,10 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* avoid reach the end of ring */
 		txdp = &txq->ice_tx_ring[tx_id];
-		txep = &txq->sw_ring[tx_id];
+		txep = &txq->sw_ring_vec[tx_id];
 	}
 
-	ci_tx_backlog_entry(txep, tx_pkts, nb_commit);
+	ci_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
 
 	ice_vtx(txdp, tx_pkts, nb_commit, flags);
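As in the AVX2 hunk, the SSE burst is clamped twice before anything is written: first to tx_rs_thresh, which keeps the RS-bit bookkeeping to at most one threshold crossing per burst, then to the free-slot count. The clamp in isolation:

#include <stdint.h>

static uint16_t
clamp_burst(uint16_t nb_pkts, uint16_t tx_rs_thresh, uint16_t nb_tx_free)
{
	/* nb_pkts = RTE_MIN(nb_pkts, tx_rs_thresh) ... */
	nb_pkts = nb_pkts < tx_rs_thresh ? nb_pkts : tx_rs_thresh;
	/* ... then RTE_MIN(nb_tx_free, nb_pkts) */
	return nb_pkts < nb_tx_free ? nb_pkts : nb_tx_free;
}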