@@ -4193,14 +4193,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
-#ifdef CC_AVX512_SUPPORT
- if (use_avx512)
- iavf_txq_vec_setup_avx512(txq);
- else
- iavf_txq_vec_setup(txq);
-#else
iavf_txq_vec_setup(txq);
-#endif
}
if (no_poll_on_link_down) {
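
With the CC_AVX512_SUPPORT split removed, the choice of sw-ring layout is no
longer a compile-time decision: it is recorded on the queue via the
vector_tx/vector_sw_ring flags set in iavf_txq_vec_setup() (see the final hunk
below), so a single setup call covers the SSE, AVX2, and AVX512 paths. A
minimal sketch of how a common helper can then branch on those flags at run
time; the helper name and body are illustrative assumptions, and only the two
flags and the sw_ring/sw_ring_vec fields come from this patch:

	/* Sketch only: assumes the driver-internal struct ci_tx_queue
	 * (with its sw_ring/sw_ring_vec union) and <rte_mbuf.h> are in
	 * scope. */
	static void
	txq_release_mbufs_sketch(struct ci_tx_queue *txq)
	{
		uint16_t i;

		for (i = 0; i < txq->nb_tx_desc; i++) {
			struct rte_mbuf *m = txq->vector_sw_ring ?
				txq->sw_ring_vec[i].mbuf :
				txq->sw_ring[i].mbuf;

			if (m != NULL)
				rte_pktmbuf_free_seg(m);
		}
	}
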
@@ -1736,14 +1736,14 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
- struct ci_tx_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
/* bit 2 is reserved and must be set to 1 according to the spec */
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
if (txq->nb_tx_free < txq->tx_free_thresh)
- iavf_tx_free_bufs(txq);
+ ci_tx_free_bufs_vec(txq, iavf_tx_desc_done, false);
nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
@@ -1752,13 +1752,13 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = txq->tx_tail;
txdp = &txq->iavf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = &txq->sw_ring_vec[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
- ci_tx_backlog_entry(txep, tx_pkts, n);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, n);
iavf_vtx(txdp, tx_pkts, n - 1, flags, offload);
tx_pkts += (n - 1);
@@ -1773,10 +1773,10 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
/* avoid reaching the end of the ring */
txdp = &txq->iavf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = &txq->sw_ring_vec[tx_id];
}
- ci_tx_backlog_entry(txep, tx_pkts, nb_commit);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
iavf_vtx(txdp, tx_pkts, nb_commit, flags, offload);
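
The move from sw_ring to sw_ring_vec is what the ci_tx_entry_vec type exists
for: the fixed-burst vector paths never chain descriptors, so each slot only
needs the mbuf pointer. The layouts below are an assumption based on how the
entries are used in this patch, not a copy of the common Tx header:

	/* Assumed entry layouts, for illustration only. */
	struct ci_tx_entry {		/* scalar paths */
		struct rte_mbuf *mbuf;
		uint16_t next_id;	/* next entry in the chain */
		uint16_t last_id;	/* last entry for the packet */
	};

	struct ci_tx_entry_vec {	/* vector paths */
		struct rte_mbuf *mbuf;	/* all the fixed-burst path needs */
	};

With that layout, a backlog helper shaped like ci_tx_backlog_entry_vec()
reduces to recording the mbuf pointers:

	/* Hypothetical condensed form of the _vec backlog helper. */
	static inline void
	tx_backlog_entry_vec_sketch(struct ci_tx_entry_vec *txep,
				    struct rte_mbuf **pkts, uint16_t n)
	{
		uint16_t i;

		for (i = 0; i < n; i++)
			txep[i].mbuf = pkts[i];
	}
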
@@ -2357,14 +2357,6 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-int __rte_cold
-iavf_txq_vec_setup_avx512(struct ci_tx_queue *txq)
-{
- txq->vector_tx = true;
- txq->vector_sw_ring = true;
- return 0;
-}
-
uint16_t
iavf_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -24,12 +24,6 @@ iavf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
}
-static __rte_always_inline int
-iavf_tx_free_bufs(struct ci_tx_queue *txq)
-{
- return ci_tx_free_bufs(txq, iavf_tx_desc_done);
-}
-
static inline void
_iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
{
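
With the wrapper gone, callers hand iavf_tx_desc_done (whose tail is visible
above) straight to ci_tx_free_bufs_vec(), together with a flag saying whether
context descriptors may occupy ring slots; iavf passes false. A hypothetical
condensed form of what the common routine does with that callback; the real
helper also batches mempool puts rather than freeing one segment at a time:

	typedef int (*desc_done_fn)(struct ci_tx_queue *txq, uint16_t idx);

	/* Sketch only; field names follow the ci_tx_queue usage seen
	 * elsewhere in this patch. */
	static inline int
	tx_free_bufs_vec_sketch(struct ci_tx_queue *txq, desc_done_fn done)
	{
		uint16_t i;
		struct ci_tx_entry_vec *txep;

		/* nothing to recycle until HW marks the batch complete */
		if (!done(txq, txq->tx_next_dd))
			return 0;

		/* free one threshold's worth of mbufs from the vector ring */
		txep = &txq->sw_ring_vec[txq->tx_next_dd -
				(txq->tx_rs_thresh - 1)];
		for (i = 0; i < txq->tx_rs_thresh; i++)
			rte_pktmbuf_free_seg(txep[i].mbuf);

		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
		txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
		if (txq->tx_next_dd >= txq->nb_tx_desc)
			txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

		return txq->tx_rs_thresh;
	}
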
@@ -1368,14 +1368,14 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
- struct ci_tx_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = IAVF_TX_DESC_CMD_EOP | 0x04; /* bit 2 must be set */
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
int i;
if (txq->nb_tx_free < txq->tx_free_thresh)
- iavf_tx_free_bufs(txq);
+ ci_tx_free_bufs_vec(txq, iavf_tx_desc_done, false);
nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
@@ -1384,13 +1384,13 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = txq->tx_tail;
txdp = &txq->iavf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = &txq->sw_ring_vec[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
- ci_tx_backlog_entry(txep, tx_pkts, n);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, n);
for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
vtx1(txdp, *tx_pkts, flags);
@@ -1404,10 +1404,10 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* avoid reaching the end of the ring */
txdp = &txq->iavf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = &txq->sw_ring_vec[tx_id];
}
- ci_tx_backlog_entry(txep, tx_pkts, nb_commit);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
iavf_vtx(txdp, tx_pkts, nb_commit, flags);
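
For reference, the per-descriptor store the loop above performs: vtx1() packs
the command flags and data length into the descriptor's high quadword and
writes it together with the buffer address as a single 16-byte store. A sketch
of that shape; the macro names are taken from the iavf descriptor layout and
should be treated as assumptions here:

	/* Needs <rte_mbuf.h> and the SSE intrinsics header. */
	static inline void
	vtx1_sketch(volatile struct iavf_tx_desc *txdp,
		    struct rte_mbuf *pkt, uint64_t flags)
	{
		uint64_t high_qw = IAVF_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);

		__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_iova + pkt->data_off);
		/* cast away volatile to match the intrinsic's prototype */
		_mm_store_si128((__m128i *)(uintptr_t)txdp, descriptor);
	}
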
@@ -1462,7 +1462,7 @@ int __rte_cold
iavf_txq_vec_setup(struct ci_tx_queue *txq)
{
txq->vector_tx = true;
- txq->vector_sw_ring = false;
+ txq->vector_sw_ring = txq->vector_tx;
return 0;
}
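
Since vector_tx is set to true on the previous line, this assignment makes
vector_sw_ring unconditionally true as well: every vector Tx path now uses the
compact sw-ring, which is exactly what allowed iavf_txq_vec_setup_avx512() to
be deleted earlier in this patch. A trivial usage sketch of the resulting
contract (illustration only; needs <assert.h>):

	struct ci_tx_queue txq = { 0 };

	iavf_txq_vec_setup(&txq);
	assert(txq.vector_tx && txq.vector_sw_ring);	/* always both set */
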