@@ -179,7 +179,7 @@ ci_tx_free_bufs(struct ci_tx_queue *txq, ci_desc_done_fn desc_done)
}

static __rte_always_inline int
-ci_tx_free_bufs_vec(struct ci_tx_queue *txq, ci_desc_done_fn desc_done)
+ci_tx_free_bufs_vec(struct ci_tx_queue *txq, ci_desc_done_fn desc_done, bool ctx_descs)
{
int nb_free = 0;
struct rte_mbuf *free[IETH_VPMD_TX_MAX_FREE_BUF];
@@ -189,13 +189,13 @@ ci_tx_free_bufs_vec(struct ci_tx_queue *txq, ci_desc_done_fn desc_done)
if (!desc_done(txq, txq->tx_next_dd))
		return 0;

- const uint32_t n = txq->tx_rs_thresh;
+	const uint32_t n = txq->tx_rs_thresh >> ctx_descs;

/* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh - 1)
*/
struct ci_tx_entry_vec *txep = txq->sw_ring_vec;
- txep += txq->tx_next_dd - (n - 1);
+	txep += (txq->tx_next_dd >> ctx_descs) - (n - 1);

if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
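
Note on the new ctx_descs parameter: every call site passes a literal true or
false, so once the helper is inlined the shift folds to a constant 0 or 1 and
the non-context paths are unchanged. The halving matters because a queue that
interleaves context descriptors consumes two ring slots per packet but only
one sw-ring entry. A worked example, with illustrative values that are not
taken from the patch:

/*
 * ctx_descs == true, tx_rs_thresh = 64, tx_next_dd = 127:
 *   n    = 64 >> 1 = 32                      -- mbufs to free
 *   txep = sw_ring_vec + (127 >> 1) - (32 - 1)
 *        = sw_ring_vec + 63 - 31
 *        = sw_ring_vec + 32
 * ctx_descs == false: both shifts are by 0, preserving the old behaviour.
 */
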
@@ -829,7 +829,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;

if (txq->nb_tx_free < txq->tx_free_thresh)
- ci_tx_free_bufs_vec(txq, i40e_tx_desc_done);
+		ci_tx_free_bufs_vec(txq, i40e_tx_desc_done, false);

nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
@@ -1844,121 +1844,6 @@ iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
true);
}

-static __rte_always_inline int
-iavf_tx_free_bufs_avx512(struct ci_tx_queue *txq)
-{
- struct ci_tx_entry_vec *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
-
- /* check DD bits on threshold descriptor */
- if ((txq->iavf_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
- rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
- rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
- return 0;
-
- n = txq->tx_rs_thresh >> txq->use_ctx;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = (void *)txq->sw_ring;
- txep += (txq->tx_next_dd >> txq->use_ctx) - (n - 1);
-
- if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
- struct rte_mempool *mp = txep[0].mbuf->pool;
- struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
- rte_lcore_id());
- void **cache_objs;
-
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
- goto done;
- }
-
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
- /* Add elements back into the cache */
- uint32_t copied = 0;
- /* n is multiple of 32 */
- while (copied < n) {
-#ifdef RTE_ARCH_64
- const __m512i a = _mm512_loadu_si512(&txep[copied]);
- const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
- const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
- const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
-
- _mm512_storeu_si512(&cache_objs[copied], a);
- _mm512_storeu_si512(&cache_objs[copied + 8], b);
- _mm512_storeu_si512(&cache_objs[copied + 16], c);
- _mm512_storeu_si512(&cache_objs[copied + 24], d);
-#else
- const __m512i a = _mm512_loadu_si512(&txep[copied]);
- const __m512i b = _mm512_loadu_si512(&txep[copied + 16]);
- _mm512_storeu_si512(&cache_objs[copied], a);
- _mm512_storeu_si512(&cache_objs[copied + 16], b);
-#endif
- copied += 32;
- }
- cache->len += n;
-
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
- goto done;
- }
-
-normal:
- m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m)
- rte_mempool_put(m->pool, m);
- }
- }
-
-done:
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
static __rte_always_inline void
tx_backlog_entry_avx512(struct ci_tx_entry_vec *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
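
The deleted iavf_tx_free_bufs_avx512() above duplicated the common free logic
and open-coded the DD-bit test on the threshold descriptor. After this patch
that test reaches ci_tx_free_bufs_vec() through the desc_done callback
instead. iavf_tx_desc_done() itself is not part of this diff; a sketch of
what it presumably looks like, reconstructed from the removed inline check:

static inline bool
iavf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->iavf_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) ==
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
}
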
@@ -2320,7 +2205,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;

if (txq->nb_tx_free < txq->tx_free_thresh)
- iavf_tx_free_bufs_avx512(txq);
+		ci_tx_free_bufs_vec(txq, iavf_tx_desc_done, false);

nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
@@ -2388,7 +2273,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;

if (txq->nb_tx_free < txq->tx_free_thresh)
- iavf_tx_free_bufs_avx512(txq);
+		ci_tx_free_bufs_vec(txq, iavf_tx_desc_done, true);

nb_commit = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts << 1);
nb_commit &= 0xFFFE;
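
In the context-descriptor transmit path each packet occupies two ring slots,
so the commit budget is doubled before clamping and then rounded down to an
even count, mirroring the ctx_descs = true halving in the free path above.
A worked example with illustrative values, not taken from the patch:

/*
 * nb_pkts = 33, nb_tx_free = 61:
 *   nb_commit = RTE_MIN(61, 33 << 1) = RTE_MIN(61, 66) = 61
 *   nb_commit &= 0xFFFE  ->  60
 * 60 descriptors = 30 whole packets; the mask guarantees a context/data
 * descriptor pair is never half-committed.
 */
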
@@ -949,7 +949,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

if (txq->nb_tx_free < txq->tx_free_thresh)
- ci_tx_free_bufs_vec(txq, ice_tx_desc_done);
+		ci_tx_free_bufs_vec(txq, ice_tx_desc_done, false);

nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))