@@ -7,6 +7,7 @@
#include "idpf_common_rxtx.h"
#include "idpf_common_device.h"
+#include "idpf_rxtx_vec_common.h"
static __rte_always_inline void
idpf_singleq_rx_rearm(struct idpf_rx_queue *rxq)
@@ -481,64 +482,6 @@ idpf_dp_singleq_recv_pkts_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16
return _idpf_singleq_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts);
}
-static __rte_always_inline int
-idpf_singleq_tx_free_bufs_vec(struct ci_tx_queue *txq)
-{
- struct ci_tx_entry *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m;
- struct rte_mbuf **free = alloca(sizeof(struct rte_mbuf *) * txq->tx_rs_thresh);
-
- /* check DD bits on threshold descriptor */
- if ((txq->idpf_tx_ring[txq->tx_next_dd].qw1 &
- rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
- rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
- m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m)
- rte_mempool_put(m->pool, m);
- }
- }
-
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
static inline void
idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
@@ -623,7 +566,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
- idpf_singleq_tx_free_bufs_vec(txq);
+ ci_tx_free_bufs_vec(txq, idpf_tx_desc_done, false);
nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
@@ -6,6 +6,7 @@
#include <rte_vect.h>
#include "idpf_common_device.h"
#include "idpf_common_rxtx.h"
+#include "idpf_rxtx_vec_common.h"
#define IDPF_DESCS_PER_LOOP_AVX 8
#define PKTLEN_SHIFT 10
@@ -998,122 +999,6 @@ idpf_dp_splitq_recv_pkts_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_pkts);
}
-static __rte_always_inline int
-idpf_tx_singleq_free_bufs_avx512(struct ci_tx_queue *txq)
-{
- struct ci_tx_entry_vec *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m;
- struct rte_mbuf **free = alloca(sizeof(struct rte_mbuf *) * txq->tx_rs_thresh);
-
- /* check DD bits on threshold descriptor */
- if ((txq->idpf_tx_ring[txq->tx_next_dd].qw1 &
- rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
- rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = (void *)txq->sw_ring;
- txep += txq->tx_next_dd - (n - 1);
-
- if (txq->offloads & IDPF_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
- struct rte_mempool *mp = txep[0].mbuf->pool;
- struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
- rte_lcore_id());
- void **cache_objs;
-
- if (cache == NULL || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
- goto done;
- }
-
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
- /* Add elements back into the cache */
- uint32_t copied = 0;
- /* n is multiple of 32 */
- while (copied < n) {
-#ifdef RTE_ARCH_64
- const __m512i a = _mm512_loadu_si512(&txep[copied]);
- const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
- const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
- const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
-
- _mm512_storeu_si512(&cache_objs[copied], a);
- _mm512_storeu_si512(&cache_objs[copied + 8], b);
- _mm512_storeu_si512(&cache_objs[copied + 16], c);
- _mm512_storeu_si512(&cache_objs[copied + 24], d);
-#else
- const __m512i a = _mm512_loadu_si512(&txep[copied]);
- const __m512i b = _mm512_loadu_si512(&txep[copied + 16]);
- _mm512_storeu_si512(&cache_objs[copied], a);
- _mm512_storeu_si512(&cache_objs[copied + 16], b);
-#endif
- copied += 32;
- }
- cache->len += n;
-
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
- goto done;
- }
-
-normal:
- m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
-done:
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
static __rte_always_inline void
idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
@@ -1197,7 +1082,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
- idpf_tx_singleq_free_bufs_avx512(txq);
+ ci_tx_free_bufs_vec(txq, idpf_tx_desc_done, false);
nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
nb_commit = nb_pkts;
@@ -1314,110 +1199,6 @@ idpf_splitq_scan_cq_ring(struct ci_tx_queue *cq)
cq->tx_tail = cq_qid;
}
-static __rte_always_inline int
-idpf_tx_splitq_free_bufs_avx512(struct ci_tx_queue *txq)
-{
- struct ci_tx_entry_vec *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m;
- struct rte_mbuf **free = alloca(sizeof(struct rte_mbuf *) * txq->tx_rs_thresh);
-
- n = txq->tx_rs_thresh;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = (void *)txq->sw_ring;
- txep += txq->tx_next_dd - (n - 1);
-
- if (txq->offloads & IDPF_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
- struct rte_mempool *mp = txep[0].mbuf->pool;
- struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
- rte_lcore_id());
- void **cache_objs;
-
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
- goto done;
- }
-
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
- /* Add elements back into the cache */
- uint32_t copied = 0;
- /* n is multiple of 32 */
- while (copied < n) {
- const __m512i a = _mm512_loadu_si512(&txep[copied]);
- const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
- const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
- const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
-
- _mm512_storeu_si512(&cache_objs[copied], a);
- _mm512_storeu_si512(&cache_objs[copied + 8], b);
- _mm512_storeu_si512(&cache_objs[copied + 16], c);
- _mm512_storeu_si512(&cache_objs[copied + 24], d);
- copied += 32;
- }
- cache->len += n;
-
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
- goto done;
- }
-
-normal:
- m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m)
- rte_mempool_put(m->pool, m);
- }
- }
-
-done:
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
- txq->ctype[IDPF_TXD_COMPLT_RS] -= txq->tx_rs_thresh;
-
- return txq->tx_rs_thresh;
-}
-
#define IDPF_TXD_FLEX_QW1_TX_BUF_SZ_S 48
static __rte_always_inline void
@@ -1559,11 +1340,12 @@ idpf_splitq_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
while (nb_pkts) {
uint16_t ret, num;
-
idpf_splitq_scan_cq_ring(txq->complq);
- if (txq->ctype[IDPF_TXD_COMPLT_RS] > txq->tx_free_thresh)
- idpf_tx_splitq_free_bufs_avx512(txq);
+ if (txq->ctype[IDPF_TXD_COMPLT_RS] > txq->tx_free_thresh) {
+ ci_tx_free_bufs_vec(txq, idpf_tx_desc_done, false);
+ txq->ctype[IDPF_TXD_COMPLT_RS] -= txq->tx_rs_thresh;
+ }
num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
ret = idpf_splitq_xmit_fixed_burst_vec_avx512(tx_queue,
@@ -28,6 +28,14 @@
RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
+static inline int
+idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
+{
+ return (txq->idpf_tx_ring[idx].qw1 &
+ rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) ==
+ rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
+}
+
static inline int
idpf_rx_vec_queue_default(struct idpf_rx_queue *rxq)
{
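
For readers unfamiliar with the common helper this patch adopts: judging from the call sites above, ci_tx_free_bufs_vec() takes the Tx queue plus a driver-supplied descriptor-done predicate (here the new idpf_tx_desc_done()) and performs the same threshold-based cleanup that the removed per-driver functions did. The sketch below is a minimal, self-contained illustration of that pattern only; the demo_tx_queue, demo_tx_entry and demo_desc_done_fn names are simplified stand-ins invented for this example, not the real types or signature from the shared Intel Tx header.

    #include <stdint.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Simplified stand-ins for the common Tx queue/entry types used above. */
    struct demo_tx_entry {
            struct rte_mbuf *mbuf;
    };

    struct demo_tx_queue {
            struct demo_tx_entry *sw_ring;
            uint16_t tx_next_dd;   /* descriptor expected to report "done" next */
            uint16_t tx_rs_thresh; /* number of buffers freed per cleanup round */
            uint16_t nb_tx_free;
            uint16_t nb_tx_desc;
    };

    /* Driver-supplied predicate, analogous in spirit to idpf_tx_desc_done(). */
    typedef int (*demo_desc_done_fn)(struct demo_tx_queue *txq, uint16_t idx);

    /* Free tx_rs_thresh mbufs once the threshold descriptor is done, batching
     * mbufs from the same pool into a single rte_mempool_put_bulk() call.
     */
    static inline int
    demo_tx_free_bufs(struct demo_tx_queue *txq, demo_desc_done_fn desc_done)
    {
            const uint16_t n = txq->tx_rs_thresh;
            struct demo_tx_entry *txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
            struct rte_mbuf *free[n];
            struct rte_mbuf *m;
            uint16_t nb_free = 0;
            uint16_t i;

            /* Nothing to do until the hardware marks the threshold descriptor. */
            if (!desc_done(txq, txq->tx_next_dd))
                    return 0;

            for (i = 0; i < n; i++) {
                    m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
                    if (m == NULL)
                            continue;
                    /* Flush the batch whenever the pool changes. */
                    if (nb_free > 0 && m->pool != free[0]->pool) {
                            rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
                            nb_free = 0;
                    }
                    free[nb_free++] = m;
            }
            if (nb_free > 0)
                    rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);

            /* Advance the cleanup window, wrapping at the end of the ring. */
            txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + n);
            txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + n);
            if (txq->tx_next_dd >= txq->nb_tx_desc)
                    txq->tx_next_dd = (uint16_t)(n - 1);

            return n;
    }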