@@ -542,6 +542,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
.rxq_info_get = ixgbe_rxq_info_get,
.txq_info_get = ixgbe_txq_info_get,
+ .rxq_rearm_data_get = ixgbe_rxq_rearm_data_get,
.timesync_enable = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -625,6 +625,9 @@ void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);

+void ixgbe_rxq_rearm_data_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_rearm_data *rxq_rearm_data);
+
int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
@@ -2559,6 +2559,9 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
ixgbe_txq_vec_setup(txq) == 0)) {
PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+#ifdef RTE_ARCH_ARM64
+ dev->tx_fill_sw_ring = ixgbe_tx_fill_sw_ring;
+#endif
} else
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
@@ -4853,6 +4856,9 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
RTE_IXGBE_DESCS_PER_LOOP,
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+#ifdef RTE_ARCH_ARM64
+ dev->rx_flush_descriptor = ixgbe_rx_flush_descriptor_vec;
+#endif
} else if (adapter->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
@@ -5623,6 +5629,23 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

+void
+ixgbe_rxq_rearm_data_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_rearm_data *rxq_rearm_data)
+{
+ struct ixgbe_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
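+ /* Expose pointers into the queue's live rearm state: the Tx side
+ * fills this sw_ring directly, and the descriptor flush step then
+ * advances rearm_start and shrinks rearm_nb.
+ */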
+ rxq_rearm_data->rx_sw_ring = rxq->sw_ring;
+ rxq_rearm_data->rearm_start = &rxq->rxrearm_start;
+ rxq_rearm_data->rearm_nb = &rxq->rxrearm_nb;
+}
+
/*
* [VF] Initializes Receive Unit.
*/
@@ -295,6 +295,10 @@ int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];

+int ixgbe_tx_fill_sw_ring(void *tx_queue,
+ struct rte_eth_rxq_rearm_data *rxq_rearm_data);
+int ixgbe_rx_flush_descriptor_vec(void *rx_queue, uint16_t nb_rearm);
+
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
@@ -129,6 +129,63 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
return txq->tx_rs_thresh;
}

+int
+ixgbe_tx_fill_sw_ring(void *tx_queue,
+ struct rte_eth_rxq_rearm_data *rxq_rearm_data)
+{
+ struct ixgbe_tx_queue *txq = tx_queue;
+ struct ixgbe_tx_entry_v *txep;
+ void **rxep;
+ uint32_t status;
+ struct rte_mbuf *m;
+ int i, n;
+ int nb_rearm = 0;
+
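+ /* Proceed only when Rx wants at least a full tx_rs_thresh batch of
+ * buffers and Tx has fallen to its free threshold; otherwise leave
+ * both rings untouched.
+ */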
+ if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
+ txq->nb_tx_free > txq->tx_free_thresh)
+ return 0;
+
+ /* check DD bits on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (!(status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
+ rxep = rxq_rearm_data->rx_sw_ring;
+ rxep += *rxq_rearm_data->rearm_start;
+
+ for (i = 0; i < n; i++, rxep++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL) {
+ *rxep = m;
+ nb_rearm++;
+ }
+ }
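+
+ /* rte_pktmbuf_prefree_seg() returns NULL for mbufs that are still
+ * referenced elsewhere; only directly reusable mbufs are handed to
+ * Rx, so n may end up smaller than the full batch.
+ */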
+ n = nb_rearm;
+
+ /* update counters for Tx */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return n;
+}
+
static __rte_always_inline void
tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -633,6 +633,67 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}

+int
+ixgbe_rx_flush_descriptor_vec(void *rx_queue, uint16_t nb_rearm)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ struct ixgbe_rx_entry *rxep;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct rte_mbuf *mb;
+ uint16_t rx_id;
+ uint64x2_t dma_addr;
+ uint64x2_t zero = vdupq_n_u64(0);
+ uint64_t paddr;
+ uint8x8_t p;
+ uint16_t i;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+ rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+ p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
+
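+ /* For each mbuf handed over by Tx: reset its rearm metadata from
+ * the queue template, then write its buffer address back into the
+ * descriptor ring.
+ */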
+ for (i = 0; i < nb_rearm; i++) {
+ mb = rxep[i].mbuf;
+ /*
+ * Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ */
+ vst1_u8((uint8_t *)&mb->rearm_data, p);
+ /* Initialize rxdp descs */
+ paddr = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+ dma_addr = vsetq_lane_u64(paddr, zero, 0);
+ /* flush desc with pa dma_addr */
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr);
+ }
+
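+ /* Note: the rearm loop above does not handle ring wrap-around;
+ * the caller is assumed to cap nb_rearm so that a batch never
+ * crosses the end of the ring.
+ */
+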
+ /* Update the descriptor initializer index */
+ rxq->rxrearm_start += nb_rearm;
+ rx_id = rxq->rxrearm_start - 1;
+
+ if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) {
+ rxq->rxrearm_start = rxq->rxrearm_start - rxq->nb_rx_desc;
+ if (!rxq->rxrearm_start)
+ rx_id = rxq->nb_rx_desc - 1;
+ else
+ rx_id = rxq->rxrearm_start - 1;
+ }
+ rxq->rxrearm_nb -= nb_rearm;
+
+ /* Update the tail pointer on the NIC */
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+
+ return 0;
+}
+
static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{