Allow applications to control how often the transmit ring is flushed
by honoring the tx_free_thresh value supplied in rte_eth_txconf.
When tx_free_thresh is zero (unset), the threshold defaults to the
number of Tx descriptors minus the default Tx burst size.
Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
---
drivers/net/ionic/ionic_dev.h | 1 +
drivers/net/ionic/ionic_ethdev.c | 4 ++--
drivers/net/ionic/ionic_lif.h | 1 +
drivers/net/ionic/ionic_rxtx.c | 17 +++++++++++++++--
4 files changed, 19 insertions(+), 4 deletions(-)
@@ -20,6 +20,7 @@
#define IONIC_MAX_RING_DESC 32768
#define IONIC_MIN_RING_DESC 16
#define IONIC_DEF_TXRX_DESC 4096
+#define IONIC_DEF_TXRX_BURST 32
#define IONIC_DEVCMD_TIMEOUT 5 /* devcmd_timeout */
#define IONIC_DEVCMD_CHECK_PERIOD_US 10 /* devcmd status chk period */
@@ -429,8 +429,8 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->tx_desc_lim = tx_desc_lim_v1;
/* Driver-preferred Rx/Tx parameters */
- dev_info->default_rxportconf.burst_size = 32;
- dev_info->default_txportconf.burst_size = 32;
+ dev_info->default_rxportconf.burst_size = IONIC_DEF_TXRX_BURST;
+ dev_info->default_txportconf.burst_size = IONIC_DEF_TXRX_BURST;
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = IONIC_DEF_TXRX_DESC;
@@ -104,6 +104,7 @@ struct ionic_tx_qcq {
/* cacheline2 */
uint16_t num_segs_fw; /* # segs supported by current FW */
+ uint16_t free_thresh;
uint16_t flags;
struct ionic_tx_stats stats;
@@ -223,6 +223,13 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
+ if (tx_conf->tx_free_thresh > nb_desc) {
+ IONIC_PRINT(ERR,
+ "tx_free_thresh must be less than nb_desc (%u)",
+ nb_desc);
+ return -EINVAL;
+ }
+
/* Free memory prior to re-allocation if needed... */
if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
@@ -252,6 +259,10 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
txq->flags |= IONIC_QCQ_F_FAST_FREE;
+ txq->free_thresh =
+ tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
+ nb_desc - IONIC_DEF_TXRX_BURST;
+
eth_dev->data->tx_queues[tx_queue_id] = txq;
return 0;
@@ -609,8 +620,10 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_mbuf_prefetch_part2(tx_pkts[0]);
}
- /* Cleaning old buffers */
- ionic_tx_flush(txq);
+ if (unlikely(ionic_q_space_avail(q) < txq->free_thresh)) {
+ /* Cleaning old buffers */
+ ionic_tx_flush(txq);
+ }
nb_avail = ionic_q_space_avail(q);
if (unlikely(nb_avail < nb_pkts)) {