@@ -2549,7 +2549,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
- txq->tx_ring_phys_addr = tz->iova;
+ txq->tx_ring_dma = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
@@ -2923,7 +2923,7 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(tx_ctx));
tx_ctx.new_context = 1;
- tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.base = txq->tx_ring_dma / I40E_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
#ifdef RTE_LIBRTE_IEEE1588
@@ -3209,7 +3209,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
- txq->tx_ring_phys_addr = tz->iova;
+ txq->tx_ring_dma = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
@@ -129,7 +129,7 @@ struct i40e_rx_queue {
*/
struct i40e_tx_queue {
uint16_t nb_tx_desc; /**< number of TX descriptors */
- uint64_t tx_ring_phys_addr; /**< TX ring DMA address */
+ rte_iova_t tx_ring_dma; /**< TX ring DMA address */
volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */
struct ci_tx_entry *sw_ring; /**< virtual address of SW ring */
uint16_t tx_tail; /**< current value of tail register */
@@ -216,8 +216,8 @@ static inline bool
check_tx_vec_allow(struct iavf_tx_queue *txq)
{
if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
- txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
- txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
+ txq->tx_rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
+ txq->tx_rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
return true;
}
@@ -309,13 +309,13 @@ reset_tx_queue(struct iavf_tx_queue *txq)
}
txq->tx_tail = 0;
- txq->nb_used = 0;
+ txq->nb_tx_used = 0;
txq->last_desc_cleaned = txq->nb_tx_desc - 1;
- txq->nb_free = txq->nb_tx_desc - 1;
+ txq->nb_tx_free = txq->nb_tx_desc - 1;
- txq->next_dd = txq->rs_thresh - 1;
- txq->next_rs = txq->rs_thresh - 1;
+ txq->tx_next_dd = txq->tx_rs_thresh - 1;
+ txq->tx_next_rs = txq->tx_rs_thresh - 1;
}
static int
@@ -845,8 +845,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
txq->nb_tx_desc = nb_desc;
- txq->rs_thresh = tx_rs_thresh;
- txq->free_thresh = tx_free_thresh;
+ txq->tx_rs_thresh = tx_rs_thresh;
+ txq->tx_free_thresh = tx_free_thresh;
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
@@ -881,7 +881,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
rte_free(txq);
return -ENOMEM;
}
- txq->tx_ring_phys_addr = mz->iova;
+ txq->tx_ring_dma = mz->iova;
txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
txq->mz = mz;
@@ -2387,7 +2387,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
volatile struct iavf_tx_desc *txd = txq->tx_ring;
- desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
@@ -2411,7 +2411,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
txq->last_desc_cleaned = desc_to_clean_to;
- txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
return 0;
}
@@ -2807,7 +2807,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Check if the descriptor ring needs to be cleaned. */
- if (txq->nb_free < txq->free_thresh)
+ if (txq->nb_tx_free < txq->tx_free_thresh)
iavf_xmit_cleanup(txq);
desc_idx = txq->tx_tail;
@@ -2862,14 +2862,14 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
- if (nb_desc_required > txq->nb_free) {
+ if (nb_desc_required > txq->nb_tx_free) {
if (iavf_xmit_cleanup(txq)) {
if (idx == 0)
return 0;
goto end_of_tx;
}
- if (unlikely(nb_desc_required > txq->rs_thresh)) {
- while (nb_desc_required > txq->nb_free) {
+ if (unlikely(nb_desc_required > txq->tx_rs_thresh)) {
+ while (nb_desc_required > txq->nb_tx_free) {
if (iavf_xmit_cleanup(txq)) {
if (idx == 0)
return 0;
@@ -2991,10 +2991,10 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* The last packet data descriptor needs End Of Packet (EOP) */
ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
- txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
- txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_desc_required);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_desc_required);
- if (txq->nb_used >= txq->rs_thresh) {
+ if (txq->nb_tx_used >= txq->tx_rs_thresh) {
PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
"%4u (port=%d queue=%d)",
desc_idx_last, txq->port_id, txq->queue_id);
@@ -3002,7 +3002,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
/* Update txq RS bit counters */
- txq->nb_used = 0;
+ txq->nb_tx_used = 0;
}
ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
@@ -4278,11 +4278,11 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
tx_id = txq->tx_tail;
tx_last = tx_id;
- if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
+ if (txq->nb_tx_free == 0 && iavf_xmit_cleanup(txq))
return 0;
- nb_tx_to_clean = txq->nb_free;
- nb_tx_free_last = txq->nb_free;
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
if (!free_cnt)
free_cnt = txq->nb_tx_desc;
@@ -4305,16 +4305,16 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
tx_id = swr_ring[tx_id].next_id;
} while (--nb_tx_to_clean && pkt_cnt < free_cnt && tx_id != tx_last);
- if (txq->rs_thresh > txq->nb_tx_desc -
- txq->nb_free || tx_id == tx_last)
+ if (txq->tx_rs_thresh > txq->nb_tx_desc -
+ txq->nb_tx_free || tx_id == tx_last)
break;
if (pkt_cnt < free_cnt) {
if (iavf_xmit_cleanup(txq))
break;
- nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
- nb_tx_free_last = txq->nb_free;
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
}
}
@@ -4356,8 +4356,8 @@ iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->nb_desc = txq->nb_tx_desc;
- qinfo->conf.tx_free_thresh = txq->free_thresh;
- qinfo->conf.tx_rs_thresh = txq->rs_thresh;
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
@@ -4432,8 +4432,8 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
desc = txq->tx_tail + offset;
/* go to next desc that has the RS bit */
- desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
- txq->rs_thresh;
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
if (desc >= txq->nb_tx_desc) {
desc -= txq->nb_tx_desc;
if (desc >= txq->nb_tx_desc)
@@ -277,25 +277,25 @@ struct iavf_rx_queue {
struct iavf_tx_queue {
const struct rte_memzone *mz; /* memzone for Tx ring */
volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
- uint64_t tx_ring_phys_addr; /* Tx ring DMA address */
+ rte_iova_t tx_ring_dma; /* Tx ring DMA address */
struct ci_tx_entry *sw_ring; /* address array of SW ring */
uint16_t nb_tx_desc; /* ring length */
uint16_t tx_tail; /* current value of tail */
volatile uint8_t *qtx_tail; /* register address of tail */
/* number of used desc since RS bit set */
- uint16_t nb_used;
- uint16_t nb_free;
+ uint16_t nb_tx_used;
+ uint16_t nb_tx_free; /* number of free descriptors in the Tx ring */
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
- uint16_t free_thresh;
- uint16_t rs_thresh;
+ uint16_t tx_free_thresh; /* start cleaning the ring when free descriptors drop below this */
+ uint16_t tx_rs_thresh; /* request descriptor write-back (RS bit) every tx_rs_thresh descriptors */
uint8_t rel_mbufs_type;
struct iavf_vsi *vsi; /**< the VSI this queue belongs to */
uint16_t port_id;
uint16_t queue_id;
uint64_t offloads;
- uint16_t next_dd; /* next to set RS, for VPMD */
- uint16_t next_rs; /* next to check DD, for VPMD */
+ uint16_t tx_next_dd; /* next desc to check DD bit, for VPMD */
+ uint16_t tx_next_rs; /* next desc to set RS bit, for VPMD */
uint16_t ipsec_crypto_pkt_md_offset;
uint64_t mbuf_errors;
@@ -1742,18 +1742,19 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
- if (txq->nb_free < txq->free_thresh)
+ if (txq->nb_tx_free < txq->tx_free_thresh)
iavf_tx_free_bufs(txq);
- nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
return 0;
+ nb_commit = nb_pkts;
tx_id = txq->tx_tail;
txdp = &txq->tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
- txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
@@ -1768,7 +1769,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_commit = (uint16_t)(nb_commit - n);
tx_id = 0;
- txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
txdp = &txq->tx_ring[tx_id];
@@ -1780,12 +1781,12 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
iavf_vtx(txdp, tx_pkts, nb_commit, flags, offload);
tx_id = (uint16_t)(tx_id + nb_commit);
- if (tx_id > txq->next_rs) {
- txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
- txq->next_rs =
- (uint16_t)(txq->next_rs + txq->rs_thresh);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
}
txq->tx_tail = tx_id;
@@ -1806,7 +1807,7 @@ iavf_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ret, num;
/* cross rs_thresh boundary is not allowed */
- num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
ret = iavf_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
num, offload);
nb_tx += ret;
@@ -1854,18 +1854,18 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
return 0;
- n = txq->rs_thresh >> txq->use_ctx;
+ n = txq->tx_rs_thresh >> txq->use_ctx;
/* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = (void *)txq->sw_ring;
- txep += (txq->next_dd >> txq->use_ctx) - (n - 1);
+ txep += (txq->tx_next_dd >> txq->use_ctx) - (n - 1);
if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
struct rte_mempool *mp = txep[0].mbuf->pool;
@@ -1951,12 +1951,12 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
done:
/* buffers were freed, update counters */
- txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
- txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
- if (txq->next_dd >= txq->nb_tx_desc)
- txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
- return txq->rs_thresh;
+ return txq->tx_rs_thresh;
}
static __rte_always_inline void
@@ -2319,19 +2319,20 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
- if (txq->nb_free < txq->free_thresh)
+ if (txq->nb_tx_free < txq->tx_free_thresh)
iavf_tx_free_bufs_avx512(txq);
- nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
return 0;
+ nb_commit = nb_pkts;
tx_id = txq->tx_tail;
txdp = &txq->tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
- txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
@@ -2346,7 +2347,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_commit = (uint16_t)(nb_commit - n);
tx_id = 0;
- txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
txdp = &txq->tx_ring[tx_id];
@@ -2359,12 +2360,12 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
iavf_vtx(txdp, tx_pkts, nb_commit, flags, offload);
tx_id = (uint16_t)(tx_id + nb_commit);
- if (tx_id > txq->next_rs) {
- txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
- txq->next_rs =
- (uint16_t)(txq->next_rs + txq->rs_thresh);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
}
txq->tx_tail = tx_id;
@@ -2386,10 +2387,10 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
- if (txq->nb_free < txq->free_thresh)
+ if (txq->nb_tx_free < txq->tx_free_thresh)
iavf_tx_free_bufs_avx512(txq);
- nb_commit = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts << 1);
+ nb_commit = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts << 1);
nb_commit &= 0xFFFE;
if (unlikely(nb_commit == 0))
return 0;
@@ -2400,7 +2401,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
txep = (void *)txq->sw_ring;
txep += (tx_id >> 1);
- txq->nb_free = (uint16_t)(txq->nb_free - nb_commit);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_commit);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (n != 0 && nb_commit >= n) {
@@ -2414,7 +2415,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_commit = (uint16_t)(nb_commit - n);
- txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
tx_id = 0;
/* avoid reach the end of ring */
txdp = txq->tx_ring;
@@ -2427,12 +2428,12 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
ctx_vtx(txdp, tx_pkts, nb_mbuf, flags, offload, txq->vlan_flag);
tx_id = (uint16_t)(tx_id + nb_commit);
- if (tx_id > txq->next_rs) {
- txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
- txq->next_rs =
- (uint16_t)(txq->next_rs + txq->rs_thresh);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
}
txq->tx_tail = tx_id;
@@ -2452,7 +2453,7 @@ iavf_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ret, num;
/* cross rs_thresh boundary is not allowed */
- num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
ret = iavf_xmit_fixed_burst_vec_avx512(tx_queue, &tx_pkts[nb_tx],
num, offload);
nb_tx += ret;
@@ -2480,10 +2481,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
const uint16_t wrap_point = txq->nb_tx_desc >> txq->use_ctx; /* end of SW ring */
struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
- if (!txq->sw_ring || txq->nb_free == max_desc)
+ if (!txq->sw_ring || txq->nb_tx_free == max_desc)
return;
- i = (txq->next_dd - txq->rs_thresh + 1) >> txq->use_ctx;
+ i = (txq->tx_next_dd - txq->tx_rs_thresh + 1) >> txq->use_ctx;
while (i != end_desc) {
rte_pktmbuf_free_seg(swr[i].mbuf);
swr[i].mbuf = NULL;
@@ -2517,7 +2518,7 @@ iavf_xmit_pkts_vec_avx512_ctx_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ret, num;
/* cross rs_thresh boundary is not allowed */
- num = (uint16_t)RTE_MIN(nb_pkts << 1, txq->rs_thresh);
+ num = (uint16_t)RTE_MIN(nb_pkts << 1, txq->tx_rs_thresh);
num = num >> 1;
ret = iavf_xmit_fixed_burst_vec_avx512_ctx(tx_queue, &tx_pkts[nb_tx],
num, offload);
@@ -26,17 +26,17 @@ iavf_tx_free_bufs(struct iavf_tx_queue *txq)
struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
return 0;
- n = txq->rs_thresh;
+ n = txq->tx_rs_thresh;
/* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh-1)
*/
- txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
@@ -65,12 +65,12 @@ iavf_tx_free_bufs(struct iavf_tx_queue *txq)
}
/* buffers were freed, update counters */
- txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
- txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
- if (txq->next_dd >= txq->nb_tx_desc)
- txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
- return txq->rs_thresh;
+ return txq->tx_rs_thresh;
}
static inline void
@@ -109,10 +109,10 @@ _iavf_tx_queue_release_mbufs_vec(struct iavf_tx_queue *txq)
unsigned i;
const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
- if (!txq->sw_ring || txq->nb_free == max_desc)
+ if (!txq->sw_ring || txq->nb_tx_free == max_desc)
return;
- i = txq->next_dd - txq->rs_thresh + 1;
+ i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
while (i != txq->tx_tail) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
@@ -169,8 +169,8 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
if (!txq)
return -1;
- if (txq->rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
- txq->rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
+ if (txq->tx_rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
+ txq->tx_rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
return -1;
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
@@ -1374,10 +1374,10 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
int i;
- if (txq->nb_free < txq->free_thresh)
+ if (txq->nb_tx_free < txq->tx_free_thresh)
iavf_tx_free_bufs(txq);
- nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
return 0;
nb_commit = nb_pkts;
@@ -1386,7 +1386,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txdp = &txq->tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
- txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
@@ -1400,7 +1400,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_commit = (uint16_t)(nb_commit - n);
tx_id = 0;
- txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
txdp = &txq->tx_ring[tx_id];
@@ -1412,12 +1412,12 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
iavf_vtx(txdp, tx_pkts, nb_commit, flags);
tx_id = (uint16_t)(tx_id + nb_commit);
- if (tx_id > txq->next_rs) {
- txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
- txq->next_rs =
- (uint16_t)(txq->next_rs + txq->rs_thresh);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
}
txq->tx_tail = tx_id;
@@ -1441,7 +1441,7 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ret, num;
/* cross rs_thresh boundary is not allowed */
- num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
ret = iavf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
nb_tx += ret;
nb_pkts -= ret;
@@ -1247,7 +1247,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
/* Virtchnnl configure tx queues by pairs */
if (i < adapter->dev_data->nb_tx_queues) {
vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
- vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
+ vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
}
vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
@@ -124,7 +124,7 @@ static inline uint32_t ixgbe_read_addr(volatile void* addr)
rte_write32_wc_relaxed((rte_cpu_to_le_32(value)), reg)
#define IXGBE_PCI_REG_ADDR(hw, reg) \
- ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+ ((volatile void *)((char *)(hw)->hw_addr + (reg)))
#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \
IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
@@ -308,7 +308,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
@@ -946,7 +946,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -2786,11 +2786,11 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
hw->mac.type == ixgbe_mac_X550_vf ||
hw->mac.type == ixgbe_mac_X550EM_x_vf ||
hw->mac.type == ixgbe_mac_X550EM_a_vf)
- txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
+ txq->qtx_tail = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
else
- txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
+ txq->qtx_tail = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = tz->iova;
+ txq->tx_ring_dma = tz->iova;
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -2802,7 +2802,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
- txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_dma);
/* set up vector or scalar TX function as appropriate */
ixgbe_set_tx_function(dev, txq);
@@ -5303,7 +5303,7 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- bus_addr = txq->tx_ring_phys_addr;
+ bus_addr = txq->tx_ring_dma;
IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
(uint32_t)(bus_addr & 0x00000000ffffffffULL));
IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
@@ -5887,7 +5887,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
/* Setup the Base and Length of the Tx Descriptor Rings */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- bus_addr = txq->tx_ring_phys_addr;
+ bus_addr = txq->tx_ring_dma;
IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
(uint32_t)(bus_addr & 0x00000000ffffffffULL));
IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
@@ -186,12 +186,12 @@ struct ixgbe_advctx_info {
struct ixgbe_tx_queue {
/** TX ring virtual address. */
volatile union ixgbe_adv_tx_desc *tx_ring;
- uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ rte_iova_t tx_ring_dma; /**< TX ring DMA address. */
union {
struct ci_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
struct ci_tx_entry_vec *sw_ring_v; /**< address of SW ring for vector PMD */
};
- volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ volatile uint8_t *qtx_tail; /**< Address of TDT register. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
uint16_t tx_tail; /**< current value of TDT reg. */
/**< Start freeing TX buffers if there are less free descriptors than
@@ -218,7 +218,7 @@ struct ixgbe_tx_queue {
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
const struct ixgbe_txq_ops *ops; /**< txq ops */
- uint8_t tx_deferred_start; /**< not in global dev start. */
+ bool tx_deferred_start; /**< queue is not started in rte_eth_dev_start(). */
#ifdef RTE_LIB_SECURITY
uint8_t using_ipsec;
/**< indicates that IPsec TX feature is in use */
@@ -628,7 +628,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_tail = tx_id;
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
@@ -751,7 +751,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_tail = tx_id;
- IXGBE_PCI_REG_WC_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}