@@ -843,16 +843,9 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
sq = &cnxk_eth_dev->sqs[tx_queue_id];
txq = eth_dev->data->tx_queues[tx_queue_id];
sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
- sq->nb_sqb_bufs_adj =
- sq->nb_sqb_bufs -
- RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
- sqes_per_sqb;
if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
- sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
- (sqes_per_sqb - 1));
+ sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc / sqes_per_sqb);
txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
- txq->nb_sqb_bufs_adj =
- ((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
}
}
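
Both `txq_fc_update` hunks (this one and the identical cn9k hunk below) drop the up-front "one SQE per SQB" deduction and divide the security-offload reservation by the full `sqes_per_sqb`; the rest of the patch suggests that per-SQB slot is now charged once, in the depth helpers, rather than here. A minimal standalone sketch of the arithmetic involved, with the hypothetical `align_mul_ceil()` mirroring `RTE_ALIGN_MUL_CEIL` and made-up queue geometry:

```c
#include <stdio.h>

/* Hypothetical stand-in for RTE_ALIGN_MUL_CEIL from rte_common.h. */
static unsigned int align_mul_ceil(unsigned int v, unsigned int mul)
{
	return ((v + mul - 1) / mul) * mul;
}

int main(void)
{
	unsigned int nb_sqb_bufs = 100;  /* SQBs backing the SQ (toy value) */
	unsigned int sqes_per_sqb = 32;  /* 1 << sqes_per_sqb_log2 */
	unsigned int outb_desc = 1024;   /* outbound security descriptors */

	/* The removed lines reserved one SQE per SQB up front:
	 * 100 - ceil(100 / 32) = 96 adjusted SQBs. */
	unsigned int removed_adj = nb_sqb_bufs -
		align_mul_ceil(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;

	/* Security reservation, old divisor vs. new: 33 vs. 32 SQBs. */
	unsigned int old_resv = outb_desc / (sqes_per_sqb - 1);
	unsigned int new_resv = outb_desc / sqes_per_sqb;

	printf("removed_adj=%u old_resv=%u new_resv=%u\n",
	       removed_adj, old_resv, new_resv);
	return 0;
}
```
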
@@ -32,9 +32,9 @@ cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
static __rte_always_inline int32_t
cn10k_sso_sq_depth(const struct cn10k_eth_txq *txq)
{
- return (txq->nb_sqb_bufs_adj -
- __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
- << txq->sqes_per_sqb_log2;
+ int32_t avail = (int32_t)txq->nb_sqb_bufs_adj -
+ (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+ return (avail << txq->sqes_per_sqb_log2) - avail;
}
static __rte_always_inline uint16_t
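
The rewritten `cn10k_sso_sq_depth()` (a cn9k twin is added below) widens before subtracting and applies `- avail` after the shift, so each free SQB contributes `sqes_per_sqb - 1` packets rather than `sqes_per_sqb`, reflecting the SQE each SQB gives up as the link to the next SQB. A self-contained check of that identity; `toy_txq` and its field values are invented, with a plain `fc_mem` value standing in for the driver's `*txq->fc_mem` load:

```c
#include <assert.h>
#include <stdint.h>

struct toy_txq {
	uint16_t nb_sqb_bufs_adj;   /* usable SQB budget */
	uint8_t sqes_per_sqb_log2;  /* log2 of SQEs per SQB */
	int64_t fc_mem;             /* stand-in for *txq->fc_mem */
};

static int32_t sq_depth(const struct toy_txq *txq)
{
	int32_t avail = (int32_t)txq->nb_sqb_bufs_adj - (int32_t)txq->fc_mem;

	/* avail free SQBs, each with (sqes_per_sqb - 1) usable SQEs. */
	return (avail << txq->sqes_per_sqb_log2) - avail;
}

int main(void)
{
	struct toy_txq txq = {
		.nb_sqb_bufs_adj = 100, .sqes_per_sqb_log2 = 5, .fc_mem = 40
	};
	int32_t sqes_per_sqb = 1 << txq.sqes_per_sqb_log2;

	/* 60 free SQBs * 31 packets each, not 60 * 32. */
	assert(sq_depth(&txq) == (100 - 40) * (sqes_per_sqb - 1));
	return 0;
}
```
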
@@ -877,16 +877,9 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
sq = &cnxk_eth_dev->sqs[tx_queue_id];
txq = eth_dev->data->tx_queues[tx_queue_id];
sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
- sq->nb_sqb_bufs_adj =
- sq->nb_sqb_bufs -
- RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
- sqes_per_sqb;
if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
- sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
- (sqes_per_sqb - 1));
+ sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc / sqes_per_sqb);
txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
- txq->nb_sqb_bufs_adj =
- ((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
}
}
@@ -713,6 +713,14 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
}
#endif
+static __rte_always_inline int32_t
+cn9k_sso_sq_depth(const struct cn9k_eth_txq *txq)
+{
+ int32_t avail = (int32_t)txq->nb_sqb_bufs_adj -
+ (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+ return (avail << txq->sqes_per_sqb_log2) - avail;
+}
+
static __rte_always_inline uint16_t
cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
uint64_t *txq_data, const uint32_t flags)
@@ -736,9 +744,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
handle_tx_completion_pkts(txq, 1);
- if (((txq->nb_sqb_bufs_adj -
- __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
- << txq->sqes_per_sqb_log2) <= 0)
+ if (cn9k_sso_sq_depth(txq) <= 0)
return 0;
cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
cn9k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
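
Besides switching to the new helper, the cn9k check no longer reads `fc_mem` through an `(int16_t *)` cast. A hedged illustration, not driver code, of why that narrow load is fragile once the 64-bit counter outgrows 15 bits; little-endian layout is assumed:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t fc_mem = 40000; /* toy value, larger than INT16_MAX */

	/* Old-style narrow load: reads 2 of the 8 bytes and reinterprets
	 * them as signed, so 40000 comes back as -25536. The cast also
	 * breaks strict aliasing, which is part of what made it fragile. */
	int16_t narrow = __atomic_load_n((int16_t *)&fc_mem, __ATOMIC_RELAXED);

	/* Full-width load, as the patched code does. */
	int64_t wide = __atomic_load_n(&fc_mem, __ATOMIC_RELAXED);

	printf("narrow=%d wide=%lld\n", narrow, (long long)wide);
	return 0;
}
```
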
@@ -35,12 +35,13 @@
#define NIX_XMIT_FC_OR_RETURN(txq, pkts) \
do { \
+ int64_t avail; \
/* Cached value is low, Update the fc_cache_pkts */ \
if (unlikely((txq)->fc_cache_pkts < (pkts))) { \
+ avail = (txq)->nb_sqb_bufs_adj - *(txq)->fc_mem; \
- /* Multiply with sqe_per_sqb to express in pkts */ \
+ /* Multiply by (sqes_per_sqb - 1) to express in pkts */ \
(txq)->fc_cache_pkts = \
- ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) \
- << (txq)->sqes_per_sqb_log2; \
+ (avail << (txq)->sqes_per_sqb_log2) - avail; \
/* Check it again for the room */ \
if (unlikely((txq)->fc_cache_pkts < (pkts))) \
return 0; \
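
Rendered as a plain function, the updated `NIX_XMIT_FC_OR_RETURN` refreshes its cached credit only when it looks too small, now with the same `- avail` correction. A sketch under invented names (`credit_txq`, `have_room`); the real macro executes `return 0` in the enclosing function, which a helper can only signal through its return value:

```c
#include <stdbool.h>
#include <stdint.h>

struct credit_txq {
	int64_t fc_cache_pkts;      /* locally cached credit, in packets */
	uint16_t nb_sqb_bufs_adj;
	uint8_t sqes_per_sqb_log2;
	int64_t fc_mem;             /* stand-in for the HW-updated counter */
};

static bool have_room(struct credit_txq *txq, uint16_t pkts)
{
	/* Cached value is low: recompute from the shared counter. */
	if (txq->fc_cache_pkts < pkts) {
		int64_t avail = txq->nb_sqb_bufs_adj - txq->fc_mem;

		/* avail SQBs * (sqes_per_sqb - 1) packets each. */
		txq->fc_cache_pkts =
			(avail << txq->sqes_per_sqb_log2) - avail;
		if (txq->fc_cache_pkts < pkts)
			return false; /* the macro's early `return 0` */
	}
	return true;
}

int main(void)
{
	struct credit_txq txq = {
		.fc_cache_pkts = 0, .nb_sqb_bufs_adj = 100,
		.sqes_per_sqb_log2 = 5, .fc_mem = 40
	};

	return have_room(&txq, 64) ? 0 : 1; /* 1860 credits available */
}
```
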
@@ -113,10 +114,9 @@ cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
if (cached < 0) {
/* Check if we have space else retry. */
do {
- refill =
- (txq->nb_sqb_bufs_adj -
- __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
- << txq->sqes_per_sqb_log2;
+ refill = txq->nb_sqb_bufs_adj -
+ __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+ refill = (refill << txq->sqes_per_sqb_log2) - refill;
} while (refill <= 0);
__atomic_compare_exchange(&txq->fc_cache_pkts, &cached, &refill,
0, __ATOMIC_RELEASE,
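
This hunk's trailing context stops mid-call, so the remaining `__atomic_compare_exchange` arguments (including the failure memory order) sit outside the excerpt. The refill pattern itself, sketched with invented globals and GCC/Clang `__atomic` builtins; the `__ATOMIC_RELAXED` failure order here is an assumption:

```c
#include <stdint.h>

static int64_t fc_cache_pkts = -1;       /* shared credit cache */
static volatile int64_t fc_mem;          /* HW-updated consumed-SQB count */
static const int64_t nb_sqb_bufs_adj = 100;
static const uint8_t sqes_per_sqb_log2 = 5;

static void refill_credits(int64_t cached)
{
	int64_t refill;

	/* Spin until the device has freed at least one SQB of space. */
	do {
		refill = nb_sqb_bufs_adj -
			 __atomic_load_n(&fc_mem, __ATOMIC_RELAXED);
		refill = (refill << sqes_per_sqb_log2) - refill;
	} while (refill <= 0);

	/* Publish the recomputed credit only if no other thread beat us
	 * to it: success stores with release ordering, failure rereads
	 * relaxed (assumed final argument). */
	__atomic_compare_exchange(&fc_cache_pkts, &cached, &refill,
				  0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

int main(void)
{
	refill_credits(-1); /* e.g. after the cached count went negative */
	return fc_cache_pkts > 0 ? 0 : 1;
}
```
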
@@ -32,12 +32,13 @@
#define NIX_XMIT_FC_OR_RETURN(txq, pkts) \
do { \
+ int64_t avail; \
/* Cached value is low, Update the fc_cache_pkts */ \
if (unlikely((txq)->fc_cache_pkts < (pkts))) { \
+ avail = (txq)->nb_sqb_bufs_adj - *(txq)->fc_mem; \
- /* Multiply with sqe_per_sqb to express in pkts */ \
+ /* Multiply by (sqes_per_sqb - 1) to express in pkts */ \
(txq)->fc_cache_pkts = \
- ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) \
- << (txq)->sqes_per_sqb_log2; \
+ (avail << (txq)->sqes_per_sqb_log2) - avail; \
/* Check it again for the room */ \
if (unlikely((txq)->fc_cache_pkts < (pkts))) \
return 0; \
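
Concrete numbers for the correction both tx.h hunks make, using the same toy geometry as the sketches above:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t nb_sqb_bufs_adj = 100; /* usable SQBs (toy value) */
	uint8_t log2_sqes = 5;         /* 32 SQEs per SQB */
	int64_t in_use = 0;            /* *fc_mem: nothing consumed yet */

	int64_t avail = nb_sqb_bufs_adj - in_use;
	int64_t old_pkts = avail << log2_sqes;           /* 3200 */
	int64_t new_pkts = (avail << log2_sqes) - avail; /* 3100 */

	/* The old formula overstates capacity by one packet per free SQB,
	 * here 100 packets: exactly the kind of overcount that can
	 * overrun the SQ under load. */
	printf("old=%lld new=%lld over=%lld\n", (long long)old_pkts,
	       (long long)new_pkts, (long long)(old_pkts - new_pkts));
	return 0;
}
```
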