[v3] net/cnxk: support Tx queue descriptor count
Checks
Commit Message
From: Satha Rao <skoteshwar@marvell.com>
Added CNXK APIs to get used txq descriptor count.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
Depends-on: series-30833 ("ethdev: support Tx queue used count")
v2:
Updated release notes and fixed API for CPT queues.
v3:
Addressed review comments
doc/guides/nics/features/cnxk.ini | 1 +
doc/guides/rel_notes/release_24_03.rst | 1 +
drivers/net/cnxk/cn10k_tx_select.c | 22 ++++++++++++++++++++++
drivers/net/cnxk/cn9k_tx_select.c | 23 +++++++++++++++++++++++
drivers/net/cnxk/cnxk_ethdev.h | 24 ++++++++++++++++++++++++
5 files changed, 71 insertions(+)
@@ -40,6 +40,7 @@ Timesync = Y
Timestamp offload = Y
Rx descriptor status = Y
Tx descriptor status = Y
+Tx queue count = Y
Basic stats = Y
Stats per queue = Y
Extended stats = Y
@@ -111,6 +111,7 @@ New Features
* Added support for ``RTE_FLOW_ITEM_TYPE_PPPOES`` flow item.
* Added support for ``RTE_FLOW_ACTION_TYPE_SAMPLE`` flow action.
* Added support for Rx inject.
+ * Added support for ``rte_eth_tx_queue_count``.
* **Updated Marvell OCTEON EP driver.**
@@ -20,6 +20,24 @@
eth_dev->tx_pkt_burst;
}
+#if defined(RTE_ARCH_ARM64)
+/* eth_tx_queue_count callback (non-security path): report the number of
+ * in-use descriptors for a cn10k Tx queue.
+ */
+static int
+cn10k_nix_tx_queue_count(void *tx_queue)
+{
+	struct cn10k_eth_txq *txq = tx_queue;
+
+	return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
+}
+
+/* eth_tx_queue_count callback for queues with RTE_ETH_TX_OFFLOAD_SECURITY:
+ * also accounts for the CPT (crypto) flow-control state via txq->cpt_fc.
+ */
+static int
+cn10k_nix_tx_queue_sec_count(void *tx_queue)
+{
+	struct cn10k_eth_txq *txq = tx_queue;
+
+	return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
+}
+#endif
+
void
cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
@@ -63,6 +81,10 @@
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ eth_dev->tx_queue_count = cn10k_nix_tx_queue_sec_count;
+ else
+ eth_dev->tx_queue_count = cn10k_nix_tx_queue_count;
rte_mb();
#else
@@ -20,6 +20,24 @@
eth_dev->tx_pkt_burst;
}
+#if defined(RTE_ARCH_ARM64)
+/* eth_tx_queue_count callback (non-security path): report the number of
+ * in-use descriptors for a cn9k Tx queue.
+ */
+static int
+cn9k_nix_tx_queue_count(void *tx_queue)
+{
+	struct cn9k_eth_txq *txq = tx_queue;
+
+	return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
+}
+
+/* eth_tx_queue_count callback for queues with RTE_ETH_TX_OFFLOAD_SECURITY:
+ * also accounts for the CPT (crypto) flow-control state via txq->cpt_fc.
+ */
+static int
+cn9k_nix_tx_queue_sec_count(void *tx_queue)
+{
+	struct cn9k_eth_txq *txq = tx_queue;
+
+	return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
+}
+#endif
+
void
cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
@@ -59,6 +77,11 @@
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ eth_dev->tx_queue_count = cn9k_nix_tx_queue_sec_count;
+ else
+ eth_dev->tx_queue_count = cn9k_nix_tx_queue_count;
+
rte_mb();
#else
@@ -461,6 +461,30 @@ struct cnxk_eth_txq_sp {
return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}
+/* Return the number of used Tx descriptors for a plain (non-security) SQ.
+ *
+ * @param mem	Pointer to the SQ flow-control/consumption counter updated by HW
+ *		(presumably txq->fc_mem — confirm against callers).
+ * @param sqes_per_sqb_log2	log2 of SQEs per SQB; count scales as
+ *		val * (2^log2 - 1), i.e. one slot per SQB is excluded.
+ * @return Used descriptor count, truncated to 16 bits.
+ */
+static inline int
+cnxk_nix_tx_queue_count(uint64_t *mem, uint16_t sqes_per_sqb_log2)
+{
+	uint64_t val;
+
+	/* Cast must be pointer-to-atomic, RTE_ATOMIC(uint64_t) *;
+	 * RTE_ATOMIC(uint64_t *) would instead name an atomic pointer type
+	 * and mismatch rte_atomic_load_explicit().
+	 */
+	val = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t) *)mem, rte_memory_order_relaxed);
+	val = (val << sqes_per_sqb_log2) - val;
+
+	return (val & 0xFFFF);
+}
+
+/* Return the number of used Tx descriptors for a security-enabled SQ:
+ * the larger of the SQ usage and the CPT (inline crypto) queue usage,
+ * so the reported backlog covers both paths.
+ *
+ * @param mem	SQ flow-control/consumption counter updated by HW.
+ * @param sqes_per_sqb_log2	log2 of SQEs per SQB (see cnxk_nix_tx_queue_count).
+ * @param sec_fc	CPT queue flow-control counter (txq->cpt_fc).
+ * @return Used descriptor count, truncated to 16 bits.
+ */
+static inline int
+cnxk_nix_tx_queue_sec_count(uint64_t *mem, uint16_t sqes_per_sqb_log2, uint64_t *sec_fc)
+{
+	uint64_t sq_cnt, sec_cnt, val;
+
+	/* Casts must be pointer-to-atomic, RTE_ATOMIC(uint64_t) *;
+	 * RTE_ATOMIC(uint64_t *) would instead name an atomic pointer type
+	 * and mismatch rte_atomic_load_explicit().
+	 */
+	sq_cnt = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t) *)mem, rte_memory_order_relaxed);
+	sq_cnt = (sq_cnt << sqes_per_sqb_log2) - sq_cnt;
+	sec_cnt = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t) *)sec_fc,
+					   rte_memory_order_relaxed);
+	val = RTE_MAX(sq_cnt, sec_cnt);
+
+	return (val & 0xFFFF);
+}
+
/* Common ethdev ops */
extern struct eth_dev_ops cnxk_eth_dev_ops;