[v6] net/cnxk: support Tx queue descriptor count

Message ID 1709545971-17364-1-git-send-email-skoteshwar@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Series: [v6] net/cnxk: support Tx queue descriptor count

Checks

Context                    Check    Description
ci/checkpatch              warning  coding style issues
ci/loongarch-compilation   success  Compilation OK
ci/loongarch-unit-testing  success  Unit Testing PASS
ci/Intel-compilation       success  Compilation OK
ci/intel-Testing           success  Testing PASS
ci/intel-Functional        success  Functional PASS
ci/iol-testing             warning  apply patch failure

Commit Message

Satha Koteswara Rao Kottidi March 4, 2024, 9:52 a.m. UTC
  From: Satha Rao <skoteshwar@marvell.com>

Added CNXK APIs to get used txq descriptor count.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---

Depends-on: series-30833 ("ethdev: support Tx queue used count")

v2:
  Updated release notes and fixed API for CPT queues.
v3:
  Addressed review comments
v5:
  Fixed compilation errors
v6:
  Fixed checkpatch

 doc/guides/nics/features/cnxk.ini      |  1 +
 doc/guides/rel_notes/release_24_03.rst |  1 +
 drivers/net/cnxk/cn10k_tx_select.c     | 22 ++++++++++++++++++++++
 drivers/net/cnxk/cn9k_tx_select.c      | 23 +++++++++++++++++++++++
 drivers/net/cnxk/cnxk_ethdev.h         | 25 +++++++++++++++++++++++++
 5 files changed, 72 insertions(+)
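
As a usage reference (not part of the patch), here is a minimal sketch of how an
application could poll the new counter through the generic ethdev call this series
plugs into, rte_eth_tx_queue_count(); the helper name and the way the result is
printed are illustrative only:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Illustrative helper: report how many Tx descriptors are still in use.
     * rte_eth_tx_queue_count() returns the used descriptor count, or a
     * negative errno (e.g. -ENOTSUP if the driver lacks the callback).
     */
    static void
    report_txq_usage(uint16_t port_id, uint16_t queue_id)
    {
            int used = rte_eth_tx_queue_count(port_id, queue_id);

            if (used < 0)
                    printf("txq %u on port %u: query failed (%d)\n",
                           queue_id, port_id, used);
            else
                    printf("txq %u on port %u: %d descriptors in use\n",
                           queue_id, port_id, used);
    }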
  

Comments

Jerin Jacob March 4, 2024, 11:44 a.m. UTC | #1
On Mon, Mar 4, 2024 at 3:30 PM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> Added CNXK APIs to get used txq descriptor count.
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>

Applied to dpdk-next-net-mrvl/for-main. Thanks



Patch

diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini
index b5d9f7e..1c8db1a 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -40,6 +40,7 @@  Timesync             = Y
 Timestamp offload    = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Tx queue count       = Y
 Basic stats          = Y
 Stats per queue      = Y
 Extended stats       = Y
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 2b160cf..b1942b5 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -113,6 +113,7 @@  New Features
   * Added support for Rx inject.
   * Optimized SW external mbuf free for better performance and avoid SQ corruption.
   * Added support for port representors.
+  * Added support for ``rte_eth_tx_queue_count``.
 
 * **Updated Marvell OCTEON EP driver.**
 
diff --git a/drivers/net/cnxk/cn10k_tx_select.c b/drivers/net/cnxk/cn10k_tx_select.c
index 404f5ba..aa0620e 100644
--- a/drivers/net/cnxk/cn10k_tx_select.c
+++ b/drivers/net/cnxk/cn10k_tx_select.c
@@ -20,6 +20,24 @@ 
 			eth_dev->tx_pkt_burst;
 }
 
+#if defined(RTE_ARCH_ARM64)
+static int
+cn10k_nix_tx_queue_count(void *tx_queue)
+{
+	struct cn10k_eth_txq *txq = (struct cn10k_eth_txq *)tx_queue;
+
+	return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
+}
+
+static int
+cn10k_nix_tx_queue_sec_count(void *tx_queue)
+{
+	struct cn10k_eth_txq *txq = (struct cn10k_eth_txq *)tx_queue;
+
+	return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
+}
+#endif
+
 void
 cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 {
@@ -63,6 +81,10 @@ 
 		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+		eth_dev->tx_queue_count = cn10k_nix_tx_queue_sec_count;
+	else
+		eth_dev->tx_queue_count = cn10k_nix_tx_queue_count;
 
 	rte_mb();
 #else
diff --git a/drivers/net/cnxk/cn9k_tx_select.c b/drivers/net/cnxk/cn9k_tx_select.c
index e08883f..5ecf919 100644
--- a/drivers/net/cnxk/cn9k_tx_select.c
+++ b/drivers/net/cnxk/cn9k_tx_select.c
@@ -20,6 +20,24 @@ 
 			eth_dev->tx_pkt_burst;
 }
 
+#if defined(RTE_ARCH_ARM64)
+static int
+cn9k_nix_tx_queue_count(void *tx_queue)
+{
+	struct cn9k_eth_txq *txq = (struct cn9k_eth_txq *)tx_queue;
+
+	return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
+}
+
+static int
+cn9k_nix_tx_queue_sec_count(void *tx_queue)
+{
+	struct cn9k_eth_txq *txq = (struct cn9k_eth_txq *)tx_queue;
+
+	return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
+}
+#endif
+
 void
 cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 {
@@ -59,6 +77,11 @@ 
 		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
 	}
+	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+		eth_dev->tx_queue_count = cn9k_nix_tx_queue_sec_count;
+	else
+		eth_dev->tx_queue_count = cn9k_nix_tx_queue_count;
+
 
 	rte_mb();
 #else
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5d42e13..5e04064 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -464,6 +464,31 @@  struct cnxk_eth_txq_sp {
 	return ((struct cnxk_eth_txq_sp *)__txq) - 1;
 }
 
+static inline int
+cnxk_nix_tx_queue_count(uint64_t *mem, uint16_t sqes_per_sqb_log2)
+{
+	uint64_t val;
+
+	val = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t)*)mem, rte_memory_order_relaxed);
+	val = (val << sqes_per_sqb_log2) - val;
+
+	return (val & 0xFFFF);
+}
+
+static inline int
+cnxk_nix_tx_queue_sec_count(uint64_t *mem, uint16_t sqes_per_sqb_log2, uint64_t *sec_fc)
+{
+	uint64_t sq_cnt, sec_cnt, val;
+
+	sq_cnt = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t)*)mem, rte_memory_order_relaxed);
+	sq_cnt = (sq_cnt << sqes_per_sqb_log2) - sq_cnt;
+	sec_cnt = rte_atomic_load_explicit((RTE_ATOMIC(uint64_t)*)sec_fc,
+					   rte_memory_order_relaxed);
+	val = RTE_MAX(sq_cnt, sec_cnt);
+
+	return (val & 0xFFFF);
+}
+
 /* Common ethdev ops */
 extern struct eth_dev_ops cnxk_eth_dev_ops;
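
A short note on the arithmetic in the new inline helpers (the SQB layout detail is
an assumption inferred from the shift, not stated in the patch): fc_mem appears to
report the number of SQ buffers in use, and each SQB of 2^sqes_per_sqb_log2 entries
contributes 2^sqes_per_sqb_log2 - 1 usable SQEs, hence the (val << log2) - val
scaling; the security variant additionally reads the CPT flow-control counter and
reports the larger of the two values. A standalone sketch of the same scaling with
illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t fc_val = 3;             /* example: 3 SQBs reported in use */
            uint16_t sqes_per_sqb_log2 = 5;  /* example: 32 entries per SQB */

            /* Same scaling as cnxk_nix_tx_queue_count():
             * used = fc_val * (2^sqes_per_sqb_log2 - 1), truncated to 16 bits.
             */
            uint64_t used = (fc_val << sqes_per_sqb_log2) - fc_val;

            printf("used Tx descriptors: %u\n", (unsigned int)(used & 0xFFFF));
            return 0; /* prints 93, i.e. 3 * 31 */
    }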