[10/12] net/cnxk: resize CQ for Rx security for errata

Message ID 20220616070743.30658-10-ndabilpuram@marvell.com
State Changes Requested, archived
Delegated to: Jerin Jacob
Series [01/12] common/cnxk: use computed value for wqe skip

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Nithin Dabilpuram June 16, 2022, 7:07 a.m. UTC
  Resize CQ for Rx security offload in case of HW errata.

ci: skip_checkpatch skip_klocwork

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c | 43 +++++++++++++++++++++++++++++++++++++++++-
 drivers/net/cnxk/cnxk_ethdev.h |  2 +-
 2 files changed, 43 insertions(+), 2 deletions(-)
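
An aside for readers of the archive: the sizing rule this patch applies is easy to state in isolation. Below is a minimal standalone C sketch of the clamp arithmetic only; clamp_up() and its sample values are illustrative assumptions, not driver code (the real logic is nix_inl_cq_sz_clamp_up() in the diff below):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Cap used by the patch (CNXK_NIX_CQ_INL_CLAMP_MAX). */
#define CQ_INL_CLAMP_MAX (64UL * 1024UL)

/* Raise the CQ depth so it can hold every buffer the related
 * auras can supply (limit + 1), then cap it at 64K entries. */
static uint32_t clamp_up(uint64_t aura_limit, uint32_t nb_desc)
{
	uint64_t want = aura_limit + 1;

	if (want < nb_desc)
		want = nb_desc;
	if (want > CQ_INL_CLAMP_MAX)
		want = CQ_INL_CLAMP_MAX; /* the driver also warns here */
	return (uint32_t)want;
}

int main(void)
{
	printf("%" PRIu32 "\n", clamp_up(4096, 1024));   /* 4097 */
	printf("%" PRIu32 "\n", clamp_up(100000, 1024)); /* 65536 */
	return 0;
}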
  

Comments

Jerin Jacob June 16, 2022, 8:50 a.m. UTC | #1
On Thu, Jun 16, 2022 at 12:40 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> Resize CQ for Rx security offload in case of HW errata.
>
> ci: skip_checkpatch skip_klocwork

Remove this.

Please fix any ./devtools/checkpatches.sh and ./devtools/check-git-log.sh
issues in the series.

>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> ---
>  drivers/net/cnxk/cnxk_ethdev.c | 43 +++++++++++++++++++++++++++++++++++++++++-
>  drivers/net/cnxk/cnxk_ethdev.h |  2 +-
>  2 files changed, 43 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
> index 4ea1617..2418290 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.c
> +++ b/drivers/net/cnxk/cnxk_ethdev.c
> @@ -5,6 +5,8 @@
>
>  #include <rte_eventdev.h>
>
> +#define CNXK_NIX_CQ_INL_CLAMP_MAX (64UL * 1024UL)
> +
>  static inline uint64_t
>  nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
>  {
> @@ -40,6 +42,39 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
>         return speed_capa;
>  }
>
> +static uint32_t
> +nix_inl_cq_sz_clamp_up(struct roc_nix *nix, struct rte_mempool *mp,
> +                      uint32_t nb_desc)
> +{
> +       struct roc_nix_rq *inl_rq;
> +       uint64_t limit;
> +
> +       if (!roc_errata_cpt_hang_on_x2p_bp())
> +               return nb_desc;
> +
> +       /* CQ should be able to hold all buffers in first pass RQ's aura
> +        * and this RQ's aura.
> +        */
> +       inl_rq = roc_nix_inl_dev_rq(nix);
> +       if (!inl_rq) {
> +               /* This itself is going to be inline RQ's aura */
> +               limit = roc_npa_aura_op_limit_get(mp->pool_id);
> +       } else {
> +               limit = roc_npa_aura_op_limit_get(inl_rq->aura_handle);
> +               /* Also add this RQ's aura if it is different */
> +               if (inl_rq->aura_handle != mp->pool_id)
> +                       limit += roc_npa_aura_op_limit_get(mp->pool_id);
> +       }
> +       nb_desc = PLT_MAX(limit + 1, nb_desc);
> +       if (nb_desc > CNXK_NIX_CQ_INL_CLAMP_MAX) {
> +               plt_warn("Could not setup CQ size to accommodate"
> +                        " all buffers in related auras (%" PRIu64 ")",
> +                        limit);
> +               nb_desc = CNXK_NIX_CQ_INL_CLAMP_MAX;
> +       }
> +       return nb_desc;
> +}
> +
>  int
>  cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
>  {
> @@ -504,7 +539,7 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
>
>  int
>  cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
> -                       uint16_t nb_desc, uint16_t fp_rx_q_sz,
> +                       uint32_t nb_desc, uint16_t fp_rx_q_sz,
>                         const struct rte_eth_rxconf *rx_conf,
>                         struct rte_mempool *mp)
>  {
> @@ -552,6 +587,12 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
>             dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
>                 roc_nix_inl_dev_xaq_realloc(mp->pool_id);
>
> +       /* Increase CQ size to Aura size to avoid CQ overflow and
> +        * the resulting CPT buffer leak.
> +        */
> +       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
> +               nb_desc = nix_inl_cq_sz_clamp_up(nix, mp, nb_desc);
> +
>         /* Setup ROC CQ */
>         cq = &dev->cqs[qid];
>         cq->qid = qid;
> diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
> index a4e96f0..4cb7c9e 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.h
> +++ b/drivers/net/cnxk/cnxk_ethdev.h
> @@ -530,7 +530,7 @@ int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
>                             uint16_t nb_desc, uint16_t fp_tx_q_sz,
>                             const struct rte_eth_txconf *tx_conf);
>  int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
> -                           uint16_t nb_desc, uint16_t fp_rx_q_sz,
> +                           uint32_t nb_desc, uint16_t fp_rx_q_sz,
>                             const struct rte_eth_rxconf *rx_conf,
>                             struct rte_mempool *mp);
>  int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
> --
> 2.8.4
>
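
A note on the prototype change quoted above (uint16_t -> uint32_t for nb_desc): the clamp ceiling is 64 * 1024 = 65536, which is one more than UINT16_MAX, so the widened type is required for the internal descriptor count. A trivial standalone check, illustrative only and not part of the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cap = 64UL * 1024UL; /* CNXK_NIX_CQ_INL_CLAMP_MAX */

	/* 65536 > UINT16_MAX (65535): a uint16_t nb_desc clamped
	 * up to the cap would silently wrap to 0. */
	printf("cap=%" PRIu64 ", fits in uint16_t: %s\n",
	       cap, cap <= UINT16_MAX ? "yes" : "no");
	return 0;
}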
  

Patch

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 4ea1617..2418290 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -5,6 +5,8 @@ 
 
 #include <rte_eventdev.h>
 
+#define CNXK_NIX_CQ_INL_CLAMP_MAX (64UL * 1024UL)
+
 static inline uint64_t
 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 {
@@ -40,6 +42,39 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	return speed_capa;
 }
 
+static uint32_t
+nix_inl_cq_sz_clamp_up(struct roc_nix *nix, struct rte_mempool *mp,
+		       uint32_t nb_desc)
+{
+	struct roc_nix_rq *inl_rq;
+	uint64_t limit;
+
+	if (!roc_errata_cpt_hang_on_x2p_bp())
+		return nb_desc;
+
+	/* CQ should be able to hold all buffers in first pass RQ's aura
+	 * and this RQ's aura.
+	 */
+	inl_rq = roc_nix_inl_dev_rq(nix);
+	if (!inl_rq) {
+		/* This itself is going to be inline RQ's aura */
+		limit = roc_npa_aura_op_limit_get(mp->pool_id);
+	} else {
+		limit = roc_npa_aura_op_limit_get(inl_rq->aura_handle);
+		/* Also add this RQ's aura if it is different */
+		if (inl_rq->aura_handle != mp->pool_id)
+			limit += roc_npa_aura_op_limit_get(mp->pool_id);
+	}
+	nb_desc = PLT_MAX(limit + 1, nb_desc);
+	if (nb_desc > CNXK_NIX_CQ_INL_CLAMP_MAX) {
+		plt_warn("Could not setup CQ size to accommodate"
+			 " all buffers in related auras (%" PRIu64 ")",
+			 limit);
+		nb_desc = CNXK_NIX_CQ_INL_CLAMP_MAX;
+	}
+	return nb_desc;
+}
+
 int
 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
 {
@@ -504,7 +539,7 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 
 int
 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
-			uint16_t nb_desc, uint16_t fp_rx_q_sz,
+			uint32_t nb_desc, uint16_t fp_rx_q_sz,
 			const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp)
 {
@@ -552,6 +587,12 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	    dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		roc_nix_inl_dev_xaq_realloc(mp->pool_id);
 
+	/* Increase CQ size to Aura size to avoid CQ overflow and
+	 * the resulting CPT buffer leak.
+	 */
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+		nb_desc = nix_inl_cq_sz_clamp_up(nix, mp, nb_desc);
+
 	/* Setup ROC CQ */
 	cq = &dev->cqs[qid];
 	cq->qid = qid;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index a4e96f0..4cb7c9e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -530,7 +530,7 @@ int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			    uint16_t nb_desc, uint16_t fp_tx_q_sz,
 			    const struct rte_eth_txconf *tx_conf);
 int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
-			    uint16_t nb_desc, uint16_t fp_rx_q_sz,
+			    uint32_t nb_desc, uint16_t fp_rx_q_sz,
 			    const struct rte_eth_rxconf *rx_conf,
 			    struct rte_mempool *mp);
 int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
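
For completeness: the public ethdev API is unchanged by this series; applications still pass a 16-bit descriptor count, and any clamp-up happens inside the PMD. A hedged usage sketch follows; setup_secure_rxq() and the surrounding port/mempool plumbing are assumptions for illustration, and the security-session setup needed for actual inline processing is omitted:

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static int setup_secure_rxq(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = {
		.rxmode = { .offloads = RTE_ETH_RX_OFFLOAD_SECURITY },
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* nb_desc = 1024 is only a hint here: with the errata
	 * workaround active, the cnxk PMD may size the CQ up to the
	 * related auras' limit + 1, capped at 64K entries. */
	return rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				      NULL, mp);
}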