[1/2] event/cnxk: remove deschedule usage in CN9K

Message ID 20220219121338.2438-1-pbhagavatula@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Series [1/2] event/cnxk: remove deschedule usage in CN9K

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Pavan Nikhilesh Bhagavatula Feb. 19, 2022, 12:13 p.m. UTC
  From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Using the deschedule command might incorrectly ignore updates to the WQE
and GGRP on CN9K.
Use add_work to pipeline work instead.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn9k_worker.h | 41 +++++++++++++++++++++++++-------
 1 file changed, 32 insertions(+), 9 deletions(-)
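
For reference, the add_work helper that replaces the deschedule path pairs the
tag and schedule type with the WQE pointer and stores both to the destination
group's add-work registers. A minimal sketch, with the body approximated from
the driver's common code in cnxk_worker.h (the upstream implementation may
differ in detail):

	/* Approximate sketch of the add_work helper used by this patch:
	 * word 0 carries the tag plus the schedule type above bit 32,
	 * word 1 carries the WQE (event) pointer; both are stored as a
	 * pair to the destination group's add-work register area. */
	static __rte_always_inline void
	cnxk_sso_hws_add_work(const uint64_t event_ptr, const uint32_t tag,
			      const uint8_t new_tt, const uintptr_t grp_base)
	{
		uint64_t add_work0;

		/* Tag value in the low 32 bits, schedule type (2 bits) above. */
		add_work0 = tag | ((uint64_t)(new_tt & 0x3) << 32);
		/* Paired 64-bit stores of {add_work0, event_ptr}. */
		roc_store_pair(add_work0, event_ptr, grp_base);
	}
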
  

Comments

Jerin Jacob Feb. 22, 2022, 9:51 a.m. UTC | #1
On Sat, Feb 19, 2022 at 6:05 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Using the deschedule command might incorrectly ignore updates to the WQE
> and GGRP on CN9K.
> Use add_work to pipeline work instead.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>


Series applied to dpdk-next-net-eventdev/for-main. Thanks


Patch

diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 79374b8d95..0905d744cc 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -63,15 +63,18 @@  cn9k_sso_hws_fwd_swtag(uint64_t base, const struct rte_event *ev)
 }
 
 static __rte_always_inline void
-cn9k_sso_hws_fwd_group(uint64_t base, const struct rte_event *ev,
-		       const uint16_t grp)
+cn9k_sso_hws_new_event_wait(struct cn9k_sso_hws *ws, const struct rte_event *ev)
 {
 	const uint32_t tag = (uint32_t)ev->event;
 	const uint8_t new_tt = ev->sched_type;
+	const uint64_t event_ptr = ev->u64;
+	const uint16_t grp = ev->queue_id;
 
-	plt_write64(ev->u64, base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
-	cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
-				   base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
+	while (ws->xaq_lmt <= __atomic_load_n(ws->fc_mem, __ATOMIC_RELAXED))
+		;
+
+	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
+			      ws->grp_base + (grp << 12));
 }
 
 static __rte_always_inline void
@@ -86,10 +89,12 @@  cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
 	} else {
 		/*
 		 * Group has been changed for group based work pipelining,
-		 * Use deschedule/add_work operation to transfer the event to
+		 * Use add_work operation to transfer the event to
 		 * new group/core
 		 */
-		cn9k_sso_hws_fwd_group(ws->base, ev, grp);
+		rte_atomic_thread_fence(__ATOMIC_RELEASE);
+		roc_sso_hws_head_wait(ws->base);
+		cn9k_sso_hws_new_event_wait(ws, ev);
 	}
 }
 
@@ -113,6 +118,22 @@  cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
 	return 1;
 }
 
+static __rte_always_inline void
+cn9k_sso_hws_dual_new_event_wait(struct cn9k_sso_hws_dual *dws,
+				 const struct rte_event *ev)
+{
+	const uint32_t tag = (uint32_t)ev->event;
+	const uint8_t new_tt = ev->sched_type;
+	const uint64_t event_ptr = ev->u64;
+	const uint16_t grp = ev->queue_id;
+
+	while (dws->xaq_lmt <= __atomic_load_n(dws->fc_mem, __ATOMIC_RELAXED))
+		;
+
+	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
+			      dws->grp_base + (grp << 12));
+}
+
 static __rte_always_inline void
 cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
 				const struct rte_event *ev)
@@ -126,10 +147,12 @@  cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
 	} else {
 		/*
 		 * Group has been changed for group based work pipelining,
-		 * Use deschedule/add_work operation to transfer the event to
+		 * Use add_work operation to transfer the event to
 		 * new group/core
 		 */
-		cn9k_sso_hws_fwd_group(base, ev, grp);
+		rte_atomic_thread_fence(__ATOMIC_RELEASE);
+		roc_sso_hws_head_wait(base);
+		cn9k_sso_hws_dual_new_event_wait(dws, ev);
 	}
 }
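
For context, the group-change branch above is taken when an application
forwards an event to a different event queue. A minimal sketch of that forward
path using the public eventdev API; the device, port, and queue ids are
illustrative:

	#include <rte_eventdev.h>

	/* Worker-loop fragment: dequeue an event, process it, and forward it
	 * to the next pipeline stage (a different event queue). On cn9k this
	 * takes the group-change branch patched above, which now uses
	 * add_work instead of SWTAG_DESCHED. */
	static void
	forward_stage(uint8_t dev_id, uint8_t port_id, uint8_t next_queue)
	{
		struct rte_event ev;

		if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
			return;

		/* ... process ev.mbuf / ev.u64 here ... */

		ev.op = RTE_EVENT_OP_FORWARD;
		ev.queue_id = next_queue; /* new group: takes the add_work path */
		ev.sched_type = RTE_SCHED_TYPE_ATOMIC;

		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) == 0)
			; /* retry until the event is accepted */
	}
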