[v3,1/5] event/octeontx2: fix TIM HW race condition

Message ID 20191122154431.17416-1-pbhagavatula@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Headers
Series [v3,1/5] event/octeontx2: fix TIM HW race condition |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-compilation success Compile Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/Intel-compilation fail Compilation issues

Commit Message

Pavan Nikhilesh Bhagavatula Nov. 22, 2019, 3:44 p.m. UTC
  From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Fix HW race condition observed when timeout resolution is low (<5us).
When HW traverses a given TIM bucket it will clear chunk_remainder,
but since SW always decreases the chunk_remainder at the start of the
arm routine it might cause a race where SW updates chunk_remainder
after HW has cleared it, which leads to nasty side effects.

Fixes: 95e4e4ec7469 ("event/octeontx2: add timer arm timeout burst")

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/octeontx2/otx2_tim_worker.h | 141 +++++++++++++++++++---
 1 file changed, 124 insertions(+), 17 deletions(-)
  

Comments

Pavan Nikhilesh Bhagavatula Nov. 22, 2019, 4:07 p.m. UTC | #1
+Cc: stable@dpdk.org

>-----Original Message-----
>From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
>Sent: Friday, November 22, 2019 9:14 PM
>To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh
>Bhagavatula <pbhagavatula@marvell.com>
>Cc: dev@dpdk.org
>Subject: [dpdk-dev] [PATCH v3 1/5] event/octeontx2: fix TIM HW race
>condition
>
>From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
>Fix HW race condition observed when timeout resolution is low (<5us).
>When HW traverses a given TIM bucket it will clear chunk_remainder,
>but since SW always decreases the chunk_remainder at the start of the
>arm routine it might cause a race where SW updates chunk_remainder
>after HW has cleared it that lead to nasty side effects.
>
>Fixes: 95e4e4ec7469 ("event/octeontx2: add timer arm timeout burst")
>
>Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>---
> drivers/event/octeontx2/otx2_tim_worker.h | 141
>+++++++++++++++++++---
> 1 file changed, 124 insertions(+), 17 deletions(-)
>
>diff --git a/drivers/event/octeontx2/otx2_tim_worker.h
>b/drivers/event/octeontx2/otx2_tim_worker.h
>index 50db6543c..c896b5433 100644
>--- a/drivers/event/octeontx2/otx2_tim_worker.h
>+++ b/drivers/event/octeontx2/otx2_tim_worker.h
>@@ -7,6 +7,13 @@
>
> #include "otx2_tim_evdev.h"
>
>+static inline uint8_t
>+tim_bkt_fetch_lock(uint64_t w1)
>+{
>+	return (w1 >> TIM_BUCKET_W1_S_LOCK) &
>+		TIM_BUCKET_W1_M_LOCK;
>+}
>+
> static inline int16_t
> tim_bkt_fetch_rem(uint64_t w1)
> {
>@@ -188,7 +195,6 @@ tim_insert_chunk(struct otx2_tim_bkt * const
>bkt,
> 	} else {
> 		bkt->first_chunk = (uintptr_t)chunk;
> 	}
>-
> 	return chunk;
> }
>
>@@ -208,11 +214,38 @@ tim_add_entry_sp(struct otx2_tim_ring *
>const tim_ring,
>
> __retry:
> 	/* Get Bucket sema*/
>-	lock_sema = tim_bkt_fetch_sema(bkt);
>+	lock_sema = tim_bkt_fetch_sema_lock(bkt);
>
> 	/* Bucket related checks. */
>-	if (unlikely(tim_bkt_get_hbt(lock_sema)))
>-		goto __retry;
>+	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
>+		if (tim_bkt_get_nent(lock_sema) != 0) {
>+			uint64_t hbt_state;
>+#ifdef RTE_ARCH_ARM64
>+			asm volatile(
>+					"	ldaxr %[hbt], [%[w1]]
>	\n"
>+					"	tbz %[hbt], 33, dne%=
>	\n"
>+					"	sevl
>	\n"
>+					"rty%=: wfe
>	\n"
>+					"	ldaxr %[hbt], [%[w1]]
>	\n"
>+					"	tbnz %[hbt], 33, rty%=
>	\n"
>+					"dne%=:
>	\n"
>+					: [hbt] "=&r" (hbt_state)
>+					: [w1] "r" ((&bkt->w1))
>+					: "memory"
>+					);
>+#else
>+			do {
>+				hbt_state = __atomic_load_n(&bkt-
>>w1,
>+						__ATOMIC_ACQUIRE);
>+			} while (hbt_state & BIT_ULL(33));
>+#endif
>+
>+			if (!(hbt_state & BIT_ULL(34))) {
>+				tim_bkt_dec_lock(bkt);
>+				goto __retry;
>+			}
>+		}
>+	}
>
> 	/* Insert the work. */
> 	rem = tim_bkt_fetch_rem(lock_sema);
>@@ -224,14 +257,15 @@ tim_add_entry_sp(struct otx2_tim_ring *
>const tim_ring,
> 			chunk = tim_insert_chunk(bkt, tim_ring);
>
> 		if (unlikely(chunk == NULL)) {
>-			tim_bkt_set_rem(bkt, 0);
>+			bkt->chunk_remainder = 0;
>+			tim_bkt_dec_lock(bkt);
> 			tim->impl_opaque[0] = 0;
> 			tim->impl_opaque[1] = 0;
> 			tim->state = RTE_EVENT_TIMER_ERROR;
> 			return -ENOMEM;
> 		}
> 		bkt->current_chunk = (uintptr_t)chunk;
>-		tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
>+		bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1;
> 	} else {
> 		chunk = (struct otx2_tim_ent *)(uintptr_t)bkt-
>>current_chunk;
> 		chunk += tim_ring->nb_chunk_slots - rem;
>@@ -241,6 +275,7 @@ tim_add_entry_sp(struct otx2_tim_ring * const
>tim_ring,
> 	*chunk = *pent;
>
> 	tim_bkt_inc_nent(bkt);
>+	tim_bkt_dec_lock(bkt);
>
> 	tim->impl_opaque[0] = (uintptr_t)chunk;
> 	tim->impl_opaque[1] = (uintptr_t)bkt;
>@@ -263,19 +298,60 @@ tim_add_entry_mp(struct otx2_tim_ring *
>const tim_ring,
>
> __retry:
> 	bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
>-
> 	/* Get Bucket sema*/
> 	lock_sema = tim_bkt_fetch_sema_lock(bkt);
>
> 	/* Bucket related checks. */
> 	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
>-		tim_bkt_dec_lock(bkt);
>-		goto __retry;
>+		if (tim_bkt_get_nent(lock_sema) != 0) {
>+			uint64_t hbt_state;
>+#ifdef RTE_ARCH_ARM64
>+			asm volatile(
>+					"	ldaxr %[hbt], [%[w1]]
>	\n"
>+					"	tbz %[hbt], 33, dne%=
>	\n"
>+					"	sevl
>	\n"
>+					"rty%=: wfe
>	\n"
>+					"	ldaxr %[hbt], [%[w1]]
>	\n"
>+					"	tbnz %[hbt], 33, rty%=
>	\n"
>+					"dne%=:
>	\n"
>+					: [hbt] "=&r" (hbt_state)
>+					: [w1] "r" ((&bkt->w1))
>+					: "memory"
>+					);
>+#else
>+			do {
>+				hbt_state = __atomic_load_n(&bkt-
>>w1,
>+						__ATOMIC_ACQUIRE);
>+			} while (hbt_state & BIT_ULL(33));
>+#endif
>+
>+			if (!(hbt_state & BIT_ULL(34))) {
>+				tim_bkt_dec_lock(bkt);
>+				goto __retry;
>+			}
>+		}
> 	}
>
> 	rem = tim_bkt_fetch_rem(lock_sema);
>-
> 	if (rem < 0) {
>+#ifdef RTE_ARCH_ARM64
>+		asm volatile(
>+				"	ldaxrh %w[rem], [%[crem]]
>	\n"
>+				"	tbz %w[rem], 15, dne%=
>	\n"
>+				"	sevl
>	\n"
>+				"rty%=: wfe
>	\n"
>+				"	ldaxrh %w[rem], [%[crem]]
>	\n"
>+				"	tbnz %w[rem], 15, rty%=
>	\n"
>+				"dne%=:
>	\n"
>+				: [rem] "=&r" (rem)
>+				: [crem] "r" (&bkt->chunk_remainder)
>+				: "memory"
>+			    );
>+#else
>+		while (__atomic_load_n(&bkt->chunk_remainder,
>+				       __ATOMIC_ACQUIRE) < 0)
>+			;
>+#endif
> 		/* Goto diff bucket. */
> 		tim_bkt_dec_lock(bkt);
> 		goto __retry;
>@@ -294,17 +370,23 @@ tim_add_entry_mp(struct otx2_tim_ring *
>const tim_ring,
> 			tim->state = RTE_EVENT_TIMER_ERROR;
> 			return -ENOMEM;
> 		}
>-		bkt->current_chunk = (uintptr_t)chunk;
>-		tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
>+		*chunk = *pent;
>+		while (tim_bkt_fetch_lock(lock_sema) !=
>+				(-tim_bkt_fetch_rem(lock_sema)))
>+			lock_sema = __atomic_load_n(&bkt->w1,
>__ATOMIC_ACQUIRE);
>+
>+		bkt->current_chunk =  (uintptr_t)chunk;
>+		__atomic_store_n(&bkt->chunk_remainder,
>+				tim_ring->nb_chunk_slots - 1,
>__ATOMIC_RELEASE);
> 	} else {
>-		chunk = (struct otx2_tim_ent *)(uintptr_t)bkt-
>>current_chunk;
>+		chunk = (struct otx2_tim_ent *)bkt->current_chunk;
> 		chunk += tim_ring->nb_chunk_slots - rem;
>+		*chunk = *pent;
> 	}
>
> 	/* Copy work entry. */
>-	*chunk = *pent;
>-	tim_bkt_dec_lock(bkt);
> 	tim_bkt_inc_nent(bkt);
>+	tim_bkt_dec_lock(bkt);
> 	tim->impl_opaque[0] = (uintptr_t)chunk;
> 	tim->impl_opaque[1] = (uintptr_t)bkt;
> 	tim->state = RTE_EVENT_TIMER_ARMED;
>@@ -360,8 +442,33 @@ tim_add_entry_brst(struct otx2_tim_ring *
>const tim_ring,
>
> 	/* Bucket related checks. */
> 	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
>-		tim_bkt_dec_lock(bkt);
>-		goto __retry;
>+		if (tim_bkt_get_nent(lock_sema) != 0) {
>+			uint64_t hbt_state;
>+#ifdef RTE_ARCH_ARM64
>+			asm volatile(
>+					"	ldaxr %[hbt], [%[w1]]
>	\n"
>+					"	tbz %[hbt], 33, dne%=
>	\n"
>+					"	sevl
>	\n"
>+					"rty%=: wfe
>	\n"
>+					"	ldaxr %[hbt], [%[w1]]
>	\n"
>+					"	tbnz %[hbt], 33, rty%=
>	\n"
>+					"dne%=:
>	\n"
>+					: [hbt] "=&r" (hbt_state)
>+					: [w1] "r" ((&bkt->w1))
>+					: "memory"
>+					);
>+#else
>+			do {
>+				hbt_state = __atomic_load_n(&bkt-
>>w1,
>+						__ATOMIC_ACQUIRE);
>+			} while (hbt_state & BIT_ULL(33));
>+#endif
>+
>+			if (!(hbt_state & BIT_ULL(34))) {
>+				tim_bkt_dec_lock(bkt);
>+				goto __retry;
>+			}
>+		}
> 	}
>
> 	chunk_remainder = tim_bkt_fetch_rem(lock_sema);
>--
>2.17.1
  
Jerin Jacob Nov. 23, 2019, 9 a.m. UTC | #2
On Sat, Nov 23, 2019 at 1:07 AM Pavan Nikhilesh Bhagavatula
<pbhagavatula@marvell.com> wrote:
>
> +Cc: stable@dpdk.org
>
> >-----Original Message-----
> >From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> >Sent: Friday, November 22, 2019 9:14 PM
> >To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh
> >Bhagavatula <pbhagavatula@marvell.com>
> >Cc: dev@dpdk.org
> >Subject: [dpdk-dev] [PATCH v3 1/5] event/octeontx2: fix TIM HW race
> >condition
> >
> >From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >
> >Fix HW race condition observed when timeout resolution is low (<5us).
> >When HW traverses a given TIM bucket it will clear chunk_remainder,
> >but since SW always decreases the chunk_remainder at the start of the
> >arm routine it might cause a race where SW updates chunk_remainder
> >after HW has cleared it that lead to nasty side effects.
> >
> >Fixes: 95e4e4ec7469 ("event/octeontx2: add timer arm timeout burst")
> >
> >Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>

Series applied to dpdk-next-eventdev/master. Thanks.
  

Patch

diff --git a/drivers/event/octeontx2/otx2_tim_worker.h b/drivers/event/octeontx2/otx2_tim_worker.h
index 50db6543c..c896b5433 100644
--- a/drivers/event/octeontx2/otx2_tim_worker.h
+++ b/drivers/event/octeontx2/otx2_tim_worker.h
@@ -7,6 +7,13 @@ 
 
 #include "otx2_tim_evdev.h"
 
+static inline uint8_t
+tim_bkt_fetch_lock(uint64_t w1)
+{
+	return (w1 >> TIM_BUCKET_W1_S_LOCK) &
+		TIM_BUCKET_W1_M_LOCK;
+}
+
 static inline int16_t
 tim_bkt_fetch_rem(uint64_t w1)
 {
@@ -188,7 +195,6 @@  tim_insert_chunk(struct otx2_tim_bkt * const bkt,
 	} else {
 		bkt->first_chunk = (uintptr_t)chunk;
 	}
-
 	return chunk;
 }
 
@@ -208,11 +214,38 @@  tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
 
 __retry:
 	/* Get Bucket sema*/
-	lock_sema = tim_bkt_fetch_sema(bkt);
+	lock_sema = tim_bkt_fetch_sema_lock(bkt);
 
 	/* Bucket related checks. */
-	if (unlikely(tim_bkt_get_hbt(lock_sema)))
-		goto __retry;
+	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
+		if (tim_bkt_get_nent(lock_sema) != 0) {
+			uint64_t hbt_state;
+#ifdef RTE_ARCH_ARM64
+			asm volatile(
+					"	ldaxr %[hbt], [%[w1]]	\n"
+					"	tbz %[hbt], 33, dne%=	\n"
+					"	sevl			\n"
+					"rty%=: wfe			\n"
+					"	ldaxr %[hbt], [%[w1]]	\n"
+					"	tbnz %[hbt], 33, rty%=	\n"
+					"dne%=:				\n"
+					: [hbt] "=&r" (hbt_state)
+					: [w1] "r" ((&bkt->w1))
+					: "memory"
+					);
+#else
+			do {
+				hbt_state = __atomic_load_n(&bkt->w1,
+						__ATOMIC_ACQUIRE);
+			} while (hbt_state & BIT_ULL(33));
+#endif
+
+			if (!(hbt_state & BIT_ULL(34))) {
+				tim_bkt_dec_lock(bkt);
+				goto __retry;
+			}
+		}
+	}
 
 	/* Insert the work. */
 	rem = tim_bkt_fetch_rem(lock_sema);
@@ -224,14 +257,15 @@  tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
 			chunk = tim_insert_chunk(bkt, tim_ring);
 
 		if (unlikely(chunk == NULL)) {
-			tim_bkt_set_rem(bkt, 0);
+			bkt->chunk_remainder = 0;
+			tim_bkt_dec_lock(bkt);
 			tim->impl_opaque[0] = 0;
 			tim->impl_opaque[1] = 0;
 			tim->state = RTE_EVENT_TIMER_ERROR;
 			return -ENOMEM;
 		}
 		bkt->current_chunk = (uintptr_t)chunk;
-		tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
+		bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1;
 	} else {
 		chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
 		chunk += tim_ring->nb_chunk_slots - rem;
@@ -241,6 +275,7 @@  tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
 	*chunk = *pent;
 
 	tim_bkt_inc_nent(bkt);
+	tim_bkt_dec_lock(bkt);
 
 	tim->impl_opaque[0] = (uintptr_t)chunk;
 	tim->impl_opaque[1] = (uintptr_t)bkt;
@@ -263,19 +298,60 @@  tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
 
 __retry:
 	bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
-
 	/* Get Bucket sema*/
 	lock_sema = tim_bkt_fetch_sema_lock(bkt);
 
 	/* Bucket related checks. */
 	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
-		tim_bkt_dec_lock(bkt);
-		goto __retry;
+		if (tim_bkt_get_nent(lock_sema) != 0) {
+			uint64_t hbt_state;
+#ifdef RTE_ARCH_ARM64
+			asm volatile(
+					"	ldaxr %[hbt], [%[w1]]	\n"
+					"	tbz %[hbt], 33, dne%=	\n"
+					"	sevl			\n"
+					"rty%=: wfe			\n"
+					"	ldaxr %[hbt], [%[w1]]	\n"
+					"	tbnz %[hbt], 33, rty%=	\n"
+					"dne%=:				\n"
+					: [hbt] "=&r" (hbt_state)
+					: [w1] "r" ((&bkt->w1))
+					: "memory"
+					);
+#else
+			do {
+				hbt_state = __atomic_load_n(&bkt->w1,
+						__ATOMIC_ACQUIRE);
+			} while (hbt_state & BIT_ULL(33));
+#endif
+
+			if (!(hbt_state & BIT_ULL(34))) {
+				tim_bkt_dec_lock(bkt);
+				goto __retry;
+			}
+		}
 	}
 
 	rem = tim_bkt_fetch_rem(lock_sema);
-
 	if (rem < 0) {
+#ifdef RTE_ARCH_ARM64
+		asm volatile(
+				"	ldaxrh %w[rem], [%[crem]]	\n"
+				"	tbz %w[rem], 15, dne%=		\n"
+				"	sevl				\n"
+				"rty%=: wfe				\n"
+				"	ldaxrh %w[rem], [%[crem]]	\n"
+				"	tbnz %w[rem], 15, rty%=		\n"
+				"dne%=:					\n"
+				: [rem] "=&r" (rem)
+				: [crem] "r" (&bkt->chunk_remainder)
+				: "memory"
+			    );
+#else
+		while (__atomic_load_n(&bkt->chunk_remainder,
+				       __ATOMIC_ACQUIRE) < 0)
+			;
+#endif
 		/* Goto diff bucket. */
 		tim_bkt_dec_lock(bkt);
 		goto __retry;
@@ -294,17 +370,23 @@  tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
 			tim->state = RTE_EVENT_TIMER_ERROR;
 			return -ENOMEM;
 		}
-		bkt->current_chunk = (uintptr_t)chunk;
-		tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
+		*chunk = *pent;
+		while (tim_bkt_fetch_lock(lock_sema) !=
+				(-tim_bkt_fetch_rem(lock_sema)))
+			lock_sema = __atomic_load_n(&bkt->w1, __ATOMIC_ACQUIRE);
+
+		bkt->current_chunk =  (uintptr_t)chunk;
+		__atomic_store_n(&bkt->chunk_remainder,
+				tim_ring->nb_chunk_slots - 1, __ATOMIC_RELEASE);
 	} else {
-		chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
+		chunk = (struct otx2_tim_ent *)bkt->current_chunk;
 		chunk += tim_ring->nb_chunk_slots - rem;
+		*chunk = *pent;
 	}
 
 	/* Copy work entry. */
-	*chunk = *pent;
-	tim_bkt_dec_lock(bkt);
 	tim_bkt_inc_nent(bkt);
+	tim_bkt_dec_lock(bkt);
 	tim->impl_opaque[0] = (uintptr_t)chunk;
 	tim->impl_opaque[1] = (uintptr_t)bkt;
 	tim->state = RTE_EVENT_TIMER_ARMED;
@@ -360,8 +442,33 @@  tim_add_entry_brst(struct otx2_tim_ring * const tim_ring,
 
 	/* Bucket related checks. */
 	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
-		tim_bkt_dec_lock(bkt);
-		goto __retry;
+		if (tim_bkt_get_nent(lock_sema) != 0) {
+			uint64_t hbt_state;
+#ifdef RTE_ARCH_ARM64
+			asm volatile(
+					"	ldaxr %[hbt], [%[w1]]	\n"
+					"	tbz %[hbt], 33, dne%=	\n"
+					"	sevl			\n"
+					"rty%=: wfe			\n"
+					"	ldaxr %[hbt], [%[w1]]	\n"
+					"	tbnz %[hbt], 33, rty%=	\n"
+					"dne%=:				\n"
+					: [hbt] "=&r" (hbt_state)
+					: [w1] "r" ((&bkt->w1))
+					: "memory"
+					);
+#else
+			do {
+				hbt_state = __atomic_load_n(&bkt->w1,
+						__ATOMIC_ACQUIRE);
+			} while (hbt_state & BIT_ULL(33));
+#endif
+
+			if (!(hbt_state & BIT_ULL(34))) {
+				tim_bkt_dec_lock(bkt);
+				goto __retry;
+			}
+		}
 	}
 
 	chunk_remainder = tim_bkt_fetch_rem(lock_sema);