From 11a39f545bdf2d37f6dfa490263dbf0e9b3384a8 Mon Sep 17 00:00:00 2001
From: Travis Geiselbrecht
Date: Wed, 6 Oct 2021 23:20:48 -0700
Subject: [PATCH] [arch][arm] add debug asserts to the non SMP spinlock
 routines

This asserts that the spinlock is not already held when acquiring (and,
conversely, that it is held when releasing), and that interrupts are
disabled in both cases.
---
 app/tests/thread_tests.c         | 2 ++
 arch/arm/include/arch/spinlock.h | 8 ++++++++
 2 files changed, 10 insertions(+)

diff --git a/app/tests/thread_tests.c b/app/tests/thread_tests.c
index fb5944b9..5f319697 100644
--- a/app/tests/thread_tests.c
+++ b/app/tests/thread_tests.c
@@ -558,12 +558,14 @@ static void spinlock_test(void) {
     printf("seems to work\n");
 
 #define COUNT (1024*1024)
+    arch_interrupt_save(&state, SPIN_LOCK_FLAG_INTERRUPTS);
     uint32_t c = arch_cycle_count();
     for (uint i = 0; i < COUNT; i++) {
         spin_lock(&lock);
         spin_unlock(&lock);
     }
     c = arch_cycle_count() - c;
+    arch_interrupt_restore(state, SPIN_LOCK_FLAG_INTERRUPTS);
 
     printf("%u cycles to acquire/release lock %u times (%u cycles per)\n",
            c, COUNT, c / COUNT);
diff --git a/arch/arm/include/arch/spinlock.h b/arch/arm/include/arch/spinlock.h
index dcd1d033..48a21fcb 100644
--- a/arch/arm/include/arch/spinlock.h
+++ b/arch/arm/include/arch/spinlock.h
@@ -8,6 +8,7 @@
 #pragma once
 
 #include
+#include
 #include
 #include
 
@@ -36,15 +37,22 @@ void arch_spin_unlock(spin_lock_t *lock);
 
 #else
 
+/* Non-SMP spinlocks are mostly vestigial; the asserts try to catch latent locking problems. */
 static inline void arch_spin_lock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock == 0);
     *lock = 1;
 }
 
 static inline int arch_spin_trylock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock == 0);
     return 0;
 }
 
 static inline void arch_spin_unlock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock != 0);
     *lock = 0;
 }
 
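
Note on the thread_tests.c hunk: once arch_spin_lock() asserts
arch_ints_disabled(), the timing loop has to run with interrupts masked,
which is why the hunk brackets it with arch_interrupt_save() and
arch_interrupt_restore(). A minimal caller-side sketch of the expected
pattern (assuming LK's spin_lock_saved_state_t type and
SPIN_LOCK_INITIAL_VALUE initializer; the test's `state` and `lock`
variables are presumably declared like this earlier in spinlock_test()):

    spin_lock_t lock = SPIN_LOCK_INITIAL_VALUE;
    spin_lock_saved_state_t state;

    arch_interrupt_save(&state, SPIN_LOCK_FLAG_INTERRUPTS); /* mask interrupts */
    spin_lock(&lock);       /* asserts: ints disabled, *lock == 0 */
    /* ... critical section ... */
    spin_unlock(&lock);     /* asserts: ints disabled, *lock != 0 */
    arch_interrupt_restore(state, SPIN_LOCK_FLAG_INTERRUPTS);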
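
The invariants the new asserts encode can also be exercised outside the
tree with a self-contained sketch (standard C; the DEBUG_ASSERT macro,
arch_ints_disabled(), and the ints_disabled flag below are illustrative
stand-ins, not the LK definitions):

    #include <assert.h>
    #include <stdio.h>

    typedef unsigned long spin_lock_t;

    static int ints_disabled = 0;              /* fake interrupt state */

    #define DEBUG_ASSERT(x) assert(x)          /* stand-in for LK's macro */

    static int arch_ints_disabled(void) {
        return ints_disabled;
    }

    static void arch_spin_lock(spin_lock_t *lock) {
        DEBUG_ASSERT(arch_ints_disabled());    /* caller must mask interrupts first */
        DEBUG_ASSERT(*lock == 0);              /* must not already be held */
        *lock = 1;
    }

    static void arch_spin_unlock(spin_lock_t *lock) {
        DEBUG_ASSERT(arch_ints_disabled());
        DEBUG_ASSERT(*lock != 0);              /* must be held to release */
        *lock = 0;
    }

    int main(void) {
        spin_lock_t lock = 0;
        ints_disabled = 1;                     /* simulate arch_interrupt_save() */
        arch_spin_lock(&lock);
        arch_spin_unlock(&lock);
        ints_disabled = 0;                     /* simulate arch_interrupt_restore() */
        printf("spinlock invariants hold\n");
        return 0;
    }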