diff --git a/app/tests/thread_tests.c b/app/tests/thread_tests.c
index fb5944b9..5f319697 100644
--- a/app/tests/thread_tests.c
+++ b/app/tests/thread_tests.c
@@ -558,12 +558,14 @@ static void spinlock_test(void) {
     printf("seems to work\n");
 
 #define COUNT (1024*1024)
+    arch_interrupt_save(&state, SPIN_LOCK_FLAG_INTERRUPTS);
     uint32_t c = arch_cycle_count();
     for (uint i = 0; i < COUNT; i++) {
         spin_lock(&lock);
         spin_unlock(&lock);
     }
     c = arch_cycle_count() - c;
+    arch_interrupt_restore(state, SPIN_LOCK_FLAG_INTERRUPTS);
 
     printf("%u cycles to acquire/release lock %u times (%u cycles per)\n", c, COUNT, c / COUNT);
diff --git a/arch/arm/include/arch/spinlock.h b/arch/arm/include/arch/spinlock.h
index dcd1d033..48a21fcb 100644
--- a/arch/arm/include/arch/spinlock.h
+++ b/arch/arm/include/arch/spinlock.h
@@ -8,6 +8,7 @@
 #pragma once
 
 #include
+#include
 #include
 #include
 
@@ -36,15 +37,22 @@ void arch_spin_unlock(spin_lock_t *lock);
 
 #else
 
+/* Non-SMP spinlocks are mostly vestigial; they exist to try to catch latent locking problems. */
 static inline void arch_spin_lock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock == 0);
     *lock = 1;
 }
 
 static inline int arch_spin_trylock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock == 0);
     return 0;
 }
 
 static inline void arch_spin_unlock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock != 0);
     *lock = 0;
 }
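
Caller-side context (not part of the patch): with the new DEBUG_ASSERTs, a non-SMP build only permits taking a spinlock while interrupts are already disabled, which is why the benchmark loop in spinlock_test() is now bracketed by arch_interrupt_save()/arch_interrupt_restore(). A minimal sketch of that caller pattern follows; the spin_lock_saved_state_t type and the <kernel/spinlock.h> header are assumptions here, not something this diff introduces.

#include <kernel/spinlock.h>  /* assumed header for spin_lock_t and the interrupt save/restore helpers */

static void with_lock_held(spin_lock_t *lock) {
    spin_lock_saved_state_t state;  /* assumed LK saved-state type */

    /* Disable interrupts first, same as the test now does, so the
     * DEBUG_ASSERT(arch_ints_disabled()) in arch_spin_lock() holds. */
    arch_interrupt_save(&state, SPIN_LOCK_FLAG_INTERRUPTS);
    spin_lock(lock);

    /* ... critical section ... */

    spin_unlock(lock);
    arch_interrupt_restore(state, SPIN_LOCK_FLAG_INTERRUPTS);
}

Keeping a real 0/1 value in the lock word even without SMP is what lets the *lock == 0 and *lock != 0 asserts flag double-lock and double-unlock bugs on single-core builds.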