[arch][arm] add debug asserts to the non SMP spinlock routines

Assert that the spinlock is not already held when acquiring (and, conversely,
that it is held when releasing), and that interrupts are disabled.
Author: Travis Geiselbrecht
Date:   2021-10-06 23:20:48 -07:00
parent e7c42e22ce
commit 11a39f545b
2 changed files with 10 additions and 0 deletions
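For context, a minimal sketch of the contract the new asserts enforce, modeled on the benchmark change below; the function, the lock variable, the include path, and the spin_lock_saved_state_t type name are illustrative assumptions, not part of this commit:

#include <kernel/spinlock.h>   /* assumed include; pulls in the arch spinlock routines */

static spin_lock_t lock;       /* zero-initialized, i.e. unlocked per the asserts */

static void critical_section_example(void) {
    spin_lock_saved_state_t state;   /* assumed name of the saved-state type */

    /* correct pattern: interrupts stay off for the whole time the lock is held */
    arch_interrupt_save(&state, SPIN_LOCK_FLAG_INTERRUPTS);
    spin_lock(&lock);
    /* ... touch the protected state ... */
    spin_unlock(&lock);
    arch_interrupt_restore(state, SPIN_LOCK_FLAG_INTERRUPTS);

    /* with the new asserts, a DEBUG build would catch:
     *  - spin_lock() with interrupts still enabled  -> DEBUG_ASSERT(arch_ints_disabled())
     *  - acquiring a lock that is already held      -> DEBUG_ASSERT(*lock == 0)
     *  - releasing a lock that is not held          -> DEBUG_ASSERT(*lock != 0)
     */
}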


@@ -558,12 +558,14 @@ static void spinlock_test(void) {
     printf("seems to work\n");
 #define COUNT (1024*1024)
+    arch_interrupt_save(&state, SPIN_LOCK_FLAG_INTERRUPTS);
     uint32_t c = arch_cycle_count();
     for (uint i = 0; i < COUNT; i++) {
         spin_lock(&lock);
         spin_unlock(&lock);
     }
     c = arch_cycle_count() - c;
+    arch_interrupt_restore(state, SPIN_LOCK_FLAG_INTERRUPTS);
     printf("%u cycles to acquire/release lock %u times (%u cycles per)\n", c, COUNT, c / COUNT);


@@ -8,6 +8,7 @@
 #pragma once
 #include <lk/compiler.h>
+#include <assert.h>
 #include <arch/ops.h>
 #include <stdbool.h>
@@ -36,15 +37,22 @@ void arch_spin_unlock(spin_lock_t *lock);
 #else
+/* Non-SMP spinlocks are mostly vestigial to try to catch pending locking problems. */
 static inline void arch_spin_lock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock == 0);
     *lock = 1;
 }
 static inline int arch_spin_trylock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock == 0);
     return 0;
 }
 static inline void arch_spin_unlock(spin_lock_t *lock) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    DEBUG_ASSERT(*lock != 0);
     *lock = 0;
 }
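
As a usage note (not part of this diff): most call sites do not pair arch_interrupt_save()/arch_interrupt_restore() by hand but go through LK's spin_lock_irqsave()/spin_unlock_irqrestore() wrappers, which disable and restore interrupts around the acquire/release and therefore keep the arch_ints_disabled() assert satisfied. The exact wrapper names/signatures and the lock declaration below are assumptions for illustration, not something this commit touches:

#include <kernel/spinlock.h>   /* assumed location of the wrapper macros */

static spin_lock_t list_lock;              /* hypothetical lock protecting some data */

static void list_update_example(void) {
    spin_lock_saved_state_t state;

    spin_lock_irqsave(&list_lock, state);  /* disables interrupts, then acquires */
    /* ... modify the protected data ... */
    spin_unlock_irqrestore(&list_lock, state);
}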