[arch][riscv] spiff up the spinlock code a bit
Move the spinlock implementation out of inline routines, since the body is relatively large and to keep the disassembly clean. Have spinlocks store the holder cpu number + 1 instead of just 1. Add an appropriate release barrier to the unlock path.
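Storing the holder's cpu number + 1 keeps 0 meaning "unlocked" while making a held lock self-describing, which helps when staring at a wedged lock in a debugger. A hypothetical helper (not part of this change) shows how the value decodes:

// Hypothetical debug helper, not in this commit: with the
// "holder cpu + 1" convention, a nonzero value both marks the
// lock held and identifies which cpu holds it.
static inline int spin_lock_holder_cpu(spin_lock_t *lock) {
    unsigned int val = *lock;
    return (val == 0) ? -1 : (int)val - 1;  // -1 means not held
}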
arch/riscv/include/arch/spinlock.h:

@@ -9,6 +9,9 @@
+#include <arch/ops.h>
 #include <stdbool.h>
+#include <lk/compiler.h>
 
+__BEGIN_CDECLS
 
 #define SPIN_LOCK_INITIAL_VALUE (0)
 
 typedef volatile unsigned int spin_lock_t;
@@ -17,29 +20,20 @@ typedef volatile unsigned int spin_lock_t;
 typedef unsigned long spin_lock_saved_state_t;
 typedef unsigned int spin_lock_save_flags_t;
 
+void riscv_spin_lock(spin_lock_t *lock);
+void riscv_spin_unlock(spin_lock_t *lock);
+int riscv_spin_trylock(spin_lock_t *lock);
+
 static inline int arch_spin_trylock(spin_lock_t *lock) {
-    int tmp = 1, busy;
-
-    __asm__ __volatile__(
-        " amoswap.w %0, %2, %1\n"
-        " fence r , rw\n"
-        : "=r"(busy), "+A"(*lock)
-        : "r" (tmp)
-        : "memory"
-    );
-
-    return !busy;
+    return riscv_spin_trylock(lock);
 }
 
 static inline void arch_spin_lock(spin_lock_t *lock) {
-    while (1) {
-        if (*lock) continue;
-        if (arch_spin_trylock(lock)) break;
-    }
+    riscv_spin_lock(lock);
 }
 
 static inline void arch_spin_unlock(spin_lock_t *lock) {
-    *lock = 0;
+    riscv_spin_unlock(lock);
 }
 
 static inline void arch_spin_lock_init(spin_lock_t *lock) {
@@ -65,3 +59,4 @@ arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t
     riscv_csr_set(RISCV_CSR_XSTATUS, old_state);
 }
 
+__END_CDECLS
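The fence placement follows the usual acquire/release recipe: the "fence r, rw" after the amoswap keeps later loads and stores from being reordered above the lock acquisition, and the "fence rw, w" before the store of 0 keeps the critical section's accesses from leaking below the release. Roughly the same semantics expressed with C11 atomics, purely as an illustrative sketch (the kernel uses the inline-asm form, not <stdatomic.h>):

#include <stdatomic.h>

// Illustrative only: what the asm pairs correspond to in C11 terms.
static inline int c11_spin_trylock(atomic_uint *lock, unsigned int cpu) {
    // amoswap.w + "fence r, rw"  ~  exchange with acquire ordering
    return atomic_exchange_explicit(lock, cpu + 1, memory_order_acquire) == 0;
}

static inline void c11_spin_unlock(atomic_uint *lock) {
    // "fence rw, w" + plain store  ~  store with release ordering
    atomic_store_explicit(lock, 0, memory_order_release);
}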
arch/riscv/rules.mk:

@@ -10,6 +10,7 @@ MODULE_SRCS += $(LOCAL_DIR)/thread.c
 MODULE_SRCS += $(LOCAL_DIR)/mmu.cpp
 MODULE_SRCS += $(LOCAL_DIR)/mp.c
 MODULE_SRCS += $(LOCAL_DIR)/sbi.c
+MODULE_SRCS += $(LOCAL_DIR)/spinlock.c
 MODULE_SRCS += $(LOCAL_DIR)/time.c
 
 # one file uses slightly complicated designated initializer
arch/riscv/spinlock.c (new file, 54 lines):

@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Use of this source code is governed by a MIT-style
+ * license that can be found in the LICENSE file or at
+ * https://opensource.org/licenses/MIT
+ */
+#include <arch/spinlock.h>
+
+// super simple spin lock implementation
+
+int riscv_spin_trylock(spin_lock_t *lock) {
+    unsigned long val = arch_curr_cpu_num() + 1UL;
+    unsigned long old;
+
+    __asm__ __volatile__(
+        "amoswap.w %0, %2, %1\n"
+        "fence r, rw\n"
+        : "=r"(old), "+A"(*lock)
+        : "r" (val)
+        : "memory"
+    );
+
+    return !old;
+}
+
+void riscv_spin_lock(spin_lock_t *lock) {
+    unsigned long val = arch_curr_cpu_num() + 1UL;
+
+    for (;;) {
+        if (*lock) {
+            continue;
+        }
+
+        unsigned long old;
+        __asm__ __volatile__(
+            "amoswap.w %0, %2, %1\n"
+            "fence r, rw\n"
+            : "=r"(old), "+A"(*lock)
+            : "r" (val)
+            : "memory"
+        );
+
+        if (!old) {
+            break;
+        }
+    }
+}
+
+void riscv_spin_unlock(spin_lock_t *lock) {
+    __asm__ volatile("fence rw,w" ::: "memory");
+    *lock = 0;
+}
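Note that riscv_spin_lock spins on a plain read of *lock and only retries the amoswap once the lock looks free (the classic test-and-test-and-set shape), so a contended lock mostly spins in the local cache instead of bouncing the line with failed atomic writes. A minimal usage sketch of the arch-level API, illustrative only; real callers normally go through the kernel's higher-level spinlock wrappers, which also save and disable interrupts:

#include <arch/spinlock.h>

// Hypothetical caller, not part of this change.
static spin_lock_t example_lock = SPIN_LOCK_INITIAL_VALUE;

static void example_critical_section(void) {
    arch_spin_lock(&example_lock);    // spins until the amoswap wins
    // ... touch data shared between cpus ...
    arch_spin_unlock(&example_lock);  // fence rw,w then store 0
}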