[arch] define the atomic routines in arch-neutral headers and use builtins
Move most arches over to using the builtin atomics, except for the few that still require a little more work.
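For context, a minimal sketch of how kernel code would consume the arch-neutral routines once they are backed by the compiler builtins; the refcount type, helper names, and include path here are hypothetical and not part of this commit:

#include <stdbool.h>
#include <arch/ops.h>   // assumed location of the arch-neutral atomic declarations

// hypothetical caller: a tiny reference count built on the arch-neutral atomics
typedef struct refcount {
    volatile int count;
} refcount_t;

static inline void refcount_inc(refcount_t *rc) {
    atomic_add(&rc->count, 1);   // maps to __atomic_fetch_add(..., __ATOMIC_RELAXED)
}

static inline bool refcount_dec_and_test(refcount_t *rc) {
    // atomic_add returns the previous value, so the count reached zero when it returns 1
    return atomic_add(&rc->count, -1) == 1;
}

Note that these routines use __ATOMIC_RELAXED, so they only guarantee atomicity of the individual update; any ordering against other memory accesses still has to come from explicit barriers at the call site.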
@@ -21,7 +21,6 @@
__BEGIN_CDECLS

#if ARM_ISA_ARMV7 || (ARM_ISA_ARMV6 && !__thumb__)
#define USE_GCC_ATOMICS 0
#define ENABLE_CYCLE_COUNTER 1

// override of some routines

@@ -81,121 +80,6 @@ static inline bool arch_in_int_handler(void) {
#endif
}

static inline int atomic_add(volatile int *ptr, int val) {
#if USE_GCC_ATOMICS
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "adds %[temp], %[old], %[val]\n"
            "strex %[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_or(volatile int *ptr, int val) {
#if USE_GCC_ATOMICS
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "orrs %[temp], %[old], %[val]\n"
            "strex %[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_and(volatile int *ptr, int val) {
#if USE_GCC_ATOMICS
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "ands %[temp], %[old], %[val]\n"
            "strex %[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_swap(volatile int *ptr, int val) {
#if USE_GCC_ATOMICS
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "strex %[test], %[val], [%[ptr]]\n"
            : [old]"=&r" (old), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
    int old;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "mov %[test], #0\n"
            "teq %[old], %[oldval]\n"
#if (ARM_ISA_ARMV7M || __thumb__)
            "bne 0f\n"
            "strex %[test], %[newval], [%[ptr]]\n"
            "0:\n"
#else
            "strexeq %[test], %[newval], [%[ptr]]\n"
#endif
            : [old]"=&r" (old), [test]"=&r" (test)
            : [ptr]"r" (ptr), [oldval]"Ir" (oldval), [newval]"r" (newval)
            : "cc");

    } while (test != 0);

    return old;
}

static inline uint32_t arch_cycle_count(void) {
#if ARM_ISA_ARMV7M
#if ENABLE_CYCLE_COUNTER

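The routines removed above are classic load-exclusive/store-exclusive retry loops; on ARMv7 the relaxed __atomic builtins are typically lowered to the same kind of sequence, which is why the hand-written assembly can be dropped. A rough equivalence sketch, assuming GCC or Clang targeting ARMv7 (the function name is illustrative only):

static inline int builtin_fetch_add(volatile int *ptr, int val) {
    // typically compiles to roughly:
    //   1: ldrex   r3, [r0]        @ load-exclusive the old value
    //      add     r2, r3, r1      @ old + val
    //      strex   r12, r2, [r0]   @ try to store; r12 == 0 on success
    //      cmp     r12, #0
    //      bne     1b              @ retry if the exclusive store failed
    // i.e. the same ldrex/strex loop as the removed atomic_add() above
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}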
@@ -17,7 +17,8 @@ ifeq ($(ARM_CPU),cortex-m0)
GLOBAL_DEFINES += \
    ARM_CPU_CORTEX_M0=1 \
    ARM_ISA_ARMV6M=1 \
    ARM_WITH_THUMB=1
    ARM_WITH_THUMB=1 \
    USE_BUILTIN_ATOMICS=0
HANDLED_CORE := true
ENABLE_THUMB := true
SUBARCH := arm-m
@@ -26,7 +27,8 @@ ifeq ($(ARM_CPU),cortex-m0plus)
GLOBAL_DEFINES += \
    ARM_CPU_CORTEX_M0_PLUS=1 \
    ARM_ISA_ARMV6M=1 \
    ARM_WITH_THUMB=1
    ARM_WITH_THUMB=1 \
    USE_BUILTIN_ATOMICS=0
HANDLED_CORE := true
ENABLE_THUMB := true
SUBARCH := arm-m

@@ -14,7 +14,6 @@
#include <lk/reg.h>
#include <arch/arm64.h>

#define USE_GCC_ATOMICS 1
#define ENABLE_CYCLE_COUNTER 1

// override of some routines
@@ -71,127 +70,6 @@ static inline bool arch_fiqs_disabled(void) {
#define smp_rmb() CF
#endif

static inline int atomic_add(volatile int *ptr, int val) {
#if USE_GCC_ATOMICS
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "adds %[temp], %[old], %[val]\n"
            "strex %[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_or(volatile int *ptr, int val) {
#if USE_GCC_ATOMICS
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "orrs %[temp], %[old], %[val]\n"
            "strex %[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_and(volatile int *ptr, int val) {
#if USE_GCC_ATOMICS
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int temp;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "ands %[temp], %[old], %[val]\n"
            "strex %[test], %[temp], [%[ptr]]\n"
            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory", "cc");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_swap(volatile int *ptr, int val) {
#if USE_GCC_ATOMICS
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
#else
    int old;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "strex %[test], %[val], [%[ptr]]\n"
            : [old]"=&r" (old), [test]"=&r" (test)
            : [ptr]"r" (ptr), [val]"r" (val)
            : "memory");

    } while (test != 0);

    return old;
#endif
}

static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
#if USE_GCC_ATOMICS
    __atomic_compare_exchange_n(ptr, &oldval, newval, false,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return oldval;
#else
    int old;
    int test;

    do {
        __asm__ volatile(
            "ldrex %[old], [%[ptr]]\n"
            "mov %[test], #0\n"
            "teq %[old], %[oldval]\n"
#if ARM_ISA_ARMV7M
            "bne 0f\n"
            "strex %[test], %[newval], [%[ptr]]\n"
            "0:\n"
#else
            "strexeq %[test], %[newval], [%[ptr]]\n"
#endif
            : [old]"=&r" (old), [test]"=&r" (test)
            : [ptr]"r" (ptr), [oldval]"Ir" (oldval), [newval]"r" (newval)
            : "cc");

    } while (test != 0);

    return old;
#endif
}

static inline uint32_t arch_cycle_count(void) {
#if ARM_ISA_ARM7M
#if ENABLE_CYCLE_COUNTER

@@ -22,10 +22,35 @@ static void arch_disable_ints(void);
static bool arch_ints_disabled(void);
static bool arch_in_int_handler(void);

/* use built in atomic intrinsics if the architecture doesn't otherwise
 * override it. */
#if !defined(USE_BUILTIN_ATOMICS) || USE_BUILTIN_ATOMICS
static inline int atomic_add(volatile int *ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_or(volatile int *ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_and(volatile int *ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_swap(volatile int *ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
    // TODO: implement
    return 0;
}

#else
static int atomic_swap(volatile int *ptr, int val);
static int atomic_add(volatile int *ptr, int val);
static int atomic_and(volatile int *ptr, int val);
static int atomic_or(volatile int *ptr, int val);
#endif

static uint32_t arch_cycle_count(void);

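The guard above makes the builtin-based definitions the default; a port opts out by defining USE_BUILTIN_ATOMICS=0 (as the Cortex-M0/M0+ rules do in this commit) and must then supply the declared routines itself. A hypothetical opt-out implementation for a single-core part without exclusive load/store could look like the sketch below, built on the interrupt-state helpers declared near the top of this header (arch_ints_disabled(), arch_disable_ints(), and an assumed matching arch_enable_ints()); the shape is illustrative only, not what any port in the tree does:

// illustrative only: uniprocessor fallback that masks interrupts around the update
static inline int atomic_add(volatile int *ptr, int val) {
    bool was_disabled = arch_ints_disabled();
    arch_disable_ints();
    int old = *ptr;
    *ptr = old + val;
    if (!was_disabled)
        arch_enable_ints();   // only re-enable if interrupts were on when we entered
    return old;
}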
@@ -49,22 +49,6 @@ static inline bool arch_ints_disabled(void) {
    return !(state & (1<<1));
}

static inline int atomic_add(volatile int *ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_or(volatile int *ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_and(volatile int *ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_swap(volatile int *ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}

/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;

@@ -40,22 +40,6 @@ static inline bool arch_ints_disabled(void) {
    return (state & (1<<1)) || !(state & (1<<0)); // check if EXL or IE is set
}

static inline int atomic_add(volatile int *ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_or(volatile int *ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_and(volatile int *ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_swap(volatile int *ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}

/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;

@@ -25,22 +25,6 @@ static inline bool arch_ints_disabled(void) {
    return !(riscv_csr_read(RISCV_CSR_XSTATUS) & RISCV_CSR_XSTATUS_IE);
}

static inline int atomic_add(volatile int *ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_or(volatile int *ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_and(volatile int *ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_swap(volatile int *ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}

// store the current thread in the tp register which is reserved in the ABI
// as pointing to thread local storage.
register struct thread *__current_thread asm("tp");

@@ -39,6 +39,7 @@ GLOBAL_DEFINES += \
    KERNEL_ASPACE_BASE=$(KERNEL_ASPACE_BASE) \
    KERNEL_ASPACE_SIZE=$(KERNEL_ASPACE_SIZE) \
    SMP_MAX_CPUS=1 \
    USE_BUILTIN_ATOMICS=0 \

MODULE_SRCS += \
    $(SUBARCH_DIR)/start.S \