[arch] move the atomic ops into a separate header

Now you need to include arch/atomic.h to get to the atomic routines.
This simplifies a recusion issue in the way arch/ops.h included
arch_ops. Also just generally makes things cleaner.
This commit is contained in:
Travis Geiselbrecht
2020-05-16 14:58:03 -07:00
parent 556c985b0c
commit f371fa246b
13 changed files with 144 additions and 100 deletions

View File

@@ -17,6 +17,7 @@
#include <kernel/semaphore.h>
#include <kernel/event.h>
#include <platform.h>
#include <arch/atomic.h>
static int sleep_thread(void *arg) {
for (;;) {

View File

@@ -13,6 +13,7 @@
#include <stdio.h>
#include <lk/reg.h>
#include <arch.h>
#include <arch/atomic.h>
#include <arch/ops.h>
#include <arch/mmu.h>
#include <arch/arm.h>

View File

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2008-2014 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <arch/ops.h>
// define simple implementations of the atomic routines for these cpus
// that do not otherwise have an implementation.
#if !USE_BUILTIN_ATOMICS
#if ARM_ISA_ARMV6M // cortex-m0 cortex-m0+
static inline int atomic_add(volatile int *ptr, int val) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
*ptr = temp + val;
if (!state)
arch_enable_ints();
return temp;
}
static inline int atomic_and(volatile int *ptr, int val) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
*ptr = temp & val;
if (!state)
arch_enable_ints();
return temp;
}
static inline int atomic_or(volatile int *ptr, int val) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
*ptr = temp | val;
if (!state)
arch_enable_ints();
return temp;
}
static inline int atomic_swap(volatile int *ptr, int val) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
*ptr = val;
if (!state)
arch_enable_ints();
return temp;
}
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
if (temp == oldval) {
*ptr = newval;
}
if (!state)
arch_enable_ints();
return temp;
}
#endif // ARM_ISA_ARMV6M
#endif // !USE_BUILTIN_ATOMICS

View File

@@ -159,8 +159,6 @@ static inline bool arch_fiqs_disabled(void) {
return !!state;
}
static inline void arch_enable_ints(void) {
CF;
__asm__ volatile("cpsie i");
@@ -178,73 +176,6 @@ static inline bool arch_ints_disabled(void) {
return !!state;
}
static inline int atomic_add(volatile int *ptr, int val) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
*ptr = temp + val;
if (!state)
arch_enable_ints();
return temp;
}
static inline int atomic_and(volatile int *ptr, int val) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
*ptr = temp & val;
if (!state)
arch_enable_ints();
return temp;
}
static inline int atomic_or(volatile int *ptr, int val) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
*ptr = temp | val;
if (!state)
arch_enable_ints();
return temp;
}
static inline int atomic_swap(volatile int *ptr, int val) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
*ptr = val;
if (!state)
arch_enable_ints();
return temp;
}
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
int temp;
bool state;
state = arch_ints_disabled();
arch_disable_ints();
temp = *ptr;
if (temp == oldval) {
*ptr = newval;
}
if (!state)
arch_enable_ints();
return temp;
}
static inline uint32_t arch_cycle_count(void) {
return 0;
}

View File

@@ -8,6 +8,7 @@
#include <lk/debug.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/atomic.h>
#include <arch/ops.h>
#include <arch/arm64.h>
#include <arch/arm64/mmu.h>

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2008-2014 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <lk/compiler.h>
#include <stdbool.h>
__BEGIN_CDECLS
/* use built in atomic intrinsics if the architecture doesn't otherwise
* override it. */
#if !defined(USE_BUILTIN_ATOMICS) || USE_BUILTIN_ATOMICS
static inline int atomic_add(volatile int *ptr, int val) {
return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_or(volatile int *ptr, int val) {
return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_and(volatile int *ptr, int val) {
return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_swap(volatile int *ptr, int val) {
return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
// TODO: implement
return 0;
}
#else
static int atomic_swap(volatile int *ptr, int val);
static int atomic_add(volatile int *ptr, int val);
static int atomic_and(volatile int *ptr, int val);
static int atomic_or(volatile int *ptr, int val);
/* if an implementation wants to implement it themselves */
#include <arch/arch_atomic.h>
#endif

View File

@@ -22,36 +22,6 @@ static void arch_disable_ints(void);
static bool arch_ints_disabled(void);
static bool arch_in_int_handler(void);
/* use built in atomic intrinsics if the architecture doesn't otherwise
* override it. */
#if !defined(USE_BUILTIN_ATOMICS) || USE_BUILTIN_ATOMICS
static inline int atomic_add(volatile int *ptr, int val) {
return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_or(volatile int *ptr, int val) {
return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_and(volatile int *ptr, int val) {
return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_swap(volatile int *ptr, int val) {
return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
// TODO: implement
return 0;
}
#else
static int atomic_swap(volatile int *ptr, int val);
static int atomic_add(volatile int *ptr, int val);
static int atomic_and(volatile int *ptr, int val);
static int atomic_or(volatile int *ptr, int val);
#endif
static uint32_t arch_cycle_count(void);
static uint arch_curr_cpu_num(void);

View File

@@ -15,6 +15,7 @@
#include <lk/init.h>
#include <lk/main.h>
#include <arch/atomic.h>
#include <arch/ops.h>
#include <arch/mp.h>
#include <arch/riscv/clint.h>

View File

@@ -1,5 +1,6 @@
#include <dev/class/netif.h>
#include <kernel/event.h>
#include <arch/atomic.h>
#include <arch/ops.h>
#include <netif/etharp.h>
#include <lwip/netif.h>

View File

@@ -8,6 +8,7 @@
#include <kernel/mp.h>
#include <arch/atomic.h>
#include <arch/mp.h>
#include <assert.h>
#include <kernel/spinlock.h>

View File

@@ -5,6 +5,8 @@
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lib/bio.h>
#include <stdlib.h>
#include <lk/debug.h>
#include <lk/trace.h>
@@ -13,9 +15,9 @@
#include <assert.h>
#include <lk/list.h>
#include <lk/pow2.h>
#include <lib/bio.h>
#include <kernel/mutex.h>
#include <lk/init.h>
#include <arch/atomic.h>
#define LOCAL_TRACE 0

View File

@@ -21,6 +21,7 @@
#include <kernel/semaphore.h>
#include <arch/ops.h>
#include <platform.h>
#include <arch/atomic.h>
#define LOCAL_TRACE 0

View File

@@ -7,6 +7,7 @@
*/
#pragma once
#include <arch/atomic.h>
#include <arch/ops.h>
#include <lk/compiler.h>