[arch] move the atomic ops into a separate header
Now you need to include arch/atomic.h to get to the atomic routines. This simplifies a recursion issue in the way arch/ops.h included arch_ops. It also just generally makes things cleaner.
This commit is contained in:
@@ -17,6 +17,7 @@
|
||||
#include <kernel/semaphore.h>
|
||||
#include <kernel/event.h>
|
||||
#include <platform.h>
|
||||
#include <arch/atomic.h>
|
||||
|
||||
static int sleep_thread(void *arg) {
|
||||
for (;;) {
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include <stdio.h>
|
||||
#include <lk/reg.h>
|
||||
#include <arch.h>
|
||||
#include <arch/atomic.h>
|
||||
#include <arch/ops.h>
|
||||
#include <arch/mmu.h>
|
||||
#include <arch/arm.h>
|
||||
|
||||
85
arch/arm/include/arch/arch_atomic.h
Normal file
85
arch/arm/include/arch/arch_atomic.h
Normal file
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright (c) 2008-2014 Travis Geiselbrecht
|
||||
*
|
||||
* Use of this source code is governed by a MIT-style
|
||||
* license that can be found in the LICENSE file or at
|
||||
* https://opensource.org/licenses/MIT
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include <arch/ops.h>
|
||||
|
||||
// define simple implementations of the atomic routines for these cpus
// that do not otherwise have an implementation.
#if !USE_BUILTIN_ATOMICS
#if ARM_ISA_ARMV6M // cortex-m0 cortex-m0+

// Each routine below performs a read-modify-write of *ptr inside a
// critical section created by masking interrupts, restoring the prior
// interrupt state afterwards. Every routine returns the value *ptr
// held before the operation.

// *ptr += val; returns the previous value of *ptr.
static inline int atomic_add(volatile int *ptr, int val) {
    bool ints_were_off = arch_ints_disabled();
    arch_disable_ints();

    int prev = *ptr;
    *ptr = prev + val;

    if (!ints_were_off)
        arch_enable_ints();
    return prev;
}

// *ptr &= val; returns the previous value of *ptr.
static inline int atomic_and(volatile int *ptr, int val) {
    bool ints_were_off = arch_ints_disabled();
    arch_disable_ints();

    int prev = *ptr;
    *ptr = prev & val;

    if (!ints_were_off)
        arch_enable_ints();
    return prev;
}

// *ptr |= val; returns the previous value of *ptr.
static inline int atomic_or(volatile int *ptr, int val) {
    bool ints_were_off = arch_ints_disabled();
    arch_disable_ints();

    int prev = *ptr;
    *ptr = prev | val;

    if (!ints_were_off)
        arch_enable_ints();
    return prev;
}

// *ptr = val; returns the previous value of *ptr.
static inline int atomic_swap(volatile int *ptr, int val) {
    bool ints_were_off = arch_ints_disabled();
    arch_disable_ints();

    int prev = *ptr;
    *ptr = val;

    if (!ints_were_off)
        arch_enable_ints();
    return prev;
}

// if (*ptr == oldval) *ptr = newval; returns the value observed in *ptr
// before the operation (equal to oldval exactly when the store happened).
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
    bool ints_were_off = arch_ints_disabled();
    arch_disable_ints();

    int prev = *ptr;
    if (prev == oldval)
        *ptr = newval;

    if (!ints_were_off)
        arch_enable_ints();
    return prev;
}

#endif // ARM_ISA_ARMV6M
#endif // !USE_BUILTIN_ATOMICS
|
||||
@@ -159,8 +159,6 @@ static inline bool arch_fiqs_disabled(void) {
|
||||
return !!state;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static inline void arch_enable_ints(void) {
|
||||
CF;
|
||||
__asm__ volatile("cpsie i");
|
||||
@@ -178,73 +176,6 @@ static inline bool arch_ints_disabled(void) {
|
||||
return !!state;
|
||||
}
|
||||
|
||||
// Interrupt-masking implementations of the atomic routines.
// Each one returns the pre-operation value of *ptr; the interrupt
// state active on entry is restored on exit.

static inline int atomic_add(volatile int *ptr, int val) {
    const bool already_masked = arch_ints_disabled();
    arch_disable_ints();
    const int observed = *ptr;
    *ptr = observed + val;
    if (!already_masked)
        arch_enable_ints();
    return observed;
}

static inline int atomic_and(volatile int *ptr, int val) {
    const bool already_masked = arch_ints_disabled();
    arch_disable_ints();
    const int observed = *ptr;
    *ptr = observed & val;
    if (!already_masked)
        arch_enable_ints();
    return observed;
}

static inline int atomic_or(volatile int *ptr, int val) {
    const bool already_masked = arch_ints_disabled();
    arch_disable_ints();
    const int observed = *ptr;
    *ptr = observed | val;
    if (!already_masked)
        arch_enable_ints();
    return observed;
}

static inline int atomic_swap(volatile int *ptr, int val) {
    const bool already_masked = arch_ints_disabled();
    arch_disable_ints();
    const int observed = *ptr;
    *ptr = val;
    if (!already_masked)
        arch_enable_ints();
    return observed;
}

// Stores newval only when *ptr currently equals oldval; the returned
// observed value equals oldval exactly when the exchange succeeded.
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
    const bool already_masked = arch_ints_disabled();
    arch_disable_ints();
    const int observed = *ptr;
    if (observed == oldval) {
        *ptr = newval;
    }
    if (!already_masked)
        arch_enable_ints();
    return observed;
}
|
||||
|
||||
// Cycle-count stub: always reports 0.
// NOTE(review): presumably no cycle counter is wired up for this
// configuration — confirm against the target platform.
static inline uint32_t arch_cycle_count(void) {
    return 0;
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include <lk/debug.h>
|
||||
#include <stdlib.h>
|
||||
#include <arch.h>
|
||||
#include <arch/atomic.h>
|
||||
#include <arch/ops.h>
|
||||
#include <arch/arm64.h>
|
||||
#include <arch/arm64/mmu.h>
|
||||
|
||||
48
arch/include/arch/atomic.h
Normal file
48
arch/include/arch/atomic.h
Normal file
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
* Copyright (c) 2008-2014 Travis Geiselbrecht
|
||||
*
|
||||
* Use of this source code is governed by a MIT-style
|
||||
* license that can be found in the LICENSE file or at
|
||||
* https://opensource.org/licenses/MIT
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include <lk/compiler.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
__BEGIN_CDECLS
|
||||
|
||||
/* use built in atomic intrinsics if the architecture doesn't otherwise
|
||||
* override it. */
|
||||
#if !defined(USE_BUILTIN_ATOMICS) || USE_BUILTIN_ATOMICS
|
||||
// Atomically performs *ptr += val (relaxed ordering) and returns the
// value *ptr held before the addition.
static inline int atomic_add(volatile int *ptr, int val) {
    int prev = __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
    return prev;
}
|
||||
|
||||
// Atomically performs *ptr |= val (relaxed ordering) and returns the
// value *ptr held before the OR.
static inline int atomic_or(volatile int *ptr, int val) {
    int prev = __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
    return prev;
}
|
||||
|
||||
// Atomically performs *ptr &= val (relaxed ordering) and returns the
// value *ptr held before the AND.
static inline int atomic_and(volatile int *ptr, int val) {
    int prev = __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
    return prev;
}
|
||||
|
||||
// Atomically stores val into *ptr (relaxed ordering) and returns the
// value *ptr held before the store.
static inline int atomic_swap(volatile int *ptr, int val) {
    int prev = __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
    return prev;
}
|
||||
// Atomically: if (*ptr == oldval) *ptr = newval. Returns the value *ptr
// held before the operation, so callers detect success by comparing the
// return value against oldval — matching the contract of the
// non-builtin (interrupt-masking) implementations.
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
    int expected = oldval;
    // On failure the builtin writes the observed value into 'expected';
    // on success 'expected' still holds oldval — either way it is the
    // pre-operation value of *ptr.
    __atomic_compare_exchange_n(ptr, &expected, newval, false,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return expected;
}
|
||||
|
||||
#else
|
||||
static int atomic_swap(volatile int *ptr, int val);
|
||||
static int atomic_add(volatile int *ptr, int val);
|
||||
static int atomic_and(volatile int *ptr, int val);
|
||||
static int atomic_or(volatile int *ptr, int val);
|
||||
|
||||
/* if an implementation wants to implement it themselves */
|
||||
#include <arch/arch_atomic.h>
|
||||
|
||||
#endif
|
||||
|
||||
@@ -22,36 +22,6 @@ static void arch_disable_ints(void);
|
||||
static bool arch_ints_disabled(void);
|
||||
static bool arch_in_int_handler(void);
|
||||
|
||||
/* use built in atomic intrinsics if the architecture doesn't otherwise
|
||||
* override it. */
|
||||
#if !defined(USE_BUILTIN_ATOMICS) || USE_BUILTIN_ATOMICS
|
||||
// Builtin-backed atomic routines (relaxed ordering). Each returns the
// value *ptr held before the operation.

static inline int atomic_add(volatile int *ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_or(volatile int *ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_and(volatile int *ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_swap(volatile int *ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}

// Atomically: if (*ptr == oldval) *ptr = newval. Returns the value *ptr
// held before the operation (== oldval exactly on success).
// Fixed: the previous body was an unimplemented stub that always
// returned 0 and never performed the exchange.
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
    int expected = oldval;
    // On failure the builtin deposits the observed value in 'expected';
    // on success it remains oldval — both are the pre-operation value.
    __atomic_compare_exchange_n(ptr, &expected, newval, false,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return expected;
}
|
||||
|
||||
#else
|
||||
static int atomic_swap(volatile int *ptr, int val);
|
||||
static int atomic_add(volatile int *ptr, int val);
|
||||
static int atomic_and(volatile int *ptr, int val);
|
||||
static int atomic_or(volatile int *ptr, int val);
|
||||
#endif
|
||||
|
||||
static uint32_t arch_cycle_count(void);
|
||||
|
||||
static uint arch_curr_cpu_num(void);
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include <lk/init.h>
|
||||
#include <lk/main.h>
|
||||
|
||||
#include <arch/atomic.h>
|
||||
#include <arch/ops.h>
|
||||
#include <arch/mp.h>
|
||||
#include <arch/riscv/clint.h>
|
||||
|
||||
1
external/lib/lwip/netif.c
vendored
1
external/lib/lwip/netif.c
vendored
@@ -1,5 +1,6 @@
|
||||
#include <dev/class/netif.h>
|
||||
#include <kernel/event.h>
|
||||
#include <arch/atomic.h>
|
||||
#include <arch/ops.h>
|
||||
#include <netif/etharp.h>
|
||||
#include <lwip/netif.h>
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#include <kernel/mp.h>
|
||||
|
||||
#include <arch/atomic.h>
|
||||
#include <arch/mp.h>
|
||||
#include <assert.h>
|
||||
#include <kernel/spinlock.h>
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
* license that can be found in the LICENSE file or at
|
||||
* https://opensource.org/licenses/MIT
|
||||
*/
|
||||
#include <lib/bio.h>
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <lk/debug.h>
|
||||
#include <lk/trace.h>
|
||||
@@ -13,9 +15,9 @@
|
||||
#include <assert.h>
|
||||
#include <lk/list.h>
|
||||
#include <lk/pow2.h>
|
||||
#include <lib/bio.h>
|
||||
#include <kernel/mutex.h>
|
||||
#include <lk/init.h>
|
||||
#include <arch/atomic.h>
|
||||
|
||||
#define LOCAL_TRACE 0
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
#include <kernel/semaphore.h>
|
||||
#include <arch/ops.h>
|
||||
#include <platform.h>
|
||||
#include <arch/atomic.h>
|
||||
|
||||
#define LOCAL_TRACE 0
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include <arch/atomic.h>
|
||||
#include <arch/ops.h>
|
||||
#include <lk/compiler.h>
|
||||
|
||||
|
||||
Reference in New Issue
Block a user