[kernel] remove critical_section, move everything to spinlocks
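The change is mechanical across the tree: the old global, recursive enter_critical_section()/exit_critical_section() pair (a count that gated interrupt enable/disable) is replaced either by the scheduler's new thread_lock or by a per-subsystem spinlock. A minimal sketch of the before/after call-site pattern; foo_lock is a hypothetical name, not part of the commit:

    /* before: global count; first entry disables interrupts */
    enter_critical_section();
    /* ... touch shared state ... */
    exit_critical_section();             /* last exit re-enables interrupts */

    /* after: a local spinlock that saves and restores interrupt state */
    static spin_lock_t foo_lock = SPIN_LOCK_INITIAL_VALUE;

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&foo_lock, state);        /* disable ints + take lock */
    /* ... touch shared state ... */
    spin_unlock_irqrestore(&foo_lock, state);   /* drop lock, restore ints */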
@@ -584,7 +584,7 @@ static void spinlock_test(void)
     printf("testing spinlock:\n");
     ASSERT(!spin_lock_held(&lock));
     ASSERT(!arch_ints_disabled());
-    spin_lock_irqsave(&lock, &state);
+    spin_lock_irqsave(&lock, state);
     ASSERT(arch_ints_disabled());
     ASSERT(spin_lock_held(&lock));
     spin_unlock_irqrestore(&lock, state);

@@ -604,7 +604,7 @@ static void spinlock_test(void)

     c = arch_cycle_count();
     for (uint i = 0; i < COUNT; i++) {
-        spin_lock_irqsave(&lock, &state);
+        spin_lock_irqsave(&lock, state);
         spin_unlock_irqrestore(&lock, state);
     }
     c = arch_cycle_count() - c;
@@ -161,12 +161,6 @@ FUNCTION(arm_irq)

     save_offset #4

-    /* increment the global critical section count */
-    LOADCONST(r1, critical_section_count)
-    ldr     r0, [r1]
-    add     r0, r0, #1
-    str     r0, [r1]
-
     /* track that we're inside an irq handler */
     LOADCONST(r1, __arm_in_handler)
     mov     r0, #1

@@ -193,12 +187,6 @@ FUNCTION(arm_irq)
     cmp     r0, #0
     blne    thread_preempt

-    /* decrement the global critical section count */
-    LOADCONST(r1, critical_section_count)
-    ldr     r0, [r1]
-    sub     r0, r0, #1
-    str     r0, [r1]
-
     restore

 FUNCTION(arm_fiq)
@@ -87,7 +87,6 @@ static void dump_iframe(struct arm_iframe *frame)

 static void exception_die(struct arm_fault_frame *frame, const char *msg)
 {
-    inc_critical_section();
     dprintf(CRITICAL, msg);
     dump_fault_frame(frame);

@@ -49,8 +49,9 @@ static void initial_thread_func(void)
 //  dprintf("initial_thread_func: thread %p calling %p with arg %p\n", current_thread, current_thread->entry, current_thread->arg);
 //  dump_thread(current_thread);

-    /* exit the implicit critical section we're within */
-    exit_critical_section();
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();

     thread_t *ct = get_current_thread();
     ret = ct->entry(ct->arg);
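With the critical-section count gone, a thread's first switch-in now happens with thread_lock held and interrupts off, so the trampoline releases the lock and re-enables interrupts before jumping to the entry point. From the caller's side nothing changes; a sketch, with a hypothetical entry function:

    static int worker(void *arg)
    {
        /* runs with interrupts enabled and no locks held */
        return 0;
    }

    thread_t *t = thread_create("worker", &worker, NULL,
                                DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
    thread_resume(t);   /* first run enters through initial_thread_func() */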
@@ -30,6 +30,7 @@
 #include <trace.h>
 #include <lib/fixed_point.h>
 #include <kernel/thread.h>
+#include <kernel/spinlock.h>
 #include <platform.h>
 #include <platform/interrupts.h>
 #include <platform/timer.h>

@@ -69,6 +70,7 @@

 static platform_timer_callback t_callback;
 static addr_t scu_control_base;
+static spin_lock_t lock = SPIN_LOCK_INITIAL_VALUE;

 static lk_time_t periodic_interval;
 static lk_time_t oneshot_interval;

@@ -118,7 +120,8 @@ status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg
     if (unlikely(ticks > 0xffffffff))
         ticks = 0xffffffff;

-    enter_critical_section();
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&lock, state);

     t_callback = callback;

@@ -130,7 +133,7 @@ status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg
     TIMREG(TIMER_LOAD) = ticks;
     TIMREG(TIMER_CONTROL) = (1<<2) | (1<<1) | (1<<0); // irq enable, autoreload, enable

-    exit_critical_section();
+    spin_unlock_irqrestore(&lock, state);

     return NO_ERROR;
 }

@@ -145,7 +148,8 @@ status_t platform_set_oneshot_timer (platform_timer_callback callback, void *arg
     if (unlikely(ticks > 0xffffffff))
         ticks = 0xffffffff;

-    enter_critical_section();
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&lock, state);

     t_callback = callback;
     oneshot_interval = interval;

@@ -156,7 +160,7 @@ status_t platform_set_oneshot_timer (platform_timer_callback callback, void *arg
     TIMREG(TIMER_LOAD) = ticks;
     TIMREG(TIMER_CONTROL) = (1<<2) | (1<<0) | (1<<0); // irq enable, oneshot, enable

-    exit_critical_section();
+    spin_unlock_irqrestore(&lock, state);

     return NO_ERROR;
 }
@@ -22,8 +22,11 @@
  */
 #pragma once

+#include <compiler.h>
 #include <arch/spinlock.h>

+__BEGIN_CDECLS
+
 /* interrupts should already be disabled */
 static inline void spin_lock(spin_lock_t *lock)
 {

@@ -84,5 +87,7 @@ static inline void spin_unlock_restore(
 }

 /* hand(ier) routines */
-#define spin_lock_irqsave(lock, statep) spin_lock_save(lock, statep, SPIN_LOCK_FLAG_INTERRUPTS)
+#define spin_lock_irqsave(lock, statep) spin_lock_save(lock, &(statep), SPIN_LOCK_FLAG_INTERRUPTS)
 #define spin_unlock_irqrestore(lock, statep) spin_unlock_restore(lock, statep, SPIN_LOCK_FLAG_INTERRUPTS)
+
+__END_CDECLS
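Note the macro change: spin_lock_irqsave() now takes the saved-state variable itself rather than its address, adding the & internally via &(statep). That is what the spinlock_test hunks at the top convert. Call sites now read symmetrically with the unlock:

    spin_lock_saved_state_t state;

    /* old form */
    spin_lock_irqsave(&lock, &state);

    /* new form; the & moved into the macro */
    spin_lock_irqsave(&lock, state);
    spin_unlock_irqrestore(&lock, state);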
@@ -64,7 +64,6 @@ typedef struct thread {
     struct list_node queue_node;
     int priority;
     enum thread_state state;
-    int saved_critical_section_count;
     int remaining_quantum;
     unsigned int flags;

@@ -142,36 +141,11 @@ enum handler_return thread_timer_tick(void);
 thread_t *get_current_thread(void);
 void set_current_thread(thread_t *);

-/* critical sections */
-extern int critical_section_count;
+/* scheduler lock */
+extern spin_lock_t thread_lock;

-static inline __ALWAYS_INLINE void enter_critical_section(void)
-{
-    CF;
-    if (critical_section_count == 0)
-        arch_disable_ints();
-    critical_section_count++;
-    CF;
-}
-
-static inline __ALWAYS_INLINE void exit_critical_section(void)
-{
-    CF;
-    critical_section_count--;
-    if (critical_section_count == 0)
-        arch_enable_ints();
-    CF;
-}
-
-static inline __ALWAYS_INLINE bool in_critical_section(void)
-{
-    CF;
-    return critical_section_count > 0;
-}
-
-/* only used by interrupt glue */
-static inline void inc_critical_section(void) { critical_section_count++; }
-static inline void dec_critical_section(void) { critical_section_count--; }
+#define THREAD_LOCK(state) spin_lock_saved_state_t state; spin_lock_irqsave(&thread_lock, state)
+#define THREAD_UNLOCK(state) spin_unlock_irqrestore(&thread_lock, state)

 /* thread local storage */
 static inline __ALWAYS_INLINE uintptr_t tls_get(uint entry)
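THREAD_LOCK(state) both declares the spin_lock_saved_state_t variable and acquires thread_lock with interrupts disabled, which is why call sites in the kernel objects below can name state without declaring it; it also means the macro must sit where a declaration is legal. A sketch of the resulting shape, with a hypothetical function:

    static void example_op(void)
    {
        THREAD_LOCK(state);     /* spin_lock_saved_state_t state;
                                   spin_lock_irqsave(&thread_lock, state); */

        /* ... manipulate scheduler / wait-queue state ... */

        THREAD_UNLOCK(state);   /* spin_unlock_irqrestore(&thread_lock, state); */
    }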
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2012 Travis Geiselbrecht
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files

@@ -25,6 +25,7 @@

 #include <sys/types.h>
 #include <kernel/event.h>
+#include <kernel/spinlock.h>

 typedef struct cbuf {
     uint head;

@@ -32,6 +33,7 @@ typedef struct cbuf {
     uint len_pow2;
     char *buf;
     event_t event;
+    spin_lock_t lock;
 } cbuf_t;

 void cbuf_initialize(cbuf_t *cbuf, size_t len);
@@ -119,8 +119,6 @@ static int cmd_threadload(int argc, const cmd_args *argv)
     static bool showthreadload = false;
     static timer_t tltimer;

-    enter_critical_section();
-
     if (showthreadload == false) {
         // start the display
         timer_initialize(&tltimer);

@@ -131,8 +129,6 @@ static int cmd_threadload(int argc, const cmd_args *argv)
         showthreadload = false;
     }

-    exit_critical_section();
-
     return 0;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2013 Travis Geiselbrecht
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files

@@ -40,10 +40,11 @@
 * @{
 */

-#include <kernel/event.h>
 #include <debug.h>
 #include <assert.h>
 #include <err.h>
+#include <kernel/event.h>
 #include <kernel/thread.h>

 /**
  * @brief Initialize an event object

@@ -70,14 +71,14 @@ void event_destroy(event_t *e)
 {
     DEBUG_ASSERT(e->magic == EVENT_MAGIC);

-    enter_critical_section();
+    THREAD_LOCK(state);

     e->magic = 0;
     e->signalled = false;
     e->flags = 0;
     wait_queue_destroy(&e->wait, true);

-    exit_critical_section();
+    THREAD_UNLOCK(state);
 }

 /**

@@ -101,7 +102,7 @@ status_t event_wait_timeout(event_t *e, lk_time_t timeout)

     DEBUG_ASSERT(e->magic == EVENT_MAGIC);

-    enter_critical_section();
+    THREAD_LOCK(state);

     if (e->signalled) {
         /* signalled, we're going to fall through */

@@ -112,12 +113,9 @@ status_t event_wait_timeout(event_t *e, lk_time_t timeout)
     } else {
         /* unsignalled, block here */
         ret = wait_queue_block(&e->wait, timeout);
-        if (ret < 0)
-            goto err;
     }

-err:
-    exit_critical_section();
+    THREAD_UNLOCK(state);

     return ret;
 }

@@ -143,7 +141,7 @@ status_t event_signal(event_t *e, bool reschedule)
 {
     DEBUG_ASSERT(e->magic == EVENT_MAGIC);

-    enter_critical_section();
+    THREAD_LOCK(state);

     if (!e->signalled) {
         if (e->flags & EVENT_FLAG_AUTOUNSIGNAL) {

@@ -163,7 +161,7 @@ status_t event_signal(event_t *e, bool reschedule)
         }
     }

-    exit_critical_section();
+    THREAD_UNLOCK(state);

     return NO_ERROR;
 }
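Dropping the goto err in event_wait_timeout() works because every path now falls through to the single THREAD_UNLOCK(). The resulting shape of the function, sketched with the auto-unsignal details elided:

    status_t event_wait_timeout(event_t *e, lk_time_t timeout)
    {
        status_t ret = NO_ERROR;

        THREAD_LOCK(state);
        if (e->signalled) {
            /* ... possibly clear the signal, then fall through ... */
        } else {
            /* error or not, fall through to the unlock */
            ret = wait_queue_block(&e->wait, timeout);
        }
        THREAD_UNLOCK(state);

        return ret;
    }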
@@ -60,11 +60,11 @@ void mutex_destroy(mutex_t *m)
             get_current_thread(), get_current_thread()->name, m, m->holder, m->holder->name);
 #endif

-    enter_critical_section();
+    THREAD_LOCK(state);
     m->magic = 0;
     m->count = 0;
     wait_queue_destroy(&m->wait, true);
-    exit_critical_section();
+    THREAD_UNLOCK(state);
 }

 /**

@@ -87,7 +87,7 @@ status_t mutex_acquire_timeout(mutex_t *m, lk_time_t timeout)
             get_current_thread(), get_current_thread()->name, m);
 #endif

-    enter_critical_section();
+    THREAD_LOCK(state);

     status_t ret = NO_ERROR;
     if (unlikely(++m->count > 1)) {

@@ -112,7 +112,7 @@ status_t mutex_acquire_timeout(mutex_t *m, lk_time_t timeout)
     m->holder = get_current_thread();

 err:
-    exit_critical_section();
+    THREAD_UNLOCK(state);
     return ret;
 }

@@ -130,7 +130,7 @@ status_t mutex_release(mutex_t *m)
     }
 #endif

-    enter_critical_section();
+    THREAD_LOCK(state);

     m->holder = 0;

@@ -139,7 +139,7 @@ status_t mutex_release(mutex_t *m)
         wait_queue_wake_one(&m->wait, true, NO_ERROR);
     }

-    exit_critical_section();
+    THREAD_UNLOCK(state);
     return NO_ERROR;
 }
@@ -26,16 +26,16 @@ void sem_init(semaphore_t *sem, unsigned int value)

 void sem_destroy(semaphore_t *sem)
 {
-    enter_critical_section();
+    THREAD_LOCK(state);
     sem->count = 0;
     wait_queue_destroy(&sem->wait, true);
-    exit_critical_section();
+    THREAD_UNLOCK(state);
 }

 status_t sem_post(semaphore_t *sem, bool resched)
 {
     status_t ret = NO_ERROR;
-    enter_critical_section();
+    THREAD_LOCK(state);

     /*
      * If the count is or was negative then a thread is waiting for a resource, otherwise

@@ -44,14 +44,14 @@ status_t sem_post(semaphore_t *sem, bool resched)
     if (unlikely(++sem->count <= 0))
         wait_queue_wake_one(&sem->wait, resched, NO_ERROR);

-    exit_critical_section();
+    THREAD_UNLOCK(state);
     return ret;
 }

 status_t sem_wait(semaphore_t *sem)
 {
     status_t ret = NO_ERROR;
-    enter_critical_section();
+    THREAD_LOCK(state);

     /*
      * If there are no resources available then we need to

@@ -60,28 +60,28 @@ status_t sem_wait(semaphore_t *sem)
     if (unlikely(--sem->count < 0))
         ret = wait_queue_block(&sem->wait, INFINITE_TIME);

-    exit_critical_section();
+    THREAD_UNLOCK(state);
     return ret;
 }

 status_t sem_trywait(semaphore_t *sem)
 {
     status_t ret = NO_ERROR;
-    enter_critical_section();
+    THREAD_LOCK(state);

     if (unlikely(sem->count <= 0))
         ret = ERR_NOT_READY;
     else
         sem->count--;

-    exit_critical_section();
+    THREAD_UNLOCK(state);
     return ret;
 }

 status_t sem_timedwait(semaphore_t *sem, lk_time_t timeout)
 {
     status_t ret = NO_ERROR;
-    enter_critical_section();
+    THREAD_LOCK(state);

     if (unlikely(--sem->count < 0)) {
         ret = wait_queue_block(&sem->wait, timeout);

@@ -92,6 +92,6 @@ status_t sem_timedwait(semaphore_t *sem, lk_time_t timeout)
         }
     }

-    exit_critical_section();
+    THREAD_UNLOCK(state);
     return ret;
 }
kernel/thread.c | 126
@@ -55,8 +55,8 @@ struct thread_stats thread_stats;
 /* global thread list */
 static struct list_node thread_list;

-/* the global critical section count */
-int critical_section_count;
+/* master thread spinlock */
+spin_lock_t thread_lock = SPIN_LOCK_INITIAL_VALUE;

 /* the run queue */
 static struct list_node run_queue[NUM_PRIORITIES];

@@ -84,7 +84,8 @@ static void insert_in_run_queue_head(thread_t *t)
     ASSERT(t->magic == THREAD_MAGIC);
     ASSERT(t->state == THREAD_READY);
     ASSERT(!list_in_list(&t->queue_node));
-    ASSERT(in_critical_section());
+    ASSERT(arch_ints_disabled());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     list_add_head(&run_queue[t->priority], &t->queue_node);

@@ -97,7 +98,8 @@ static void insert_in_run_queue_tail(thread_t *t)
     ASSERT(t->magic == THREAD_MAGIC);
     ASSERT(t->state == THREAD_READY);
     ASSERT(!list_in_list(&t->queue_node));
-    ASSERT(in_critical_section());
+    ASSERT(arch_ints_disabled());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     list_add_tail(&run_queue[t->priority], &t->queue_node);

@@ -154,7 +156,6 @@ thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine
     t->entry = entry;
     t->arg = arg;
     t->priority = priority;
-    t->saved_critical_section_count = 1; /* we always start inside a critical section */
     t->state = THREAD_SUSPENDED;
     t->blocking_wait_queue = NULL;
     t->wait_queue_block_ret = NO_ERROR;

@@ -188,9 +189,9 @@ thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine
     arch_thread_initialize(t);

     /* add it to the global thread list */
-    enter_critical_section();
+    THREAD_LOCK(state);
     list_add_head(&thread_list, &t->thread_list_node);
-    exit_critical_section();
+    THREAD_UNLOCK(state);

     return t;
 }

@@ -251,13 +252,17 @@ status_t thread_resume(thread_t *t)
     ASSERT(t->state != THREAD_DEATH);
 #endif

-    enter_critical_section();
+    bool resched = false;
+    THREAD_LOCK(state);
     if (t->state == THREAD_SUSPENDED) {
         t->state = THREAD_READY;
         insert_in_run_queue_head(t);
-        thread_yield();
+        resched = true;
     }
-    exit_critical_section();
+    THREAD_UNLOCK(state);
+
+    if (resched)
+        thread_yield();

     return NO_ERROR;
 }
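thread_resume() shows the deferred-reschedule idiom this commit introduces: thread_yield() acquires thread_lock itself, so it can no longer be called with the lock held the way it could be nested inside the old recursive critical section. The pattern:

    bool resched = false;

    THREAD_LOCK(state);
    /* ... make a thread runnable ... */
    resched = true;
    THREAD_UNLOCK(state);

    /* reschedule only after the lock is dropped */
    if (resched)
        thread_yield();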
@@ -277,11 +282,11 @@ status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout)
     ASSERT(t->magic == THREAD_MAGIC);
 #endif

-    enter_critical_section();
+    THREAD_LOCK(state);

     if (t->flags & THREAD_FLAG_DETACHED) {
         /* the thread is detached, go ahead and exit */
-        exit_critical_section();
+        THREAD_UNLOCK(state);
         return ERR_THREAD_DETACHED;
     }

@@ -289,7 +294,7 @@ status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout)
     if (t->state != THREAD_DEATH) {
         status_t err = wait_queue_block(&t->retcode_wait_queue, timeout);
         if (err < 0) {
-            exit_critical_section();
+            THREAD_UNLOCK(state);
             return err;
         }
     }

@@ -311,7 +316,7 @@ status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout)
     /* clear the structure's magic */
     t->magic = 0;

-    exit_critical_section();
+    THREAD_UNLOCK(state);

     /* free its stack and the thread structure itself */
     if (t->flags & THREAD_FLAG_FREE_STACK && t->stack)

@@ -329,7 +334,7 @@ status_t thread_detach(thread_t *t)
     ASSERT(t->magic == THREAD_MAGIC);
 #endif

-    enter_critical_section();
+    THREAD_LOCK(state);

     /* if another thread is blocked inside thread_join() on this thread,
      * wake them up with a specific return code */

@@ -338,11 +343,11 @@ status_t thread_detach(thread_t *t)
     /* if it's already dead, then just do what join would have and exit */
     if (t->state == THREAD_DEATH) {
         t->flags &= ~THREAD_FLAG_DETACHED; /* makes sure thread_join continues */
-        exit_critical_section();
+        THREAD_UNLOCK(state);
         return thread_join(t, NULL, 0);
     } else {
         t->flags |= THREAD_FLAG_DETACHED;
-        exit_critical_section();
+        THREAD_UNLOCK(state);
         return NO_ERROR;
     }
 }

@@ -365,7 +370,7 @@ void thread_exit(int retcode)

     // dprintf("thread_exit: current %p\n", current_thread);

-    enter_critical_section();
+    THREAD_LOCK(state);

     /* enter the dead state */
     current_thread->state = THREAD_DEATH;
@@ -419,11 +424,9 @@ void thread_resched(void)

     thread_t *current_thread = get_current_thread();

-    // printf("thread_resched: current %p: ", current_thread);
-    // dump_thread(current_thread);
-
 #if THREAD_CHECKS
-    ASSERT(in_critical_section());
+    ASSERT(arch_ints_disabled());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     THREAD_STATS_INC(reschedules);

@@ -439,7 +442,6 @@ void thread_resched(void)
 #endif

     int next_queue = HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
-    //dprintf(SPEW, "bitmap 0x%x, next %d\n", run_queue_bitmap, next_queue);

     newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);

@@ -450,9 +452,6 @@ void thread_resched(void)
     ASSERT(newthread);
 #endif

-    // printf("newthread: ");
-    // dump_thread(newthread);
-
     newthread->state = THREAD_RUNNING;

     if (newthread == oldthread)

@@ -477,11 +476,6 @@ void thread_resched(void)

     KEVLOG_THREAD_SWITCH(oldthread, newthread);

-#if THREAD_CHECKS
-    ASSERT(critical_section_count > 0);
-    ASSERT(newthread->saved_critical_section_count > 0);
-#endif
-
 #if PLATFORM_HAS_DYNAMIC_TIMER
     if (thread_is_real_time(newthread)) {
         if (!thread_is_real_time(oldthread)) {

@@ -500,9 +494,7 @@ void thread_resched(void)
     target_set_debug_led(0, newthread != idle_thread);

     /* do the switch */
-    oldthread->saved_critical_section_count = critical_section_count;
     set_current_thread(newthread);
-    critical_section_count = newthread->saved_critical_section_count;
     arch_context_switch(oldthread, newthread);
 }

@@ -524,7 +516,7 @@ void thread_yield(void)
     ASSERT(current_thread->state == THREAD_RUNNING);
 #endif

-    enter_critical_section();
+    THREAD_LOCK(state);

     THREAD_STATS_INC(yields);

@@ -534,7 +526,7 @@ void thread_yield(void)
     insert_in_run_queue_tail(current_thread);
     thread_resched();

-    exit_critical_section();
+    THREAD_UNLOCK(state);
 }

 /**

@@ -559,7 +551,6 @@ void thread_preempt(void)
 #if THREAD_CHECKS
     ASSERT(current_thread->magic == THREAD_MAGIC);
     ASSERT(current_thread->state == THREAD_RUNNING);
-    ASSERT(in_critical_section());
 #endif

 #if THREAD_STATS

@@ -569,6 +560,8 @@ void thread_preempt(void)

     KEVLOG_THREAD_PREEMPT(current_thread);

+    THREAD_LOCK(state);
+
     /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
     current_thread->state = THREAD_READY;
     if (current_thread->remaining_quantum > 0)

@@ -576,6 +569,8 @@ void thread_preempt(void)
     else
         insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
     thread_resched();
+
+    THREAD_UNLOCK(state);
 }

 /**
@@ -595,7 +590,7 @@ void thread_block(void)

     ASSERT(current_thread->magic == THREAD_MAGIC);
     ASSERT(current_thread->state == THREAD_BLOCKED);
-    ASSERT(in_critical_section());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     /* we are blocking on something. the blocking code should have already stuck us on a queue */

@@ -607,7 +602,7 @@ void thread_unblock(thread_t *t, bool resched)
 #if THREAD_CHECKS
     ASSERT(t->magic == THREAD_MAGIC);
     ASSERT(t->state == THREAD_BLOCKED);
-    ASSERT(in_critical_section());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     t->state = THREAD_READY;

@@ -641,9 +636,13 @@ static enum handler_return thread_sleep_handler(timer_t *timer, lk_time_t now, v
     ASSERT(t->state == THREAD_SLEEPING);
 #endif

+    THREAD_LOCK(state);
+
     t->state = THREAD_READY;
     insert_in_run_queue_head(t);

+    THREAD_UNLOCK(state);
+
     return INT_RESCHEDULE;
 }

@@ -670,11 +669,11 @@ void thread_sleep(lk_time_t delay)

     timer_initialize(&timer);

-    enter_critical_section();
+    THREAD_LOCK(state);
     timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
     current_thread->state = THREAD_SLEEPING;
     thread_resched();
-    exit_critical_section();
+    THREAD_UNLOCK(state);
 }

 /**

@@ -700,7 +699,6 @@ void thread_init_early(void)
     /* half construct this thread, since we're already running */
     t->priority = HIGHEST_PRIORITY;
     t->state = THREAD_RUNNING;
-    t->saved_critical_section_count = 1;
     t->flags = THREAD_FLAG_DETACHED;
     wait_queue_init(&t->retcode_wait_queue);
     list_add_head(&thread_list, &t->thread_list_node);

@@ -735,8 +733,8 @@ void thread_set_name(const char *name)
 */
 void thread_set_priority(int priority)
 {
-    if (priority < LOWEST_PRIORITY)
-        priority = LOWEST_PRIORITY;
+    if (priority <= IDLE_PRIORITY)
+        priority = IDLE_PRIORITY + 1;
     if (priority > HIGHEST_PRIORITY)
         priority = HIGHEST_PRIORITY;
     get_current_thread()->priority = priority;

@@ -760,8 +758,8 @@ void thread_become_idle(void)
     * timer when it is scheduled. */
     thread_set_real_time(idle_thread);

-    /* release the implicit boot critical section and yield to the scheduler */
-    exit_critical_section();
+    /* enable interrupts and start the scheduler */
+    arch_enable_ints();
     thread_yield();

     idle_thread_routine();

@@ -786,9 +784,8 @@ static const char *thread_state_to_str(enum thread_state state)
 void dump_thread(thread_t *t)
 {
     dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
-    dprintf(INFO, "\tstate %s, priority %d, remaining quantum %d, critical section %d\n",
-            thread_state_to_str(t->state), t->priority, t->remaining_quantum,
-            t->saved_critical_section_count);
+    dprintf(INFO, "\tstate %s, priority %d, remaining quantum %d\n",
+            thread_state_to_str(t->state), t->priority, t->remaining_quantum);
     dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
     dprintf(INFO, "\tentry %p, arg %p, flags 0x%x\n", t->entry, t->arg, t->flags);
     dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);

@@ -807,11 +804,11 @@ void dump_all_threads(void)
 {
     thread_t *t;

-    enter_critical_section();
+    THREAD_LOCK(state);
     list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
         dump_thread(t);
     }
-    exit_critical_section();
+    THREAD_UNLOCK(state);
 }

 /** @} */
@@ -834,10 +831,16 @@ static enum handler_return wait_queue_timeout_handler(timer_t *timer, lk_time_t
     ASSERT(thread->magic == THREAD_MAGIC);
 #endif

-    if (thread_unblock_from_wait_queue(thread, ERR_TIMED_OUT) >= NO_ERROR)
-        return INT_RESCHEDULE;
+    spin_lock(&thread_lock);

-    return INT_NO_RESCHEDULE;
+    enum handler_return ret = INT_NO_RESCHEDULE;
+    if (thread_unblock_from_wait_queue(thread, ERR_TIMED_OUT) >= NO_ERROR) {
+        ret = INT_RESCHEDULE;
+    }
+
+    spin_unlock(&thread_lock);
+
+    return ret;
 }

 /**
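Timer callbacks now run with no kernel locks held (see the timer_tick() changes below), so a callback that touches scheduler state must take thread_lock explicitly; plain spin_lock() suffices because interrupts are already off at interrupt time. A sketch with a hypothetical callback:

    static enum handler_return my_callback(timer_t *t, lk_time_t now, void *arg)
    {
        enum handler_return ret = INT_NO_RESCHEDULE;

        spin_lock(&thread_lock);        /* ints already disabled in irq context */
        /* ... unblock a thread, decide whether to reschedule ... */
        spin_unlock(&thread_lock);

        return ret;
    }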
@@ -867,7 +870,8 @@ status_t wait_queue_block(wait_queue_t *wait, lk_time_t timeout)
 #if THREAD_CHECKS
     ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
     ASSERT(current_thread->state == THREAD_RUNNING);
-    ASSERT(in_critical_section());
+    ASSERT(arch_ints_disabled());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     if (timeout == 0)

@@ -885,7 +889,7 @@ status_t wait_queue_block(wait_queue_t *wait, lk_time_t timeout)
         timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
     }

-    thread_block();
+    thread_resched();

     /* we don't really know if the timer fired or not, so it's better safe to try to cancel it */
     if (timeout != INFINITE_TIME) {

@@ -918,7 +922,8 @@ int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue

 #if THREAD_CHECKS
     ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
-    ASSERT(in_critical_section());
+    ASSERT(arch_ints_disabled());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     t = list_remove_head_type(&wait->list, thread_t, queue_node);

@@ -972,7 +977,8 @@ int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue

 #if THREAD_CHECKS
     ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
-    ASSERT(in_critical_section());
+    ASSERT(arch_ints_disabled());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     if (reschedule && wait->count > 0) {

@@ -1017,7 +1023,8 @@ void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
 {
 #if THREAD_CHECKS
     ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
-    ASSERT(in_critical_section());
+    ASSERT(arch_ints_disabled());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif
     wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
     wait->magic = 0;

@@ -1038,8 +1045,9 @@ status_t thread_unblock_from_wait_queue(thread_t *t, status_t wait_queue_error)
 {
 #if THREAD_CHECKS
-    ASSERT(in_critical_section());
     ASSERT(t->magic == THREAD_MAGIC);
+    ASSERT(arch_ints_disabled());
+    ASSERT(spin_lock_held(&thread_lock));
 #endif

     if (t->state != THREAD_BLOCKED)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2009 Travis Geiselbrecht
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files

@@ -41,12 +41,14 @@
 #include <kernel/thread.h>
 #include <kernel/timer.h>
 #include <kernel/debug.h>
+#include <kernel/spinlock.h>
 #include <platform/timer.h>
 #include <platform.h>

 #define LOCAL_TRACE 0

-static struct list_node timer_queue;
+static struct list_node timer_queue = LIST_INITIAL_VALUE(timer_queue);
+static spin_lock_t timer_lock = SPIN_LOCK_INITIAL_VALUE;

 static enum handler_return timer_tick(void *arg, lk_time_t now);

@@ -62,6 +64,8 @@ static void insert_timer_in_queue(timer_t *timer)
 {
     timer_t *entry;

+    DEBUG_ASSERT(arch_ints_disabled());
+
     LTRACEF("timer %p, scheduled %lu, periodic %lu\n", timer, timer->scheduled_time, timer->periodic_time);

     list_for_every_entry(&timer_queue, entry, timer_t, node) {

@@ -95,7 +99,8 @@ static void timer_set(timer_t *timer, lk_time_t delay, lk_time_t period, timer_c

     LTRACEF("scheduled time %lu\n", timer->scheduled_time);

-    enter_critical_section();
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&timer_lock, state);

     insert_timer_in_queue(timer);

@@ -107,7 +112,7 @@ static void timer_set(timer_t *timer, lk_time_t delay, lk_time_t period, timer_c
     }
 #endif

-    exit_critical_section();
+    spin_unlock_irqrestore(&timer_lock, state);
 }

 /**

@@ -159,7 +164,8 @@ void timer_cancel(timer_t *timer)
 {
     DEBUG_ASSERT(timer->magic == TIMER_MAGIC);

-    enter_critical_section();
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&timer_lock, state);

 #if PLATFORM_HAS_DYNAMIC_TIMER
     timer_t *oldhead = list_peek_head_type(&timer_queue, timer_t, node);

@@ -195,7 +201,7 @@ void timer_cancel(timer_t *timer)
     }
 #endif

-    exit_critical_section();
+    spin_unlock_irqrestore(&timer_lock, state);
 }

 /* called at interrupt time to process any pending timers */

@@ -204,11 +210,15 @@ static enum handler_return timer_tick(void *arg, lk_time_t now)
     timer_t *timer;
     enum handler_return ret = INT_NO_RESCHEDULE;

+    DEBUG_ASSERT(arch_ints_disabled());
+
     THREAD_STATS_INC(timer_ints);
     // KEVLOG_TIMER_TICK(); // enable only if necessary

     LTRACEF("now %lu, sp %p\n", now, __GET_FRAME());

+    spin_lock(&timer_lock);
+
     for (;;) {
         /* see if there's an event to process */
         timer = list_peek_head_type(&timer_queue, timer_t, node);

@@ -223,6 +233,9 @@ static enum handler_return timer_tick(void *arg, lk_time_t now)
         DEBUG_ASSERT(timer && timer->magic == TIMER_MAGIC);
         list_delete(&timer->node);

+        /* we pulled it off the list, release the list lock to handle it */
+        spin_unlock(&timer_lock);
+
         LTRACEF("dequeued timer %p, scheduled %lu periodic %lu\n", timer, timer->scheduled_time, timer->periodic_time);

         THREAD_STATS_INC(timers);

@@ -234,6 +247,9 @@ static enum handler_return timer_tick(void *arg, lk_time_t now)
         if (timer->callback(timer, now, timer->arg) == INT_RESCHEDULE)
             ret = INT_RESCHEDULE;

+        /* it may have been requeued or periodic, grab the lock so we can safely inspect it */
+        spin_lock(&timer_lock);
+
         /* if it was a periodic timer and it hasn't been requeued
          * by the callback put it back in the list
          */

@@ -256,21 +272,24 @@ static enum handler_return timer_tick(void *arg, lk_time_t now)
         LTRACEF("setting new timer for %u msecs for event %p\n", (uint)delay, timer);
         platform_set_oneshot_timer(timer_tick, NULL, delay);
     }
+
+    /* we're done manipulating the timer queue */
+    spin_unlock(&timer_lock);
 #else
+    /* release the timer lock before calling the tick handler */
+    spin_unlock(&timer_lock);
+
     /* let the scheduler have a shot to do quantum expiration, etc */
+    /* in case of dynamic timer, the scheduler will set up a periodic timer */
     if (thread_timer_tick() == INT_RESCHEDULE)
         ret = INT_RESCHEDULE;
 #endif

-    DEBUG_ASSERT(in_critical_section());
     return ret;
 }

 void timer_init(void)
 {
     list_initialize(&timer_queue);

 #if !PLATFORM_HAS_DYNAMIC_TIMER
     /* register for a periodic timer tick */
     platform_set_periodic_timer(timer_tick, NULL, 10); /* 10ms */
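timer_tick() now walks the queue under timer_lock but drops the lock around each callback and retakes it afterwards to requeue periodic timers; that drop is what lets callbacks such as wait_queue_timeout_handler() take thread_lock without nesting it under timer_lock. A condensed sketch of the loop, with the expiry condition simplified:

    spin_lock(&timer_lock);
    for (;;) {
        timer = list_peek_head_type(&timer_queue, timer_t, node);
        if (!timer /* || not expired yet */)
            break;
        list_delete(&timer->node);

        spin_unlock(&timer_lock);       /* run the callback with no locks held */
        if (timer->callback(timer, now, timer->arg) == INT_RESCHEDULE)
            ret = INT_RESCHEDULE;
        spin_lock(&timer_lock);         /* retake before touching the queue */

        /* ... requeue here if periodic and not requeued by the callback ... */
    }
    spin_unlock(&timer_lock);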
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2013 Travis Geiselbrecht
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files

@@ -50,6 +50,7 @@ void cbuf_initialize_etc(cbuf_t *cbuf, size_t len, void *buf)
     cbuf->len_pow2 = log2_uint(len);
     cbuf->buf = buf;
     event_init(&cbuf->event, false, 0);
+    spin_lock_init(&cbuf->lock);

     LTRACEF("len %zd, len_pow2 %u\n", len, cbuf->len_pow2);
 }

@@ -75,7 +76,8 @@ size_t cbuf_write(cbuf_t *cbuf, const void *_buf, size_t len, bool canreschedule
     DEBUG_ASSERT(_buf);
     DEBUG_ASSERT(len < valpow2(cbuf->len_pow2));

-    enter_critical_section();
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&cbuf->lock, state);

     size_t write_len;
     size_t pos = 0;

@@ -99,9 +101,13 @@ size_t cbuf_write(cbuf_t *cbuf, const void *_buf, size_t len, bool canreschedule
     }

     if (cbuf->head != cbuf->tail)
-        event_signal(&cbuf->event, canreschedule);
+        event_signal(&cbuf->event, false);

-    exit_critical_section();
+    spin_unlock_irqrestore(&cbuf->lock, state);
+
+    // XXX convert to only rescheduling if
+    if (canreschedule)
+        thread_preempt();

     return pos;
 }

@@ -113,11 +119,15 @@ size_t cbuf_read(cbuf_t *cbuf, void *_buf, size_t buflen, bool block)
     DEBUG_ASSERT(cbuf);
     DEBUG_ASSERT(_buf);

-    enter_critical_section();
-
 retry:
+    // block on the cbuf outside of the lock, which may
+    // unblock us early and we'll have to double check below
     if (block)
         event_wait(&cbuf->event);

+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&cbuf->lock, state);
+
     // see if there's data available
     size_t ret = 0;
     if (cbuf->tail != cbuf->head) {

@@ -142,6 +152,7 @@ size_t cbuf_read(cbuf_t *cbuf, void *_buf, size_t buflen, bool block)
         }

         if (cbuf->tail == cbuf->head) {
+            DEBUG_ASSERT(pos > 0);
             // we've emptied the buffer, unsignal the event
             event_unsignal(&cbuf->event);
         }

@@ -149,7 +160,11 @@ size_t cbuf_read(cbuf_t *cbuf, void *_buf, size_t buflen, bool block)
         ret = pos;
     }

-    exit_critical_section();
+    spin_unlock_irqrestore(&cbuf->lock, state);
+
+    // we apparently blocked but raced with another thread and found no data, retry
+    if (block && ret == 0)
+        goto retry;

     return ret;
 }

@@ -158,7 +173,8 @@ size_t cbuf_write_char(cbuf_t *cbuf, char c, bool canreschedule)
 {
     DEBUG_ASSERT(cbuf);

-    enter_critical_section();
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&cbuf->lock, state);

     size_t ret = 0;
     if (cbuf_space_avail(cbuf) > 0) {

@@ -171,7 +187,7 @@ size_t cbuf_write_char(cbuf_t *cbuf, char c, bool canreschedule)
         event_signal(&cbuf->event, canreschedule);
     }

-    exit_critical_section();
+    spin_unlock_irqrestore(&cbuf->lock, state);

     return ret;
 }

@@ -181,11 +197,13 @@ size_t cbuf_read_char(cbuf_t *cbuf, char *c, bool block)
     DEBUG_ASSERT(cbuf);
     DEBUG_ASSERT(c);

-    enter_critical_section();
-
 retry:
     if (block)
         event_wait(&cbuf->event);

+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&cbuf->lock, state);
+
     // see if there's data available
     size_t ret = 0;
     if (cbuf->tail != cbuf->head) {

@@ -201,7 +219,10 @@ size_t cbuf_read_char(cbuf_t *cbuf, char *c, bool block)
         ret = 1;
     }

-    exit_critical_section();
+    spin_unlock_irqrestore(&cbuf->lock, state);
+
+    if (block && ret == 0)
+        goto retry;

     return ret;
 }
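cbuf_read() and cbuf_read_char() adopt a wait-outside-the-lock discipline: event_wait() can sleep and therefore cannot be called under a spinlock, so the reader waits first, then re-checks the buffer under cbuf->lock and loops if another reader won the race. Condensed from the diff above:

    retry:
        if (block)
            event_wait(&cbuf->event);           /* may sleep; no lock held */

        spin_lock_saved_state_t state;
        spin_lock_irqsave(&cbuf->lock, state);
        size_t ret = 0;
        /* ... copy out data if tail != head ... */
        spin_unlock_irqrestore(&cbuf->lock, state);

        if (block && ret == 0)
            goto retry;                         /* raced; wait again */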
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2009,2012,2014 Travis Geiselbrecht
+ * Copyright (c) 2008-2009,2012-2014 Travis Geiselbrecht
 * Copyright (c) 2009 Corey Tabaka
 *
 * Permission is hereby granted, free of charge, to any person obtaining

@@ -32,6 +32,7 @@
 #include <string.h>
 #include <kernel/thread.h>
 #include <kernel/mutex.h>
+#include <kernel/spinlock.h>
 #include <lib/heap.h>

 #define LOCAL_TRACE 0

@@ -87,6 +88,7 @@ struct heap {
     mutex_t lock;
     struct list_node free_list;
     struct list_node delayed_free_list;
+    spin_lock_t delayed_free_lock;
 };

 // heap static vars

@@ -124,12 +126,15 @@ static void heap_dump(void)
     list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
         dump_free_chunk(chunk);
     }
     mutex_release(&theheap.lock);

     dprintf(INFO, "\tdelayed free list:\n");
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&theheap.delayed_free_lock, state);
     list_for_every_entry(&theheap.delayed_free_list, chunk, struct free_heap_chunk, node) {
         dump_free_chunk(chunk);
     }
-    mutex_release(&theheap.lock);
+    spin_unlock_irqrestore(&theheap.delayed_free_lock, state);
 }

 static void heap_test(void)

@@ -269,13 +274,14 @@ static void heap_free_delayed_list(void)

     list_initialize(&list);

-    enter_critical_section();
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&theheap.delayed_free_lock, state);

     struct free_heap_chunk *chunk;
     while ((chunk = list_remove_head_type(&theheap.delayed_free_list, struct free_heap_chunk, node))) {
         list_add_head(&list, &chunk->node);
     }
-    exit_critical_section();
+    spin_unlock_irqrestore(&theheap.delayed_free_lock, state);

     while ((chunk = list_remove_head_type(&list, struct free_heap_chunk, node))) {
         LTRACEF("freeing chunk %p\n", chunk);

@@ -464,9 +470,10 @@ void heap_delayed_free(void *ptr)

     struct free_heap_chunk *chunk = heap_create_free_chunk(as->ptr, as->size, false);

-    enter_critical_section();
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&theheap.delayed_free_lock, state);
     list_add_head(&theheap.delayed_free_list, &chunk->node);
-    exit_critical_section();
+    spin_unlock_irqrestore(&theheap.delayed_free_lock, state);
 }

 void heap_get_stats(struct heap_stats *ptr)

@@ -541,6 +548,7 @@ void heap_init(void)

     // initialize the delayed free list
     list_initialize(&theheap.delayed_free_list);
+    spin_lock_init(&theheap.delayed_free_lock);

     // set the heap range
 #if WITH_KERNEL_VM
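The heap keeps its mutex for the main free list but gives the delayed-free list its own spinlock, presumably so heap_delayed_free() stays callable from contexts that cannot block on a mutex; the drain in heap_free_delayed_list() then happens in thread context. The handoff pattern, condensed from the diff:

    /* producer side: safe where a mutex is not */
    spin_lock_saved_state_t state;
    spin_lock_irqsave(&theheap.delayed_free_lock, state);
    list_add_head(&theheap.delayed_free_list, &chunk->node);
    spin_unlock_irqrestore(&theheap.delayed_free_lock, state);

    /* consumer side: move everything to a private list under the spinlock,
       then perform the real (mutex-protected) frees outside of it */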
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Travis Geiselbrecht
+ * Copyright (c) 2013-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files

@@ -67,8 +67,6 @@ static void call_constructors(void)
 void lk_main(ulong arg0, ulong arg1, ulong arg2, ulong arg3) __NO_RETURN __EXTERNALLY_VISIBLE;
 void lk_main(ulong arg0, ulong arg1, ulong arg2, ulong arg3)
 {
-    inc_critical_section();
-
     // save the boot args
     lk_boot_args[0] = arg0;
     lk_boot_args[1] = arg1;