[kernel] add new thread lifetime routines
- add proper thread_join/thread_detach mechanism
- threads by default start in a joinable mode, which means another thread will have to thread_join() them to clean up their resources
- if a thread is marked detached it will clean itself up when exiting
- the return code is now actually readable via thread_join()
- allow the thread struct and stack to be passed into thread_create_etc()
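For reference, a minimal usage sketch of the new lifetime API, assuming the declarations added in include/kernel/thread.h below; the worker routine and variable names are illustrative, not part of the commit:

    static int worker(void *arg)
    {
        /* ... do some work ... */
        return 42;
    }

    /* joinable (the default): another thread must thread_join() it to reclaim its resources */
    thread_t *t = thread_create("worker", &worker, NULL, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
    thread_resume(t);

    int retcode;
    thread_join(t, &retcode, INFINITE_TIME); /* blocks until worker exits; retcode becomes 42 */

    /* detached: the thread tears itself down when it exits and must not be joined */
    thread_t *d = thread_create("background", &worker, NULL, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
    thread_detach_and_resume(d);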
@@ -51,5 +51,6 @@
#define ERR_RECURSE_TOO_DEEP -23
#define ERR_NOT_SUPPORTED -24
#define ERR_TOO_BIG -25
#define ERR_THREAD_DETACHED -26

#endif

@@ -29,6 +29,7 @@
#include <arch/defines.h>
#include <arch/ops.h>
#include <arch/thread.h>
#include <kernel/wait.h>

enum thread_state {
THREAD_SUSPENDED = 0,
@@ -46,6 +47,10 @@ enum thread_tls_list {
MAX_TLS_ENTRY
};

#define THREAD_FLAG_DETACHED 0x1
#define THREAD_FLAG_FREE_STACK 0x2
#define THREAD_FLAG_FREE_STRUCT 0x4

#define THREAD_MAGIC 'thrd'

typedef struct thread {
@@ -58,6 +63,7 @@ typedef struct thread {
enum thread_state state;
int saved_critical_section_count;
int remaining_quantum;
unsigned int flags;

/* if blocked, a pointer to the wait queue */
struct wait_queue *blocking_wait_queue;
@@ -76,6 +82,7 @@ typedef struct thread {

/* return code */
int retcode;
struct wait_queue retcode_wait_queue;

/* thread local storage */
uint32_t tls[MAX_TLS_ENTRY];
@@ -107,9 +114,13 @@ void thread_become_idle(void) __NO_RETURN;
void thread_set_name(const char *name);
void thread_set_priority(int priority);
thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size);
thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size);
status_t thread_resume(thread_t *);
void thread_exit(int retcode) __NO_RETURN;
void thread_sleep(lk_time_t delay);
status_t thread_detach(thread_t *t);
status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout);
status_t thread_detach_and_resume(thread_t *t);

void dump_thread(thread_t *t);
void dump_all_threads(void);
@@ -133,20 +144,25 @@ extern int critical_section_count;

static inline __ALWAYS_INLINE void enter_critical_section(void)
{
CF;
if (critical_section_count == 0)
arch_disable_ints();
critical_section_count++;
CF;
}

static inline __ALWAYS_INLINE void exit_critical_section(void)
{
CF;
critical_section_count--;
if (critical_section_count == 0)
arch_enable_ints();
CF;
}

static inline __ALWAYS_INLINE bool in_critical_section(void)
{
CF;
return critical_section_count > 0;
}

@@ -167,48 +183,6 @@ static inline __ALWAYS_INLINE uint32_t tls_set(uint entry, uint32_t val)
return oldval;
}

/* wait queue stuff */
#define WAIT_QUEUE_MAGIC 'wait'

typedef struct wait_queue {
int magic;
struct list_node list;
int count;
} wait_queue_t;

/* wait queue primitive */
/* NOTE: must be inside critical section when using these */
void wait_queue_init(wait_queue_t *);

/*
* release all the threads on this wait queue with a return code of ERR_OBJECT_DESTROYED.
* the caller must assure that no other threads are operating on the wait queue during or
* after the call.
*/
void wait_queue_destroy(wait_queue_t *, bool reschedule);

/*
* block on a wait queue.
* return status is whatever the caller of wait_queue_wake_*() specifies.
* a timeout other than INFINITE_TIME will set abort after the specified time
* and return ERR_TIMED_OUT. a timeout of 0 will immediately return.
*/
status_t wait_queue_block(wait_queue_t *, lk_time_t timeout);

/*
* release one or more threads from the wait queue.
* reschedule = should the system reschedule if any is released.
* wait_queue_error = what wait_queue_block() should return for the blocking thread.
*/
int wait_queue_wake_one(wait_queue_t *, bool reschedule, status_t wait_queue_error);
int wait_queue_wake_all(wait_queue_t *, bool reschedule, status_t wait_queue_error);

/*
* remove the thread from whatever wait queue it's in.
* return an error if the thread is not currently blocked (or is the current thread)
*/
status_t thread_unblock_from_wait_queue(thread_t *t, bool reschedule, status_t wait_queue_error);

/* thread level statistics */
#if DEBUGLEVEL > 1
#define THREAD_STATS 1
include/kernel/wait.h (new file)
@@ -0,0 +1,76 @@
/*
* Copyright (c) 2008-2012 Travis Geiselbrecht
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __KERNEL_WAIT_H
#define __KERNEL_WAIT_H

#include <sys/types.h>
#include <list.h>
#include <compiler.h>
#include <arch/defines.h>
#include <arch/ops.h>
#include <arch/thread.h>

/* wait queue stuff */
#define WAIT_QUEUE_MAGIC 'wait'

typedef struct wait_queue {
int magic;
struct list_node list;
int count;
} wait_queue_t;

/* wait queue primitive */
/* NOTE: must be inside critical section when using these */
void wait_queue_init(wait_queue_t *);

/*
* release all the threads on this wait queue with a return code of ERR_OBJECT_DESTROYED.
* the caller must assure that no other threads are operating on the wait queue during or
* after the call.
*/
void wait_queue_destroy(wait_queue_t *, bool reschedule);

/*
* block on a wait queue.
* return status is whatever the caller of wait_queue_wake_*() specifies.
* a timeout other than INFINITE_TIME will set abort after the specified time
* and return ERR_TIMED_OUT. a timeout of 0 will immediately return.
*/
status_t wait_queue_block(wait_queue_t *, lk_time_t timeout);

/*
* release one or more threads from the wait queue.
* reschedule = should the system reschedule if any is released.
* wait_queue_error = what wait_queue_block() should return for the blocking thread.
*/
int wait_queue_wake_one(wait_queue_t *, bool reschedule, status_t wait_queue_error);
int wait_queue_wake_all(wait_queue_t *, bool reschedule, status_t wait_queue_error);

/*
* remove the thread from whatever wait queue it's in.
* return an error if the thread is not currently blocked (or is the current thread)
*/
status_t thread_unblock_from_wait_queue(struct thread *t, bool reschedule, status_t wait_queue_error);

#endif

@@ -88,17 +88,15 @@ void kmain(void)
dprintf(SPEW, "initializing threads\n");
thread_init();

// initialize the dpc system
dprintf(SPEW, "initializing dpc\n");
dpc_init();

// initialize kernel timers
dprintf(SPEW, "initializing timers\n");
timer_init();

// create a thread to complete system initialization
dprintf(SPEW, "creating bootstrap completion thread\n");
thread_resume(thread_create("bootstrap2", &bootstrap2, NULL, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE));
thread_t *t = thread_create("bootstrap2", &bootstrap2, NULL, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
thread_detach(t);
thread_resume(t);

// become the idle thread and enable interrupts to start the scheduler
thread_become_idle();
@@ -112,6 +110,11 @@ static int bootstrap2(void *arg)

arch_init();

// initialize the dpc system
#if WITH_LIB_DPC
dpc_init();
#endif

// XXX put this somewhere else
#if WITH_LIB_BIO
bio_init();
kernel/thread.c
@@ -41,6 +41,7 @@
#include <kernel/timer.h>
#include <platform.h>
#include <target.h>
#include <lib/heap.h>

#if DEBUGLEVEL > 1
#define THREAD_CHECKS 1
@@ -139,13 +140,16 @@ static void init_thread_struct(thread_t *t, const char *name)
*
* @return Pointer to thread object, or NULL on failure.
*/
thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size)
{
thread_t *t;
unsigned int flags = 0;

t = malloc(sizeof(thread_t));
if (!t)
return NULL;
if (!t) {
t = malloc(sizeof(thread_t));
if (!t)
return NULL;
flags |= THREAD_FLAG_FREE_STRUCT;
}

init_thread_struct(t, name);

@@ -157,15 +161,25 @@ thread_t *thread_create(const char *name, thread_start_routine entry, void *arg,
t->blocking_wait_queue = NULL;
t->wait_queue_block_ret = NO_ERROR;

t->retcode = 0;
wait_queue_init(&t->retcode_wait_queue);

/* create the stack */
t->stack = malloc(stack_size);
if (!t->stack) {
free(t);
return NULL;
if (!stack) {
t->stack = malloc(stack_size);
if (!t->stack) {
if (flags & THREAD_FLAG_FREE_STRUCT)
free(t);
return NULL;
}
flags |= THREAD_FLAG_FREE_STACK;
}

t->stack_size = stack_size;

/* save whether or not we need to free the thread struct and/or stack */
t->flags = flags;

/* inheirit thread local storage from the parent */
int i;
for (i=0; i < MAX_TLS_ENTRY; i++)
@@ -182,6 +196,11 @@ thread_t *thread_create(const char *name, thread_start_routine entry, void *arg,
return t;
}

thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
{
return thread_create_etc(NULL, name, entry, arg, priority, NULL, stack_size);
}

/**
* @brief Make a suspended thread executable.
*
@@ -199,40 +218,99 @@ status_t thread_resume(thread_t *t)
ASSERT(t->state != THREAD_DEATH);
#endif

if (t->state == THREAD_READY || t->state == THREAD_RUNNING)
return ERR_NOT_SUSPENDED;

enter_critical_section();
t->state = THREAD_READY;
insert_in_run_queue_head(t);
thread_yield();
if (t->state == THREAD_SUSPENDED) {
t->state = THREAD_READY;
insert_in_run_queue_head(t);
thread_yield();
}
exit_critical_section();

return NO_ERROR;
}

static void thread_cleanup_dpc(void *thread)
status_t thread_detach_and_resume(thread_t *t)
{
thread_t *t = (thread_t *)thread;
status_t err;
err = thread_detach(t);
if (err < 0)
return err;
return thread_resume(t);
}

// dprintf(SPEW, "thread_cleanup_dpc: thread %p (%s)\n", t, t->name);
status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout)
{
#if THREAD_CHECKS
ASSERT(t->magic == THREAD_MAGIC);
#endif

enter_critical_section();

if (t->flags & THREAD_FLAG_DETACHED) {
/* the thread is detached, go ahead and exit */
exit_critical_section();
return ERR_THREAD_DETACHED;
}

/* wait for the thread to die */
if (t->state != THREAD_DEATH) {
status_t err = wait_queue_block(&t->retcode_wait_queue, timeout);
if (err < 0) {
exit_critical_section();
return err;
}
}

#if THREAD_CHECKS
ASSERT(t->magic == THREAD_MAGIC);
ASSERT(t->state == THREAD_DEATH);
ASSERT(t->blocking_wait_queue == NULL);
ASSERT(!list_in_list(&t->queue_node));
#endif

/* save the return code */
if (retcode)
*retcode = t->retcode;

/* remove it from the master thread list */
enter_critical_section();
list_delete(&t->thread_list_node);

/* clear the structure's magic */
t->magic = 0;

exit_critical_section();

/* free its stack and the thread structure itself */
if (t->stack)
if (t->flags & THREAD_FLAG_FREE_STACK && t->stack)
free(t->stack);

free(t);
if (t->flags & THREAD_FLAG_FREE_STRUCT)
free(t);

return NO_ERROR;
}

status_t thread_detach(thread_t *t)
{
#if THREAD_CHECKS
ASSERT(t->magic == THREAD_MAGIC);
#endif

enter_critical_section();

/* if anyone is blocked on this thread, wake them up with a specific return code */
wait_queue_wake_all(&current_thread->retcode_wait_queue, false, ERR_THREAD_DETACHED);

/* if it's already dead, then just do what join would have and exit */
if (t->state == THREAD_DEATH) {
t->flags &= ~THREAD_FLAG_DETACHED; /* makes susre thread_join continues */
exit_critical_section();
return thread_join(t, NULL, 0);
} else {
t->flags |= THREAD_FLAG_DETACHED;
exit_critical_section();
return NO_ERROR;
}
}

/**
@@ -257,8 +335,24 @@ void thread_exit(int retcode)
current_thread->state = THREAD_DEATH;
current_thread->retcode = retcode;

/* schedule a dpc to clean ourselves up */
dpc_queue(thread_cleanup_dpc, (void *)current_thread, DPC_FLAG_NORESCHED);
/* if we're detached, then do our teardown here */
if (current_thread->flags & THREAD_FLAG_DETACHED) {
/* remove it from the master thread list */
list_delete(&current_thread->thread_list_node);

/* clear the structure's magic */
current_thread->magic = 0;

/* free its stack and the thread structure itself */
if (current_thread->flags & THREAD_FLAG_FREE_STACK && current_thread->stack)
heap_delayed_free(current_thread->stack);

if (current_thread->flags & THREAD_FLAG_FREE_STRUCT)
heap_delayed_free(current_thread);
} else {
/* signal if anyone is waiting */
wait_queue_wake_all(&current_thread->retcode_wait_queue, false, 0);
}

/* reschedule */
thread_resched();
@@ -311,21 +405,11 @@ void thread_resched(void)

newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);

#if THREAD_CHECKS
ASSERT(newthread);
#endif

if (list_is_empty(&run_queue[next_queue]))
run_queue_bitmap &= ~(1<<next_queue);

#if 0
// XXX make this more efficient
newthread = NULL;
for (i=HIGHEST_PRIORITY; i >= LOWEST_PRIORITY; i--) {
newthread = list_remove_head_type(&run_queue[i], thread_t, queue_node);
if (newthread)
break;
}
#if THREAD_CHECKS
ASSERT(newthread);
#endif

// printf("newthread: ");
@@ -553,6 +637,8 @@ void thread_init_early(void)
t->priority = HIGHEST_PRIORITY;
t->state = THREAD_RUNNING;
t->saved_critical_section_count = 1;
t->flags = THREAD_FLAG_DETACHED;
wait_queue_init(&t->retcode_wait_queue);
list_add_head(&thread_list, &t->thread_list_node);
current_thread = t;
}
@@ -604,7 +690,6 @@ void thread_become_idle(void)
thread_set_priority(IDLE_PRIORITY);
idle_thread = current_thread;

/* release the implicit boot critical section and yield to the scheduler */
exit_critical_section();
thread_yield();
@@ -612,15 +697,30 @@ void thread_become_idle(void)
idle_thread_routine();
}

static const char *thread_state_to_str(enum thread_state state)
{
switch (state) {
case THREAD_SUSPENDED: return "susp";
case THREAD_READY: return "rdy";
case THREAD_RUNNING: return "run";
case THREAD_BLOCKED: return "blok";
case THREAD_SLEEPING: return "slep";
case THREAD_DEATH: return "deth";
default: return "unkn";
}
}

/**
* @brief Dump debugging info about the specified thread.
*/
void dump_thread(thread_t *t)
{
dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
dprintf(INFO, "\tstate %d, priority %d, remaining quantum %d, critical section %d\n", t->state, t->priority, t->remaining_quantum, t->saved_critical_section_count);
dprintf(INFO, "\tstate %s, priority %d, remaining quantum %d, critical section %d\n",
thread_state_to_str(t->state), t->priority, t->remaining_quantum,
t->saved_critical_section_count);
dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
dprintf(INFO, "\tentry %p, arg %p\n", t->entry, t->arg);
dprintf(INFO, "\tentry %p, arg %p, flags 0x%x\n", t->entry, t->arg, t->flags);
dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
dprintf(INFO, "\ttls:");
int i;
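As a closing illustration, a sketch of how the thread_create_etc() variant added above might be used with a caller-provided thread struct and stack, based on the signature and flag handling in the diff; static_thread, static_stack, and worker are hypothetical names, not part of the commit:

    static thread_t static_thread;
    static uint8_t static_stack[DEFAULT_STACK_SIZE];

    /* THREAD_FLAG_FREE_STRUCT and THREAD_FLAG_FREE_STACK stay clear here,
       so the kernel will not free either allocation on join or exit */
    thread_t *t = thread_create_etc(&static_thread, "static worker", &worker, NULL,
                                    DEFAULT_PRIORITY, static_stack, sizeof(static_stack));
    thread_detach_and_resume(t);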