[arch][avr32] first stab at threading support

Travis Geiselbrecht
2010-06-15 22:22:50 -07:00
parent 0bed6ea342
commit 223c9e7cb7
5 changed files with 91 additions and 31 deletions


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2009 Travis Geiselbrecht
* Copyright (c) 2009-2010 Travis Geiselbrecht
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
@@ -22,6 +22,7 @@
*/
#include <debug.h>
#include <arch.h>
#include <arch/avr32.h>
void arch_early_init(void)
{

arch/avr32/asm.S (new file, 56 lines added)

@@ -0,0 +1,56 @@
/*
* Copyright (c) 2010 Travis Geiselbrecht
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <asm.h>
/* context switch frame:
	vaddr_t r0;
	vaddr_t r1;
	vaddr_t r2;
	vaddr_t r3;
	vaddr_t r4;
	vaddr_t r5;
	vaddr_t r6;
	vaddr_t r7;
	vaddr_t r8;
	vaddr_t r9;
	vaddr_t r10;
	vaddr_t r11;
	vaddr_t r12;
	vaddr_t lr;
*/

/* void avr32_context_switch(addr_t *old_sp, addr_t new_sp); */
FUNCTION(avr32_context_switch)
	/* save old state */
	pushm	r0-r12,lr

	/* save old sp */
	st.w	r12[0], sp

	/* load new one */
	mov	sp, r11

	/* restore state and exit */
	popm	r0-r12,lr
	icall	lr
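A note on the argument mapping this routine assumes (first argument in r12, second in r11, which is how the st.w and mov above use them): the caller passes a place to save the outgoing thread's stack pointer plus the previously saved stack pointer of the incoming thread. A minimal C-side sketch of that contract, borrowing the thread_t and arch.sp names from thread.c further down in this commit, might read:

	#include <kernel/thread.h>	/* assumed lk header providing thread_t */

	/* sketch only, not part of this commit */
	extern void avr32_context_switch(addr_t *old_sp, addr_t new_sp);

	static void switch_to(thread_t *from, thread_t *to)
	{
		/* *old_sp (reached through r12) receives the current sp right
		 * after pushm saves r0-r12 and lr; new_sp (r11) becomes the
		 * live stack before popm reloads the incoming thread's state */
		avr32_context_switch(&from->arch.sp, to->arch.sp);
	}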


@@ -32,6 +32,7 @@ FUNCTION(arch_enable_ints)
msr cpsr_c, r0
bx lr
#endif
retal r12
/* void arch_disable_ints(void); */
FUNCTION(arch_disable_ints)
@@ -41,6 +42,7 @@ FUNCTION(arch_disable_ints)
msr cpsr_c, r0
bx lr
#endif
retal r12
/* int atomic_swap(int *ptr, int val); */
FUNCTION(atomic_swap)
@@ -49,6 +51,7 @@ FUNCTION(atomic_swap)
mov r0, r2
bx lr
#endif
retal r12
/* int atomic_add(int *ptr, int val); */
FUNCTION(atomic_add)
@@ -85,6 +88,7 @@ FUNCTION(atomic_add)
bx lr
#endif
#endif
retal r12
/* int atomic_and(int *ptr, int val); */
FUNCTION(atomic_and)
@@ -121,6 +125,7 @@ FUNCTION(atomic_and)
bx lr
#endif
#endif
retal r12
/* int atomic_or(int *ptr, int val); */
FUNCTION(atomic_or)
@@ -157,6 +162,7 @@ FUNCTION(atomic_or)
bx lr
#endif
#endif
retal r12
/* void arch_idle(); */
FUNCTION(arch_idle)
@@ -175,22 +181,14 @@ FUNCTION(arch_idle)
#endif
bx lr
#endif
retal r12
/* void arch_switch_stacks_and_call(addr_t call, addr_t stack) */
FUNCTION(arch_switch_stacks_and_call)
#if 0
mov sp, r1
bx r0
#endif
mov sp, r11
icall r12
/* uint32_t arch_cycle_count(void); */
FUNCTION(arch_cycle_count)
#if 0
#if ARM_CPU_CORTEX_A8
mrc p15, 0, r0, c9, c13, 0
#else
mov r0, #0
#endif
bx lr
#endif
retal 0
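The hunks above belong to a pre-existing assembly file whose ARM bodies are all compiled out, so for now each routine is a bare return and the interrupt and atomic entry points do nothing. Purely as an illustration of one conventional way to fill in an atomic on a single-core AVR32 part (not what this commit does), the read-modify-write can be bracketed by the interrupt disable/enable entry points above; a real version would also save and restore the prior interrupt state rather than unconditionally re-enabling:

	/* illustrative C sketch only: uniprocessor atomic_add by masking ints */
	extern void arch_disable_ints(void);
	extern void arch_enable_ints(void);

	int atomic_add(int *ptr, int val)
	{
		arch_disable_ints();	/* assumes these eventually mask the
					   global interrupt bit for real */
		int old = *ptr;
		*ptr = old + val;
		arch_enable_ints();	/* caveat: does not restore prior state */
		return old;		/* previous value, which the int return
					   type of these prototypes suggests */
	}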


@@ -14,9 +14,9 @@ OBJS += \
$(LOCAL_DIR)/thread.o \
$(LOCAL_DIR)/arch.o \
$(LOCAL_DIR)/cache.o \
$(LOCAL_DIR)/asm.o \
# $(LOCAL_DIR)/asm.o \
$(LOCAL_DIR)/cache-ops.o \
# $(LOCAL_DIR)/cache-ops.o \
$(LOCAL_DIR)/exceptions.o \
$(LOCAL_DIR)/faults.o \
$(LOCAL_DIR)/mmu.o \


@@ -28,22 +28,28 @@
#include <arch/avr32.h>
struct context_switch_frame {
vaddr_t r4;
vaddr_t r5;
vaddr_t r6;
vaddr_t r7;
vaddr_t r8;
vaddr_t r9;
vaddr_t r10;
vaddr_t r11;
vaddr_t lr;
vaddr_t r12;
vaddr_t r11;
vaddr_t r10;
vaddr_t r9;
vaddr_t r8;
vaddr_t r7;
vaddr_t r6;
vaddr_t r5;
vaddr_t r4;
vaddr_t r3;
vaddr_t r2;
vaddr_t r1;
vaddr_t r0;
};
extern void arm_context_switch(addr_t *old_sp, addr_t new_sp);
extern void avr32_context_switch(addr_t *old_sp, addr_t new_sp);
static void initial_thread_func(void) __NO_RETURN;
static void initial_thread_func(void)
{
PANIC_UNIMPLEMENTED;
#if 0
int ret;
@@ -63,13 +69,9 @@ static void initial_thread_func(void)
void arch_thread_initialize(thread_t *t)
{
#if 0
// create a default stack frame on the stack
vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;
// make sure the top of the stack is 8 byte aligned for EABI compliance
stack_top = ROUNDDOWN(stack_top, 8);
struct context_switch_frame *frame = (struct context_switch_frame *)(stack_top);
frame--;
@@ -79,12 +81,15 @@ void arch_thread_initialize(thread_t *t)
// set the stack pointer
t->arch.sp = (vaddr_t)frame;
#endif
printf("finished initializing thread stack: thread %p, sp 0x%x\n", t, t->arch.sp);
hexdump(t->arch.sp, 64);
}
void arch_context_switch(thread_t *oldthread, thread_t *newthread)
{
// dprintf("arch_context_switch: old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
// arm_context_switch(&oldthread->arch.sp, newthread->arch.sp);
printf("arch_context_switch: old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
hexdump(newthread->arch.sp, 64);
avr32_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
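Since arch_thread_initialize above keeps its ARM-derived body under #if 0, t->arch.sp is never actually prepared to match the new fourteen-word frame. Below is a hedged sketch of how it could be completed against the struct and avr32_context_switch added in this commit; the stack carving and ROUNDDOWN come straight from the disabled code, while zero-filling the frame and pointing lr at initial_thread_func are assumptions about intent rather than what the commit does, and memset is assumed to come from <string.h>:

	/* sketch only: one way to build the initial frame for a new thread */
	void arch_thread_initialize(thread_t *t)
	{
		/* carve a frame off the top of the stack, 8-byte aligned */
		vaddr_t stack_top = ROUNDDOWN((vaddr_t)t->stack + t->stack_size, 8);
		struct context_switch_frame *frame =
			(struct context_switch_frame *)stack_top;
		frame--;

		/* start with a clean register set */
		memset(frame, 0, sizeof(*frame));

		/* popm reloads lr from here and icall lr enters the thread */
		frame->lr = (vaddr_t)&initial_thread_func;

		/* the first avr32_context_switch into this thread starts here */
		t->arch.sp = (vaddr_t)frame;
	}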