10 Commits

Author SHA1 Message Date
Travis Geiselbrecht
6910a87972 WIP refactor int disable/restore 2019-12-17 18:42:56 -08:00
Travis Geiselbrecht
26596b0a65 [arch][arm-m] optimize the arch_interrupt_save routine a bit for cortex-m
From looking at the disassembly, the compiler wasn't doing a good job
with the overly complicated version shared with the larger arm cores.
Simplify it by recognizing that you can save the state directly from
PRIMASK and restore it more simply.
2019-12-05 23:42:38 -08:00
Travis Geiselbrecht
82b5f5a9f1 Merge remote-tracking branch 'github/vax' 2019-12-05 23:04:52 -08:00
Travis Geiselbrecht
d06fbd35c8 [arch][vax] redid the exception code, added timers
The port more or less fully works now, except for actual interrupt-driven
I/O and proper atomics.

Also hit a floating point exception in the string benchmark.
2019-10-21 02:52:38 -07:00
Travis Geiselbrecht
ac7683f84b [arch][vax] set a separate interrupt stack and clear some system registers on boot 2019-10-21 00:59:39 -07:00
Travis Geiselbrecht
23ebd514c1 [platform][vax] first working context switch
Timers aren't firing yet, so the system locks up as soon as any timeout
is involved. It's enough to run the command line for a bit.
2019-10-20 22:04:20 -07:00
Travis Geiselbrecht
d5ec654f9a [platform][vax] add some code to try to sniff the board type
This lets us select the console routines to use.
2019-10-17 23:23:32 -07:00
Travis Geiselbrecht
2760056b4b [kernel][novm] fix a warning that only shows up on vax 2019-10-17 22:49:45 -07:00
Travis Geiselbrecht
744ceef721 [platform][vax] add a tool to generate a MOP header
This is used by the legacy MOP protocol that many VAXes speak.

The image header is extremely minimal, but good enough to satisfy the
mopd daemon, source at https://github.com/qu1j0t3/mopd
2019-10-17 22:36:55 -07:00
Travis Geiselbrecht
89705cb065 [arch][vax] initial stab at booting on a vax
Booting on a real MicroVAX 3100/40 via netboot and on simh emulating a
MicroVAX 3900. Doesn't fully work, lots of stuff is stubbed out, but it
starts to run until it hits unimplemented bits and stops.
2019-10-12 21:54:04 -07:00
38 changed files with 1658 additions and 119 deletions

View File

@@ -279,9 +279,9 @@ void arch_quiesce(void) {
#if ARM_ISA_ARMV7
/* virtual to physical translation */
status_t arm_vtop(addr_t va, addr_t *pa) {
spin_lock_saved_state_t irqstate;
arch_interrupt_save_state_t irqstate;
arch_interrupt_save(&irqstate, SPIN_LOCK_FLAG_INTERRUPTS);
irqstate = arch_interrupt_save(SPIN_LOCK_FLAG_INTERRUPTS);
arm_write_ats1cpr(va & ~(PAGE_SIZE-1));
uint32_t par = arm_read_par();

View File

@@ -68,6 +68,80 @@ static inline bool arch_fiqs_disabled(void) {
return !!state;
}
#if !ARM_ISA_ARMV7M
enum {
/* ARM specific flags */
ARCH_INTERRUPT_SAVE_IRQ = 0x1,
ARCH_INTERRUPT_SAVE_FIQ = 0x2, /* Do not use unless IRQs are already disabled */
};
enum {
/* private */
ARCH_INTERRUPT_RESTORE_IRQ = 1,
ARCH_INTERRUPT_RESTORE_FIQ = 2,
};
static inline arch_interrupt_save_state_t
arch_interrupt_save(arch_interrupt_save_flags_t flags) {
arch_interrupt_save_state_t state = 0;
if ((flags & ARCH_INTERRUPT_SAVE_IRQ) && !arch_ints_disabled()) {
state |= ARCH_INTERRUPT_RESTORE_IRQ;
arch_disable_ints();
}
if ((flags & ARCH_INTERRUPT_SAVE_FIQ) && !arch_fiqs_disabled()) {
state |= ARCH_INTERRUPT_RESTORE_FIQ;
arch_disable_fiqs();
}
return state;
}
static inline void
arch_interrupt_restore(arch_interrupt_save_state_t old_state, arch_interrupt_save_flags_t flags) {
if ((flags & ARCH_INTERRUPT_SAVE_FIQ) && (old_state & ARCH_INTERRUPT_RESTORE_FIQ))
arch_enable_fiqs();
if ((flags & ARCH_INTERRUPT_SAVE_IRQ) && (old_state & ARCH_INTERRUPT_RESTORE_IRQ))
arch_enable_ints();
}
#else
/*
* slightly more optimized version of the interrupt save/restore bits for cortex-m
* processors.
*/
/* arm-m flags are mostly meaningless */
#define ARCH_INTERRUPT_SAVE_IRQ 1
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS ARCH_INTERRUPT_SAVE_IRQ
static inline arch_interrupt_save_state_t
arch_interrupt_save(arch_interrupt_save_flags_t flags) {
unsigned int state = 0;
if (flags == ARCH_INTERRUPT_SAVE_IRQ) {
__asm__ volatile("mrs %0, primask" : "=r"(state));
/* always disable ints, may be faster than testing and branching around it */
arch_disable_ints();
/* the state we return is just the saved value of PRIMASK */
}
return state;
}
static inline void
arch_interrupt_restore(arch_interrupt_save_state_t old_state, arch_interrupt_save_flags_t flags) {
/* test PRIMASK bit 0: if it was clear, interrupts were enabled before the save, so re-enable them */
if (flags == ARCH_INTERRUPT_SAVE_IRQ) {
if ((old_state & 0x1) == 0) {
arch_enable_ints();
}
}
}
#endif
static inline bool arch_in_int_handler(void) {
#if ARM_ISA_ARMV7M
uint32_t ipsr;
@@ -294,6 +368,41 @@ static inline bool arch_ints_disabled(void) {
return !!state;
}
/*
* slightly more optimized version of the interrupt save/restore bits for cortex-m
* processors.
*/
/* arm-m flags are mostly meaningless */
#define ARCH_INTERRUPT_SAVE_IRQ 1
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS ARCH_INTERRUPT_SAVE_IRQ
static inline arch_interrupt_save_state_t
arch_interrupt_save(arch_interrupt_save_flags_t flags) {
unsigned int state = 0;
if (flags == ARCH_INTERRUPT_SAVE_IRQ) {
__asm__ volatile("mrs %0, primask" : "=r"(state));
/* always disable ints, may be faster than testing and branching around it */
arch_disable_ints();
/* the state we return is just the saved value of PRIMASK */
}
return state;
}
static inline void
arch_interrupt_restore(arch_interrupt_save_state_t old_state, arch_interrupt_save_flags_t flags) {
/* test PRIMASK bit 0: if it was clear, interrupts were enabled before the save, so re-enable them */
if (flags == ARCH_INTERRUPT_SAVE_IRQ) {
if ((old_state & 0x1) == 0) {
arch_enable_ints();
}
}
}
static inline int atomic_add(volatile int *ptr, int val) {
int temp;
bool state;
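
A minimal caller-side sketch of the new interface added above (not part of this diff; it only uses names introduced in this change):

/* save and mask IRQs around a short critical section, then restore the prior state */
arch_interrupt_save_state_t istate = arch_interrupt_save(ARCH_INTERRUPT_SAVE_IRQ);
/* ... code that must run with IRQs disabled ... */
arch_interrupt_restore(istate, ARCH_INTERRUPT_SAVE_IRQ);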

View File

@@ -17,9 +17,6 @@ __BEGIN_CDECLS
typedef unsigned long spin_lock_t;
typedef unsigned long spin_lock_saved_state_t;
typedef unsigned long spin_lock_save_flags_t;
static inline void arch_spin_lock_init(spin_lock_t *lock) {
*lock = SPIN_LOCK_INITIAL_VALUE;
}
@@ -50,40 +47,7 @@ static inline void arch_spin_unlock(spin_lock_t *lock) {
#endif
/* ARM specific flags */
#define SPIN_LOCK_FLAG_IRQ 0x40000000
#define SPIN_LOCK_FLAG_FIQ 0x80000000 /* Do not use unless IRQs are already disabled */
#define SPIN_LOCK_FLAG_IRQ_FIQ (SPIN_LOCK_FLAG_IRQ | SPIN_LOCK_FLAG_FIQ)
/* default arm flag is to just disable plain irqs */
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS SPIN_LOCK_FLAG_IRQ
enum {
/* private */
SPIN_LOCK_STATE_RESTORE_IRQ = 1,
SPIN_LOCK_STATE_RESTORE_FIQ = 2,
};
static inline void
arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags) {
spin_lock_saved_state_t state = 0;
if ((flags & SPIN_LOCK_FLAG_IRQ) && !arch_ints_disabled()) {
state |= SPIN_LOCK_STATE_RESTORE_IRQ;
arch_disable_ints();
}
if ((flags & SPIN_LOCK_FLAG_FIQ) && !arch_fiqs_disabled()) {
state |= SPIN_LOCK_STATE_RESTORE_FIQ;
arch_disable_fiqs();
}
*statep = state;
}
static inline void
arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags) {
if ((flags & SPIN_LOCK_FLAG_FIQ) && (old_state & SPIN_LOCK_STATE_RESTORE_FIQ))
arch_enable_fiqs();
if ((flags & SPIN_LOCK_FLAG_IRQ) && (old_state & SPIN_LOCK_STATE_RESTORE_IRQ))
arch_enable_ints();
}
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS ARCH_INTERRUPT_SAVE_IRQ
__END_CDECLS

View File

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
/* forward declarations of types used by arch_interrupt_save and _restore */
typedef unsigned int arch_interrupt_save_state_t;
typedef int arch_interrupt_save_flags_t;

View File

@@ -57,6 +57,41 @@ static inline bool arch_fiqs_disabled(void) {
return !!state;
}
enum {
/* ARM specific flags */
ARCH_INTERRUPT_SAVE_IRQ = 0x1,
ARCH_INTERRUPT_SAVE_FIQ = 0x2, /* Do not use unless IRQs are already disabled */
};
enum {
/* private */
ARCH_INTERRUPT_RESTORE_IRQ = 1,
ARCH_INTERRUPT_RESTORE_FIQ = 2,
};
static inline arch_interrupt_save_state_t
arch_interrupt_save(arch_interrupt_save_flags_t flags) {
arch_interrupt_save_state_t state = 0;
if ((flags & ARCH_INTERRUPT_SAVE_IRQ) && !arch_ints_disabled()) {
state |= ARCH_INTERRUPT_RESTORE_IRQ;
arch_disable_ints();
}
if ((flags & ARCH_INTERRUPT_SAVE_FIQ) && !arch_fiqs_disabled()) {
state |= ARCH_INTERRUPT_RESTORE_FIQ;
arch_disable_fiqs();
}
return state;
}
static inline void
arch_interrupt_restore(arch_interrupt_save_state_t old_state, arch_interrupt_save_flags_t flags) {
if ((flags & ARCH_INTERRUPT_SAVE_FIQ) && (old_state & ARCH_INTERRUPT_RESTORE_FIQ))
arch_enable_fiqs();
if ((flags & ARCH_INTERRUPT_SAVE_IRQ) && (old_state & ARCH_INTERRUPT_RESTORE_IRQ))
arch_enable_ints();
}
#define mb() __asm__ volatile("dsb sy" : : : "memory")
#define rmb() __asm__ volatile("dsb ld" : : : "memory")
#define wmb() __asm__ volatile("dsb st" : : : "memory")

View File

@@ -14,9 +14,6 @@
typedef unsigned long spin_lock_t;
typedef unsigned int spin_lock_saved_state_t;
typedef unsigned int spin_lock_save_flags_t;
#if WITH_SMP
void arch_spin_lock(spin_lock_t *lock);
int arch_spin_trylock(spin_lock_t *lock);
@@ -43,52 +40,6 @@ static inline bool arch_spin_lock_held(spin_lock_t *lock) {
return *lock != 0;
}
enum {
/* Possible future flags:
* SPIN_LOCK_FLAG_PMR_MASK = 0x000000ff,
* SPIN_LOCK_FLAG_PREEMPTION = 0x10000000,
* SPIN_LOCK_FLAG_SET_PMR = 0x20000000,
*/
/* ARM specific flags */
SPIN_LOCK_FLAG_IRQ = 0x40000000,
SPIN_LOCK_FLAG_FIQ = 0x80000000, /* Do not use unless IRQs are already disabled */
SPIN_LOCK_FLAG_IRQ_FIQ = SPIN_LOCK_FLAG_IRQ | SPIN_LOCK_FLAG_FIQ,
/* Generic flags */
SPIN_LOCK_FLAG_INTERRUPTS = SPIN_LOCK_FLAG_IRQ,
};
/* default arm flag is to just disable plain irqs */
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS SPIN_LOCK_FLAG_INTERRUPTS
enum {
/* private */
SPIN_LOCK_STATE_RESTORE_IRQ = 1,
SPIN_LOCK_STATE_RESTORE_FIQ = 2,
};
static inline void
arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags) {
spin_lock_saved_state_t state = 0;
if ((flags & SPIN_LOCK_FLAG_IRQ) && !arch_ints_disabled()) {
state |= SPIN_LOCK_STATE_RESTORE_IRQ;
arch_disable_ints();
}
if ((flags & SPIN_LOCK_FLAG_FIQ) && !arch_fiqs_disabled()) {
state |= SPIN_LOCK_STATE_RESTORE_FIQ;
arch_disable_fiqs();
}
*statep = state;
}
static inline void
arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags) {
if ((flags & SPIN_LOCK_FLAG_FIQ) && (old_state & SPIN_LOCK_STATE_RESTORE_FIQ))
arch_enable_fiqs();
if ((flags & SPIN_LOCK_FLAG_IRQ) && (old_state & SPIN_LOCK_STATE_RESTORE_IRQ))
arch_enable_ints();
}
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS ARCH_INTERRUPT_SAVE_IRQ

View File

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
/* forward declarations of types used by arch_interrupt_save and _restore */
typedef unsigned int arch_interrupt_save_state_t;
typedef int arch_interrupt_save_flags_t;

View File

@@ -9,6 +9,7 @@
#ifndef ASSEMBLY
#include <arch/types.h>
#include <sys/types.h>
#include <stddef.h>
#include <stdbool.h>
@@ -21,6 +22,8 @@ static void arch_enable_ints(void);
static void arch_disable_ints(void);
static bool arch_ints_disabled(void);
static bool arch_in_int_handler(void);
static arch_interrupt_save_state_t arch_interrupt_save(arch_interrupt_save_flags_t flags);
static void arch_interrupt_restore(arch_interrupt_save_state_t old_state, arch_interrupt_save_flags_t flags);
static int atomic_swap(volatile int *ptr, int val);
static int atomic_add(volatile int *ptr, int val);

View File

@@ -44,10 +44,10 @@ static inline bool arch_spin_lock_held(spin_lock_t *lock) {
/* default arm flag is to just disable plain irqs */
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS 0
static inline void
arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags) {
static inline spin_lock_saved_state_t
arch_interrupt_save(spin_lock_save_flags_t flags) {
/* disable interrupts by clearing the MIE bit while atomically saving the old state */
*statep = riscv_csr_read_clear(mstatus, RISCV_STATUS_MIE) & RISCV_STATUS_MIE;
return riscv_csr_read_clear(mstatus, RISCV_STATUS_MIE) & RISCV_STATUS_MIE;
}
static inline void

115
arch/vax/arch.c Normal file
View File

@@ -0,0 +1,115 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <assert.h>
#include <lk/compiler.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <stdint.h>
#include <arch/ops.h>
#include <arch/vax.h>
#include <kernel/thread.h>
#define LOCAL_TRACE 0
// initial boot stack that start.S leaves us on
extern uint8_t boot_stack[1024];
static uint8_t irq_stack[512] __ALIGNED(4);
// defined in assembly
extern uint32_t SCB[];
extern void vax_undefined_exception(void);
extern void vax_exception_table(void);
static void dump_pr(const char *name, int reg) {
printf("%s\t%#x\n", name, mfpr(reg));
}
#define dump_pr_byname(name) \
dump_pr(#name, PR_##name)
static void dump_regs(void) {
dump_pr_byname(SID);
dump_pr_byname(KSP); // Kernel Stack Pointer
dump_pr_byname(ESP); // Executive Stack Pointer
dump_pr_byname(SSP); // Supervisor Stack Pointer
dump_pr_byname(USP); // User Stack Pointer
dump_pr_byname(ISP); // Interrupt Stack Pointer
dump_pr_byname(P0BR); // P0 Base Register
dump_pr_byname(P0LR); // P0 Length Register
dump_pr_byname(P1BR); // P1 Base Register
dump_pr_byname(P1LR); // P1 Length Register
dump_pr_byname(SBR); // System Base Register
dump_pr_byname(SLR); // System Limit Register
dump_pr_byname(PCBB); // Process Control Block Base
dump_pr_byname(SCBB); // System Control Block Base
dump_pr_byname(IPL); // Interrupt Priority Level
dump_pr_byname(MAPEN); // Memory Management Enable
}
void arch_early_init(void) {
// initialize any empty slots in the SCB
for (int i = 0; i < SCB_MAX_OFFSET / 4; i++) {
if (SCB[i] == 0) {
SCB[i] = ((uint32_t)&vax_exception_table + (i * 16)) | SCB_FLAG_KERNEL_STACK;
}
}
mtpr((uint32_t)SCB, PR_SCBB);
// point the pcb base register at the bootstrap thread's empty pcb.
// we'll switch from it later when starting the threading system.
mtpr((uint32_t)&get_current_thread()->arch.pcb, PR_PCBB);
get_current_thread()->arch.pcb.p0lr = (4<<24); // set the AST level to 4
// set the interrupt stack. currently unused, but set it to something safe for now.
mtpr((uint32_t)irq_stack + sizeof(irq_stack), PR_ISP);
// null out the mmu registers
mtpr(0, PR_MAPEN);
mtpr(0, PR_SBR);
mtpr(0, PR_SLR);
mtpr(0, PR_P0BR);
mtpr(0, PR_P0LR);
mtpr(0, PR_P1BR);
mtpr(0, PR_P1LR);
dump_regs();
}
void arch_init(void) {
// print some arch info
//dprintf(INFO, "RISCV: mvendorid %#lx marchid %#lx mimpid %#lx mhartid %#lx\n",
// riscv_csr_read(mvendorid), riscv_csr_read(marchid),
// riscv_csr_read(mimpid), riscv_csr_read(mhartid));
//dprintf(INFO, "RISCV: misa %#lx\n", riscv_csr_read(misa));
// enable external interrupts
//riscv_csr_set(mie, RISCV_MIE_MEIE);
}
void arch_idle(void) {
// __asm__ volatile("wfi");
}
void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
PANIC_UNIMPLEMENTED;
}
/* unimplemented cache operations */
void arch_disable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
void arch_enable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
void arch_clean_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_clean_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_sync_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }

43
arch/vax/asm.S Normal file
View File

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lk/asm.h>
// void vax_context_switch(struct vax_pcb *newpcb);
FUNCTION(vax_context_switch)
.word 0 // nothing saved
// on the old stack save the current PSL and the PC to the exit of this function,
// to be popped by the svpctx instruction
movpsl -(%sp)
moval .Lreturn, -(%sp)
// load the new pcb into r0
movl 4(%ap),%r0
// save the full state of the cpu, switching to interrupt stack
svpctx
mtpr %r0,$0x10 // load the new PCBB
// load new process context, leaves the new PSL and PC on the stack
ldpctx
// return to the new thread
rei
.Lreturn:
// when an old thread is switched back to, arrange for the return address to be here
ret
// trampoline when initially starting a thread to get from a fake saved process
// context to the C world, which requires a calls instruction.
.globl vax_initial_thread_func
vax_initial_thread_func:
calls $0,initial_thread_func
halt
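
For context, a minimal sketch (not part of this file) of how the C side invokes this routine — arch_context_switch() in arch/vax/thread.c, later in this diff, does essentially:

// hand the new thread's saved process control block to the assembly switch routine
vax_context_switch(&newthread->arch.pcb);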

73
arch/vax/exceptions.S Normal file
View File

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lk/asm.h>
#include <arch/vax.h>
// a global label, but not typed as a function
#define GLABEL(x) .globl x; x:
.text
// first level exception vectors must be 4 byte aligned
.balign 4
GLABEL(vax_undefined_exception)
halt
// for every possible exception handler, build a veneer routine that pushes
// the exception number to the stack and branches to a common routine
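// note: each veneer is padded out to 16 bytes, which is what lets arch_early_init()
// in arch/vax/arch.c point SCB slot i at vax_exception_table + (i * 16)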
.balign 16
GLABEL(vax_exception_table)
.set i, 0
.rept (SCB_MAX_OFFSET / 4)
pushl $i
jbr interrupt_common
.balign 16
.set i, i + 1
.endr
END_DATA(vax_exception_table)
.align 4
GLABEL(interrupt_common)
halt
END_FUNCTION(interrupt_common)
.align 4
GLABEL(_vax_mcheck)
halt
END_FUNCTION(_vax_mcheck)
.align 4
GLABEL(_vax_invkstk)
halt
END_FUNCTION(_vax_invkstk)
.align 4
GLABEL(_vax_interval_timer)
pushr $0xfff
calls $0,vax_interval_timer
popr $0xfff
rei
END_FUNCTION(_vax_interval_timer)
// fill in a pre-computed SCB with a few vectors that we care about
#define SEEK(x) .org (x + SCB)
#define SCB_VECTOR(offset, funct, stack) SEEK(offset); .long funct + stack
.section .data
.balign 4
DATA(SCB)
SCB_VECTOR(4, _vax_mcheck, SCB_FLAG_INT_STACK)
SCB_VECTOR(8, _vax_invkstk, SCB_FLAG_INT_STACK)
//SCB_VECTOR(40, _vax_syscall, SCB_FLAG_KERNEL_STACK)
SCB_VECTOR(0xc0, _vax_interval_timer, SCB_FLAG_KERNEL_STACK)
SEEK(SCB_MAX_OFFSET)
END_DATA(SCB)

View File

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2015 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <lk/compiler.h>
#include <lk/debug.h>
#include <arch/vax.h>
static inline void arch_enable_ints(void) {
// set the IPL to 0
mtpr(0, PR_IPL);
}
static inline void arch_disable_ints(void) {
// set the IPL to 31
mtpr(31, PR_IPL);
}
static inline bool arch_ints_disabled(void) {
uint32_t ipl = mfpr(PR_IPL);
return ipl > 0;
}
static inline int atomic_add(volatile int *ptr, int val) {
// XXX not actually atomic
int oldval = *ptr;
*ptr += val;
return oldval;
//return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_or(volatile int *ptr, int val) {
return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_and(volatile int *ptr, int val) {
return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}
static inline int atomic_swap(volatile int *ptr, int val) {
return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}
/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;
static inline struct thread *get_current_thread(void) {
return _current_thread;
}
static inline void set_current_thread(struct thread *t) {
_current_thread = t;
}
static inline uint32_t arch_cycle_count(void) {
uint32_t count = 0;
//__asm__("rdcycle %0" : "=r"(count));
//count = riscv_csr_read(mcycle);
return count;
}
static inline uint arch_curr_cpu_num(void) {
return 0;
}

View File

@@ -0,0 +1,19 @@
/*
* Copyright (c) 2015 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <sys/types.h>
#include <arch/vax.h>
void vax_context_switch(struct vax_pcb *newpcb);
struct arch_thread {
// main process control block
struct vax_pcb pcb;
};

View File

@@ -0,0 +1,16 @@
/*
* Copyright (c) 2015 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#define PAGE_SIZE 512
#define PAGE_SIZE_SHIFT 9
// is this right?
#define CACHE_LINE 32
#define ARCH_DEFAULT_STACK_SIZE 4096

View File

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2015 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#define RISCV_STATUS_SIE (1u << 2)
#define RISCV_STATUS_MIE (1u << 3)
#define RISCV_STATUS_MPIE (1u << 7)
#define RISCV_STATUS_MPP_MASK (3u << 11)
#define RISCV_MIE_MSIE (1u << 3)
#define RISCV_MIE_MTIE (1u << 7)
#define RISCV_MIE_SEIE (1u << 9)
#define RISCV_MIE_MEIE (1u << 11)
#define RISCV_MIP_MSIP (1u << 3)
#define RISCV_MIP_MTIP (1u << 7)
#define RISCV_MIP_MEIP (1u << 11)
#define RISCV_MCAUSE_INT (1u << 31)
#define riscv_csr_clear(csr, bits) \
({ \
ulong __val = bits; \
__asm__ volatile( \
"csrc " #csr ", %0" \
:: "rK" (__val) \
: "memory"); \
})
#define riscv_csr_read_clear(csr, bits) \
({ \
ulong __val = bits; \
ulong __val_out; \
__asm__ volatile( \
"csrrc %0, " #csr ", %1" \
: "=r"(__val_out) \
: "rK" (__val) \
: "memory"); \
__val_out; \
})
#define riscv_csr_set(csr, bits) \
({ \
ulong __val = bits; \
__asm__ volatile( \
"csrs " #csr ", %0" \
:: "rK" (__val) \
: "memory"); \
})
#define riscv_csr_read(csr) \
({ \
ulong __val; \
__asm__ volatile( \
"csrr %0, " #csr \
: "=r" (__val) \
:: "memory"); \
__val; \
})
#define riscv_csr_write(csr, val) \
({ \
ulong __val = (ulong)val; \
__asm__ volatile( \
"csrw " #csr ", %0" \
:: "rK" (__val) \
: "memory"); \
__val; \
})
void riscv_exception_entry(void);
enum handler_return riscv_timer_exception(void);

View File

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2015 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <arch/ops.h>
#include <stdbool.h>
#if WITH_SMP
#error vax does not support SMP
#endif
#define SPIN_LOCK_INITIAL_VALUE (0)
typedef unsigned int spin_lock_t;
typedef unsigned int spin_lock_saved_state_t;
typedef unsigned int spin_lock_save_flags_t;
static inline void arch_spin_lock(spin_lock_t *lock) {
*lock = 1;
}
static inline int arch_spin_trylock(spin_lock_t *lock) {
return 0;
}
static inline void arch_spin_unlock(spin_lock_t *lock) {
*lock = 0;
}
static inline void arch_spin_lock_init(spin_lock_t *lock) {
*lock = SPIN_LOCK_INITIAL_VALUE;
}
static inline bool arch_spin_lock_held(spin_lock_t *lock) {
return *lock != 0;
}
/* default arm flag is to just disable plain irqs */
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS 0
static inline void
arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags) {
*statep = mfpr(PR_IPL);
mtpr(31, PR_IPL);
}
static inline void
arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags) {
mtpr(old_state, PR_IPL);
}

View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <arch/vax/mtpr.h>
// Offsets into the SCB for various vectors that we care about.
// Many are not used yet, so don't fill in all of them.
#define SCB_ADAPTER_BASE (0x100)
#define SCB_DEVICE_BASE (0x200)
#define SCB_MAX_OFFSET (0x600) // according to the 1987 vax arch manual
#define SCB_FLAG_KERNEL_STACK (0b00)
#define SCB_FLAG_INT_STACK (0b01)
#ifndef __ASSEMBLER__
#include <assert.h>
struct vax_pcb {
uint32_t ksp;
uint32_t esp;
uint32_t ssp;
uint32_t usp;
uint32_t r[14];
uint32_t pc;
uint32_t psl;
uint32_t p0br;
uint32_t p0lr;
uint32_t p1br;
uint32_t p1lr;
};
static_assert(sizeof(struct vax_pcb) == 96);
#endif // __ASSEMBLER__

View File

@@ -0,0 +1,185 @@
/* $NetBSD: mtpr.h,v 1.23 2017/05/22 17:12:11 ragge Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* All bugs are subject to removal without further notice */
#pragma once
/******************************************************************************
Processor register numbers in the VAX /IC
******************************************************************************/
#define PR_KSP 0 /* Kernel Stack Pointer */
#define PR_ESP 1 /* Executive Stack Pointer */
#define PR_SSP 2 /* Supervisor Stack Pointer */
#define PR_USP 3 /* User Stack Pointer */
#define PR_ISP 4 /* Interrupt Stack Pointer */
#define PR_P0BR 8 /* P0 Base Register */
#define PR_P0LR 9 /* P0 Length Register */
#define PR_P1BR 10 /* P1 Base Register */
#define PR_P1LR 11 /* P1 Length Register */
#define PR_SBR 12 /* System Base Register */
#define PR_SLR 13 /* System Limit Register */
#define PR_PCBB 16 /* Process Control Block Base */
#define PR_SCBB 17 /* System Control Block Base */
#define PR_IPL 18 /* Interrupt Priority Level */
#define PR_ASTLVL 19 /* AST Level */
#define PR_SIRR 20 /* Software Interrupt Request */
#define PR_SISR 21 /* Software Interrupt Summary */
#define PR_IPIR 22 /* KA820 Interprocessor register */
#define PR_MCSR 23 /* Machine Check Status Register 11/750 */
#define PR_ICCS 24 /* Interval Clock Control */
#define PR_NICR 25 /* Next Interval Count */
#define PR_ICR 26 /* Interval Count */
#define PR_TODR 27 /* Time Of Year (optional) */
#define PR_CSRS 28 /* Console Storage R/S */
#define PR_CSRD 29 /* Console Storage R/D */
#define PR_CSTS 30 /* Console Storage T/S */
#define PR_CSTD 31 /* Console Storage T/D */
#define PR_RXCS 32 /* Console Receiver C/S */
#define PR_RXDB 33 /* Console Receiver D/B */
#define PR_TXCS 34 /* Console Transmit C/S */
#define PR_TXDB 35 /* Console Transmit D/B */
#define PR_TBDR 36 /* Translation Buffer Group Disable Register 11/750 */
#define PR_CADR 37 /* Cache Disable Register 11/750 */
#define PR_MCESR 38 /* Machine Check Error Summary Register 11/750 */
#define PR_CAER 39 /* Cache Error Register 11/750 */
#define PR_ACCS 40 /* Accelerator control register */
#define PR_SAVISP 41 /* Console Saved ISP */
#define PR_SAVPC 42 /* Console Saved PC */
#define PR_SAVPSL 43 /* Console Saved PSL */
#define PR_WCSA 44 /* WCS Address */
#define PR_WCSB 45 /* WCS Data */
#define PR_SBIFS 48 /* SBI Fault/Status */
#define PR_SBIS 49 /* SBI Silo */
#define PR_SBISC 50 /* SBI Silo Comparator */
#define PR_SBIMT 51 /* SBI Silo Maintenance */
#define PR_SBIER 52 /* SBI Error Register */
#define PR_SBITA 53 /* SBI Timeout Address Register */
#define PR_SBIQC 54 /* SBI Quadword Clear */
#define PR_IUR 55 /* Initialize Unibus Register 11/750 */
#define PR_MAPEN 56 /* Memory Management Enable */
#define PR_TBIA 57 /* Trans. Buf. Invalidate All */
#define PR_TBIS 58 /* Trans. Buf. Invalidate Single */
#define PR_TBDATA 59 /* Translation Buffer Data */
#define PR_MBRK 60 /* Microprogram Break */
#define PR_PMR 61 /* Performance Monitor Enable */
#define PR_SID 62 /* System ID Register */
#define PR_TBCHK 63 /* Translation Buffer Check */
#define PR_PAMACC 64 /* Physical Address Memory Map Access (KA86) */
#define PR_PAMLOC 65 /* Physical Address Memory Map Location (KA86) */
#define PR_CSWP 66 /* Cache Sweep (KA86) */
#define PR_MDECC 67 /* MBOX Data Ecc Register (KA86) */
#define PR_MENA 68 /* MBOX Error Enable Register (KA86) */
#define PR_MDCTL 69 /* MBOX Data Control Register (KA86) */
#define PR_MCCTL 70 /* MBOX Mcc Control Register (KA86) */
#define PR_MERG 71 /* MBOX Error Generator Register (KA86) */
#define PR_CRBT 72 /* Console Reboot (KA86) */
#define PR_DFI 73 /* Diagnostic Fault Insertion Register (KA86) */
#define PR_EHSR 74 /* Error Handling Status Register (KA86) */
#define PR_STXCS 76 /* Console Storage C/S (KA86) */
#define PR_STXDB 77 /* Console Storage D/B (KA86) */
#define PR_ESPA 78 /* EBOX Scratchpad Address (KA86) */
#define PR_ESPD 79 /* EBOX Scratchpad Data (KA86) */
#define PR_RXCS1 80 /* Serial-Line Unit 1 Receive CSR (KA820) */
#define PR_RXDB1 81 /* Serial-Line Unit 1 Receive Data Buffer (KA820) */
#define PR_TXCS1 82 /* Serial-Line Unit 1 Transmit CSR (KA820) */
#define PR_TXDB1 83 /* Serial-Line Unit 1 Transmit Data Buffer (KA820) */
#define PR_RXCS2 84 /* Serial-Line Unit 2 Receive CSR (KA820) */
#define PR_RXDB2 85 /* Serial-Line Unit 2 Receive Data Buffer (KA820) */
#define PR_TXCS2 86 /* Serial-Line Unit 2 Transmit CSR (KA820) */
#define PR_TXDB2 87 /* Serial-Line Unit 2 Transmit Data Buffer (KA820) */
#define PR_RXCS3 88 /* Serial-Line Unit 3 Receive CSR (KA820) */
#define PR_RXDB3 89 /* Serial-Line Unit 3 Receive Data Buffer (KA820) */
#define PR_TXCS3 90 /* Serial-Line Unit 3 Transmit CSR (KA820) */
#define PR_TXDB3 91 /* Serial-Line Unit 3 Transmit Data Buffer (KA820) */
#define PR_RXCD 92 /* Receive Console Data from another CPU (KA820) */
#define PR_CACHEX 93 /* Cache invalidate Register (KA820) */
#define PR_BINID 94 /* VAXBI node ID Register (KA820) */
#define PR_BISTOP 95 /* VAXBI Stop Register (KA820) */
#define PR_BCBTS 113 /* Backup Cache Tag Store (KA670) */
#define PR_BCP1TS 114 /* Primary Tag Store 1st half (KA670) */
#define PR_BCP2TS 115 /* Primary Tag Store 2nd half (KA670) */
#define PR_BCRFR 116 /* Refresh Register (KA670) */
#define PR_BCIDX 117 /* Index Register (KA670) */
#define PR_BCSTS 118 /* Status (KA670) */
#define PR_BCCTL 119 /* Control Register (KA670) */
#define PR_BCERR 120 /* Error Address (KA670) */
#define PR_BCFBTS 121 /* Flush backup tag store (KA670) */
#define PR_BCFPTS 122 /* Flush primary tag store (KA670) */
#define PR_VINTSR 123 /* vector i/f error status (KA43/KA46) */
#define PR_PCTAG 124 /* primary cache tag store (KA43/KA46) */
#define PR_PCIDX 125 /* primary cache index (KA43/KA46) */
#define PR_PCERR 126 /* primary cache error address (KA43/KA46) */
#define PR_PCSTS 127 /* primary cache status (KA43/KA46) */
#define PR_VPSR 144 /* Vector processor status register */
#define PR_VAER 145 /* Vector arithmetic error register */
#define PR_VMAC 146 /* Vector memory activity register */
#define PR_VTBIA 147 /* Vector TBIA */
#define PR_VSAR 148 /* Vector state address register */
#define PR_VIADR 157 /* Vector indirect address register */
#define PR_VIDLO 158 /* Vector indirect data low */
#define PR_VIDHI 159 /* Vector indirect data high */
/* Definitions for AST */
#define AST_NO 4
#define AST_OK 3
#ifndef __ASSEMBLER__
typedef unsigned int register_t;
static inline void
mtpr(register_t val, int reg)
{
__asm volatile (
"mtpr %0,%1"
: /* No output */
: "g" (val), "g" (reg)
: "memory");
}
static inline register_t
mfpr(int reg)
{
register_t __val;
__asm volatile (
"mfpr %1,%0"
: "=g" (__val)
: "g" (reg));
return __val;
}
#endif /* __ASSEMBLER__ */
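
A minimal usage sketch (not part of this file); the calls mirror what arch/vax/arch.c and the vax arch_ops/spinlock headers elsewhere in this diff do:

register_t sid = mfpr(PR_SID); /* read the System ID register */
mtpr(31, PR_IPL);              /* raise IPL to 31: mask all interrupts */
mtpr(0, PR_IPL);               /* drop IPL back to 0: allow interrupts again */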

118
arch/vax/linker.ld Normal file
View File

@@ -0,0 +1,118 @@
OUTPUT_FORMAT("elf32-vax", "elf32-vax", "elf32-vax")
OUTPUT_ARCH(vax)
ENTRY(_start)
SECTIONS
{
. = %KERNEL_BASE% + %KERNEL_LOAD_OFFSET%;
_start = .;
/* text/read-only data */
/* set the load address to physical MEMBASE */
.text : AT(%MEMBASE% + %KERNEL_LOAD_OFFSET%) {
KEEP(*(.text.boot.vectab1))
KEEP(*(.text.boot.vectab2))
KEEP(*(.text.boot))
*(.text* .sram.text.glue_7* .gnu.linkonce.t.*)
}
.interp : { *(.interp) }
.hash : { *(.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
.rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
.rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
.rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
.rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
.rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
.rel.got : { *(.rel.got) }
.rela.got : { *(.rela.got) }
.rel.ctors : { *(.rel.ctors) }
.rela.ctors : { *(.rela.ctors) }
.rel.dtors : { *(.rel.dtors) }
.rela.dtors : { *(.rela.dtors) }
.rel.init : { *(.rel.init) }
.rela.init : { *(.rela.init) }
.rel.fini : { *(.rel.fini) }
.rela.fini : { *(.rela.fini) }
.rel.bss : { *(.rel.bss) }
.rela.bss : { *(.rela.bss) }
.rel.plt : { *(.rel.plt) }
.rela.plt : { *(.rela.plt) }
.init : { *(.init) } =0x9090
.plt : { *(.plt) }
/* .ARM.exidx is sorted, so has to go in its own output section. */
__exidx_start = .;
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
__exidx_end = .;
.rodata : ALIGN(4) {
__rodata_start = .;
__fault_handler_table_start = .;
KEEP(*(.rodata.fault_handler_table))
__fault_handler_table_end = .;
*(.rodata .rodata.* .gnu.linkonce.r.*)
}
/*
* extra linker scripts tend to insert sections just after .rodata,
* so we want to make sure this symbol comes after anything inserted above,
* but not aligned to the next section necessarily.
*/
.dummy_post_rodata : {
__rodata_end = .;
}
.data : ALIGN(512) {
/* writable data */
__data_start_rom = .;
/* in one segment binaries, the rom data address is on top of the ram data address */
__data_start = .;
*(.data .data.* .gnu.linkonce.d.*)
}
.ctors : ALIGN(4) {
__ctor_list = .;
KEEP(*(.ctors .init_array))
__ctor_end = .;
}
.dtors : ALIGN(4) {
__dtor_list = .;
KEEP(*(.dtors .fini_array))
__dtor_end = .;
}
.got : { *(.got.plt) *(.got) }
.dynamic : { *(.dynamic) }
/*
* extra linker scripts tend to insert sections just after .data,
* so we want to make sure this symbol comes after anything inserted above,
* but not aligned to the next section necessarily.
*/
.dummy_post_data : {
__data_end = .;
}
/* uninitialized data (in same segment as writable data) */
.bss : ALIGN(4) {
KEEP(*(.bss.prebss.*))
. = ALIGN(4);
__bss_start = .;
*(.bss .bss.*)
*(.gnu.linkonce.b.*)
*(COMMON)
. = ALIGN(4);
__bss_end = .;
}
_end = .;
. = %KERNEL_BASE% + %MEMSIZE%;
_end_of_ram = .;
/* Strip unnecessary stuff */
/DISCARD/ : { *(.comment .note .eh_frame) }
}

56
arch/vax/rules.mk Normal file
View File

@@ -0,0 +1,56 @@
LOCAL_DIR := $(GET_LOCAL_DIR)
MODULE := $(LOCAL_DIR)
MODULE_SRCS += $(LOCAL_DIR)/start.S
MODULE_SRCS += $(LOCAL_DIR)/arch.c
MODULE_SRCS += $(LOCAL_DIR)/asm.S
MODULE_SRCS += $(LOCAL_DIR)/exceptions.S
MODULE_SRCS += $(LOCAL_DIR)/thread.c
GLOBAL_DEFINES += \
SMP_MAX_CPUS=1
# set the default toolchain to vax-linux
ifndef TOOLCHAIN_PREFIX
TOOLCHAIN_PREFIX := vax-linux-
endif
WITH_LINKER_GC ?= 0
cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc /dev/null 2>&1`"; \
then echo "$(2)"; else echo "$(3)"; fi ;)
ARCH_COMPILEFLAGS :=
ARCH_OPTFLAGS := -O2
ARCH_LDFLAGS := -z max-page-size=512
LIBGCC := $(shell $(TOOLCHAIN_PREFIX)gcc $(GLOBAL_COMPILEFLAGS) $(ARCH_COMPILEFLAGS) $(GLOBAL_CFLAGS) -print-libgcc-file-name)
$(info LIBGCC = $(LIBGCC))
KERNEL_BASE ?= $(MEMBASE)
KERNEL_LOAD_OFFSET ?= 0
ROMBASE ?= 0
GLOBAL_DEFINES += \
ROMBASE=$(ROMBASE) \
MEMBASE=$(MEMBASE) \
MEMSIZE=$(MEMSIZE)
# potentially generated files that should be cleaned out with clean make rule
GENERATED += \
$(BUILDDIR)/linker.ld
# rules for generating the linker
$(BUILDDIR)/linker.ld: $(LOCAL_DIR)/linker.ld $(wildcard arch/*.ld) linkerscript.phony
@echo generating $@
@$(MKDIR)
$(NOECHO)sed "s/%ROMBASE%/$(ROMBASE)/;s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/;s/%KERNEL_BASE%/$(KERNEL_BASE)/;s/%KERNEL_LOAD_OFFSET%/$(KERNEL_LOAD_OFFSET)/;s/%VECTOR_BASE_PHYS%/$(VECTOR_BASE_PHYS)/" < $< > $@.tmp
@$(call TESTANDREPLACEFILE,$@.tmp,$@)
linkerscript.phony:
.PHONY: linkerscript.phony
LINKER_SCRIPT += $(BUILDDIR)/linker.ld
include make/module.mk

75
arch/vax/start.S Normal file
View File

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
.section .text.boot
.globl _start
_start:
nop
nop
// save our bootargs
moval bootargs_end,%sp
pushr $0x1fff
// see if we need to be relocated
movab _start, %r0 // where we are
movl $_start, %r1 // where we want to be
cmpl %r0,%r1
beql relocated
// compute the copy length
subl3 %r1, $__data_end, %r2
// copy us down to the final location
1:
movb (%r0)+,(%r1)+
sobgtr %r2,1b
// zero bss
subl3 $_end, $__data_end, %r2
1:
movb $0,(%r1)+
sobgtr %r2,1b
// branch to our new spot
// use a REI to make sure the cpu dumps the pipeline
moval boot_stack_end,%sp // set the stack temporarily in the current memory spot
movpsl -(%sp)
movl $relocated, -(%sp)
rei
relocated:
// switch to kernel stack
moval boot_stack_end,%sp // set the interrupt stack in the relocated spot
movpsl -(%sp) // push the psl on the stack
bicl2 $(1<<26),(%sp) // clear bit 26 (IS)
moval 1f,-(%sp) // push the address of the end of this routine
rei // rei to it, loading the new PSL
1:
// now we should be using the kernel stack pointer, so re-set it
moval boot_stack_end,%sp
// branch into main and we're done
calls $0, lk_main
halt
.section .data
// declare bootargs here to make sure it goes in the data segment, since it's
// saved before the bss is zeroed out.
.balign 4
.globl bootargs
bootargs:
.skip 13*4
bootargs_end:
.section .bss
.balign 4
boot_stack:
.skip 1024
boot_stack_end:

81
arch/vax/thread.c Normal file
View File

@@ -0,0 +1,81 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <assert.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <kernel/thread.h>
#include <arch/vax.h>
#define LOCAL_TRACE 0
struct thread *_current_thread;
extern void vax_initial_thread_func(void);
void initial_thread_func(void) __NO_RETURN;
void initial_thread_func(void) {
DEBUG_ASSERT(arch_ints_disabled());
thread_t *ct = get_current_thread();
#if LOCAL_TRACE
LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
dump_thread(ct);
#endif
// release the thread lock that was implicitly held across the reschedule
spin_unlock(&thread_lock);
arch_enable_ints();
int ret = ct->entry(ct->arg);
LTRACEF("thread %p exiting with %d\n", ct, ret);
thread_exit(ret);
}
void arch_thread_initialize(thread_t *t) {
LTRACEF("t %p (%s)\n", t, t->name);
// zero out the arch thread context, including the PCB
memset(&t->arch, 0, sizeof(t->arch));
// initialize the top of the stack
uint32_t *stack_top = (uint32_t *)((uintptr_t)t->stack + t->stack_size);
t->arch.pcb.ksp = (uint32_t)stack_top;
// set the initial address to the initial_thread_func
t->arch.pcb.pc = (uint32_t)&vax_initial_thread_func;
t->arch.pcb.psl = (31 << 16); // IPL 31
t->arch.pcb.p0lr = (4 << 24); // ASTLVL 4
}
void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
DEBUG_ASSERT(arch_ints_disabled());
LTRACEF("old %p (%s) pcb %p, new %p (%s) pcb %p\n",
oldthread, oldthread->name, &oldthread->arch.pcb,
newthread, newthread->name, &newthread->arch.pcb);
if (LOCAL_TRACE) {
hexdump(&newthread->arch.pcb, sizeof(struct vax_pcb));
}
vax_context_switch(&newthread->arch.pcb);
}
void arch_dump_thread(thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
dprintf(INFO, "pcb %p, sp %#x\n", &t->arch.pcb, t->arch.pcb.ksp);
}
}

View File

@@ -13,6 +13,7 @@
#ifndef ASSEMBLY
#include <arch/x86.h>
#include <arch/types.h>
/* override of some routines */
static inline void arch_enable_ints(void) {
@@ -42,6 +43,19 @@ static inline bool arch_ints_disabled(void) {
return !(state & (1<<9));
}
/* flags are unused on x86 */
static inline arch_interrupt_save_state_t
arch_interrupt_save(arch_interrupt_save_flags_t flags) {
arch_interrupt_save_state_t state = x86_save_flags();
arch_disable_ints();
return state;
}
static inline void
arch_interrupt_restore(arch_interrupt_save_state_t old_state, arch_interrupt_save_flags_t flags) {
x86_restore_flags(old_state);
}
int _atomic_and(volatile int *ptr, int val);
int _atomic_or(volatile int *ptr, int val);
int _atomic_cmpxchg(volatile int *ptr, int oldval, int newval);

View File

@@ -15,9 +15,6 @@
typedef unsigned long spin_lock_t;
typedef x86_flags_t spin_lock_saved_state_t;
typedef uint spin_lock_save_flags_t;
/* simple implementation of spinlocks for no smp support */
static inline void arch_spin_lock_init(spin_lock_t *lock) {
*lock = SPIN_LOCK_INITIAL_VALUE;
@@ -39,18 +36,6 @@ static inline void arch_spin_unlock(spin_lock_t *lock) {
*lock = 0;
}
/* flags are unused on x86 */
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS 0
static inline void
arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags) {
*statep = x86_save_flags();
arch_disable_ints();
}
static inline void
arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags) {
x86_restore_flags(old_state);
}

View File

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
/* forward declarations of types used by arch_interrupt_save and _restore */
typedef unsigned int arch_interrupt_save_state_t;
typedef int arch_interrupt_save_flags_t;

View File

@@ -35,23 +35,19 @@ static inline bool spin_lock_held(spin_lock_t *lock) {
return arch_spin_lock_held(lock);
}
/* spin lock irq save flags: */
/* Possible future flags:
* SPIN_LOCK_FLAG_PMR_MASK = 0x000000ff
* SPIN_LOCK_FLAG_PREEMPTION = 0x00000100
* SPIN_LOCK_FLAG_SET_PMR = 0x00000200
*/
/* Generic flags */
/* flags to the arch_interrupt_save routine */
#define SPIN_LOCK_FLAG_INTERRUPTS ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS
/* spin lock saved state is just an alias to the arch interrupt saved state */
typedef arch_interrupt_save_state_t spin_lock_saved_state_t;
/* same as spin lock, but save disable and save interrupt state first */
static inline void spin_lock_save(
spin_lock_t *lock,
spin_lock_saved_state_t *statep,
spin_lock_save_flags_t flags) {
arch_interrupt_save(statep, flags);
arch_interrupt_save_flags_t flags) {
*statep = arch_interrupt_save(flags);
spin_lock(lock);
}
@@ -59,7 +55,7 @@ static inline void spin_lock_save(
static inline void spin_unlock_restore(
spin_lock_t *lock,
spin_lock_saved_state_t old_state,
spin_lock_save_flags_t flags) {
arch_interrupt_save_flags_t flags) {
spin_unlock(lock);
arch_interrupt_restore(old_state, flags);
}
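
A typical caller, as a sketch (assuming a spin_lock_t lock initialized elsewhere; only names defined in this header are used):

spin_lock_saved_state_t state;
spin_lock_save(&lock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
/* ... work protected by the lock, with interrupts disabled ... */
spin_unlock_restore(&lock, state, SPIN_LOCK_FLAG_INTERRUPTS);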

View File

@@ -0,0 +1,11 @@
LOCAL_DIR := $(GET_LOCAL_DIR)
ASM_STRING_OPS := #bcopy bzero memcpy memmove memset
MODULE_SRCS += \
#$(LOCAL_DIR)/memcpy.S \
#$(LOCAL_DIR)/memset.S
# filter out the C implementation
C_STRING_OPS := $(filter-out $(ASM_STRING_OPS),$(C_STRING_OPS))

87
platform/vax/console.c Normal file
View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lk/debug.h>
#include <platform.h>
#include <platform/debug.h>
#include "platform_p.h"
#define DEFAULT_PUTCHAR_ROUTINE putchar_mtfr
#define DEFAULT_GETCHAR_ROUTINE getchar_mtfr
// Console io via rom routine
unsigned int rom_putchar_addr = 0x20040068;
extern void rom_putchar(int c);
//unsigned int rom_getchar_addr = 0x20040068;
//extern void rom_getchar(int c);
// PR version
void putchar_mtfr(int c) {
// wait until ready
while ((mfpr(PR_TXCS) & 0x80) == 0)
;
// output char
mtpr((char)c, PR_TXDB);
}
int getchar_mtfr(void) {
if (mfpr(PR_RXCS) & (1<<7)) {
return mfpr(PR_RXDB) & 0xff;
}
return -1;
}
// select the above routine
void (*putchar_func)(int c) = &DEFAULT_PUTCHAR_ROUTINE;
int (*getchar_func)(void) = &DEFAULT_GETCHAR_ROUTINE;
void platform_dputc(char c) {
if (c == '\n')
putchar_func('\r');
putchar_func(c);
}
int platform_dgetc(char *c, bool wait) {
int ret = getchar_func();
if (ret > 0) {
*c = ret & 0xff;
return 1;
}
return -1;
}
// crash time versions, for the moment use the above
void platform_pputc(char c) {
platform_dputc(c);
}
int platform_pgetc(char *c, bool wait) {
return platform_dgetc(c, wait);
}
void platform_early_console_init(void) {
// TODO: decide what type of console to use based on vax machine
// Only understand a few at the moment
switch (vax_boardtype) {
case 0x14000004: // Microvax 3100/40
rom_putchar_addr = 0x20040068;
putchar_func = &rom_putchar;
//getchar_func = &rom_getchar;
break;
default:
// default is to use the pr routines
putchar_func = &putchar_mtfr;
getchar_func = &getchar_mtfr;
}
}
void platform_console_init(void) {
}

49
platform/vax/init.c Normal file
View File

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lk/debug.h>
#include <platform.h>
#include <platform/timer.h>
#include <arch/vax.h>
#include "platform_p.h"
uint32_t vax_cputype;
uint32_t vax_boardtype;
void platform_early_init(void) {
// decode what kind of vax we are so we can make a few platform decisions
// generally follows logic in netbsd sys/arch/vax/...
uint32_t sid = mfpr(PR_SID);
vax_cputype = sid >> 24;
vax_boardtype = vax_cputype << 24;
switch (vax_cputype) {
case 10: // CVAX
case 11: // RIGEL
case 18: // MARIAH
case 19: // NVAX
case 20: { // SOC
uint32_t sie = *(uint32_t *)(0x20040004);
vax_boardtype |= sie >> 24;
break;
}
}
platform_early_console_init();
printf("\n");
printf("VAX: sid %#x\n", sid);
printf("VAX: cputype %#x\n", vax_cputype);
printf("VAX: boardtype %#x\n", vax_boardtype);
platform_early_timer_init();
}
void platform_init(void) {
platform_console_init();
}

View File

@@ -0,0 +1,99 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <stdio.h>
#include <string.h>
#include <stdint.h>
// Copy the input file to the output, prepending a MOP header that is enough to satisfy
// the mopd daemon used to netboot an image on a VAX.
//
// Developed against mopd daemon at https://github.com/qu1j0t3/mopd
int main(int argc, char **argv) {
if (argc < 3) {
printf("not enough args");
usage:
printf("usage %s <infile> <outfile>\n", argv[0]);
return 1;
}
FILE *infp = fopen(argv[1], "rb");
if (!infp) {
printf("error opening file %s\n", argv[1]);
goto usage;
}
FILE *outfp = fopen(argv[2], "wb");
if (!outfp) {
printf("error opening file %s\n", argv[2]);
goto usage;
}
// get length of the input file
fseek(infp, 0, SEEK_END);
long int len = ftell(infp);
//printf("input file length %#lx\n", len);
rewind(infp);
// round up to next 512 byte boundary
len = (len + 511) / 512 * 512;
//printf("rounded %#lx\n", len);
// generate the header
char buffer[512];
memset(buffer, 0, sizeof(buffer));
// main header
uint16_t isd = 0xd4;
uint16_t iha = 0x30;
uint8_t hbcnt = 1;
uint16_t image_type = 0xffff;
memcpy(buffer + 0, &isd, sizeof(isd));
memcpy(buffer + 2, &iha, sizeof(iha));
memcpy(buffer + 16, &hbcnt, sizeof(hbcnt));
memcpy(buffer + 510, &image_type, sizeof(image_type));
// image header (at offset isd)
uint16_t isize = len / 512;
uint16_t load_addr = 0 / 512;
memcpy(buffer + isd + 2, &isize, sizeof(isize));
memcpy(buffer + isd + 4, &load_addr, sizeof(load_addr));
// something else at iha
uint32_t xfer_addr = 0;
memcpy(buffer + iha, &xfer_addr, sizeof(xfer_addr));
// write the header out
fseek(outfp, 0, SEEK_SET);
fwrite(buffer, sizeof(buffer), 1, outfp);
// copy the input file to the output file
int written = 0;
while (!feof(infp)) {
char buf[512];
int count = fread(buf, 1, sizeof(buf), infp);
if (count == 0) {
break;
}
fwrite(buf, 1, count, outfp);
written += count;
}
// pad it out with zeros
while (written < len) {
char zero = 0;
fwrite(&zero, 1, 1, outfp);
written++;
}
fclose(infp);
fclose(outfp);
return 0;
}

19
platform/vax/platform_p.h Normal file
View File

@@ -0,0 +1,19 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <lib/cbuf.h>
// super simple cheesy system detection mechanism
// mostly cribbed from netbsd
extern uint32_t vax_cputype;
extern uint32_t vax_boardtype;
void platform_early_console_init(void);
void platform_console_init(void);
void platform_early_timer_init(void);

10
platform/vax/rom.S Normal file
View File

@@ -0,0 +1,10 @@
.text
.globl rom_putchar
.type rom_putchar, @function
rom_putchar:
.word 0x0004 # save r2
movl 4(%ap), %r2
jsb *rom_putchar_addr
ret

57
platform/vax/rules.mk Normal file
View File

@@ -0,0 +1,57 @@
LOCAL_DIR := $(GET_LOCAL_DIR)
MODULE := $(LOCAL_DIR)
ARCH := vax
#MODULE_DEPS := \
lib/bio \
lib/cbuf \
lib/watchdog \
dev/cache/pl310 \
dev/interrupt/arm_gic \
dev/timer/arm_cortex_a9
MODULE_SRCS += \
$(LOCAL_DIR)/console.c \
$(LOCAL_DIR)/init.c \
$(LOCAL_DIR)/rom.S \
$(LOCAL_DIR)/timer.c \
# $(LOCAL_DIR)/clocks.c \
$(LOCAL_DIR)/debug.c \
$(LOCAL_DIR)/fpga.c \
$(LOCAL_DIR)/gpio.c \
$(LOCAL_DIR)/platform.c \
$(LOCAL_DIR)/qspi.c \
$(LOCAL_DIR)/spiflash.c \
$(LOCAL_DIR)/start.S \
$(LOCAL_DIR)/swdt.c \
$(LOCAL_DIR)/uart.c \
MEMBASE := 0x00000000
MEMSIZE ?= 0x00800000 # default to 8MB
KERNEL_LOAD_OFFSET := 0x00100000 # loaded 1MB into physical space
# put our kernel at 0x80000000 once we get the mmu running
#KERNEL_BASE = 0x80000000
KERNEL_BASE = 0
# tool to add a mop header to the output binary to be net bootable
MKMOPHEADER := $(call TOBUILDDIR,$(LOCAL_DIR)/mkmopheader)
$(MKMOPHEADER): $(LOCAL_DIR)/mkmopheader.c
@$(MKDIR)
$(NOECHO)echo compiling host tool $@; \
cc -O -Wall $< -o $@
GENERATED += $(MKMOPHEADER)
$(OUTBIN).mop: $(MKMOPHEADER) $(OUTBIN)
@$(MKDIR)
$(NOECHO)echo generating $@; \
$(MKMOPHEADER) $(OUTBIN) $@
EXTRA_BUILDDEPS += $(OUTBIN).mop
GENERATED += $(OUTBIN).mop
include make/module.mk

64
platform/vax/timer.c Normal file
View File

@@ -0,0 +1,64 @@
/*
* Copyright (c) 2019 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lk/debug.h>
#include <lk/err.h>
#include <lk/trace.h>
#include <kernel/thread.h>
#include <platform.h>
#include <platform/timer.h>
#include <arch/vax.h>
static uint32_t ticks;
static platform_timer_callback cb;
static void *cb_arg;
// stubbed out timer
status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, lk_time_t interval) {
// for the moment, assume the kernel is asking for 10ms
DEBUG_ASSERT(interval == 10);
cb = callback;
cb_arg = arg;
return NO_ERROR;
}
// fires once every 10ms
void vax_interval_timer(void) {
ticks++;
enum handler_return ret = cb(cb_arg, current_time());
if (ret == INT_RESCHEDULE) {
thread_preempt();
}
}
void platform_early_timer_init(void) {
// we can only assume there is a single 10ms interval timer available,
// but go ahead and configure it using the full register interface, since it
// won't hurt.
// load the next count register with -10000 usecs
mtpr(-10000, PR_NICR);
// start the timer and clear any old state
// clear error, clear interrupt, enable interrupt, load count, run
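// (these are the standard VAX ICCS bits: bit 31 ERR, bit 7 INT,
// bit 6 IE, bit 4 XFR to transfer NICR into ICR, and bit 0 RUN)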
mtpr((1<<31) | (1<<7) | (1<<6) | (1<<4) | (1<<0), PR_ICCS);
}
void platform_timer_init(void) {
}
lk_time_t current_time() {
return ticks * 10;
}
lk_bigtime_t current_time_hires() {
return ticks * 10000;
}

6
project/vax-test.mk Normal file
View File

@@ -0,0 +1,6 @@
# main project for vax
LOCAL_DIR := $(GET_LOCAL_DIR)
TARGET := vax
include project/virtual/test.mk

9
target/vax/rules.mk Normal file
View File

@@ -0,0 +1,9 @@
LOCAL_DIR := $(GET_LOCAL_DIR)
GLOBAL_INCLUDES += \
$(LOCAL_DIR)/include
PLATFORM := vax
#include make/module.mk

View File

@@ -12,3 +12,7 @@
#define LOCAL_FUNCTION(x) .type x,STT_FUNC; x:
#define LOCAL_DATA(x) .type x,STT_OBJECT; x:
#define END_FUNCTION(x) .size x, . - x
#define END_DATA(x) .size x, . - x