Compare commits
2 Commits: wip/gicv3...wip/kprint

| Author | SHA1 | Date |
|---|---|---|
|  | a8468e861f |  |
|  | 51d39e853f |  |
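Every hunk below makes the same mechanical substitution: `printf` becomes `kprintf`, and the `TRACEF`/`LTRACEF`/`LTRACE` family becomes `KTRACEF`/`KLTRACEF`/`KLTRACE` (plus a few spots switched to `dprintf(INFO, ...)`). The definitions of the k-prefixed variants are not part of this compare view, so the following is only a minimal sketch of what such wrappers could look like, assuming `kprintf` is a kernel-console `printf` variant and that the new macros mirror the existing ones in the tree's `trace.h`; every name and behavior here is an assumption, not the branch's actual code.

```c
// Hypothetical sketch only: the wip/kprint branch's real definitions are
// not visible in this compare. Assumes kprintf prints to the kernel console.
#include <stdarg.h>
#include <stdio.h>

#define LOCAL_TRACE 0

int kprintf(const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    int err = vprintf(fmt, ap); // stand-in for a kernel console sink
    va_end(ap);
    return err;
}

// Mirrors the style of lk's TRACEF/LTRACEF, with the assumed k prefix
#define KTRACEF(str, x...) \
    do { kprintf("%s:%d: " str, __PRETTY_FUNCTION__, __LINE__, ## x); } while (0)
#define KLTRACEF(str, x...) \
    do { if (LOCAL_TRACE) { KTRACEF(str, ## x); } } while (0)
#define KLTRACE \
    do { if (LOCAL_TRACE) { kprintf("%s:%d\n", __PRETTY_FUNCTION__, __LINE__); } } while (0)
```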
```diff
@@ -102,10 +102,10 @@ void arch_init(void) {
     DWT->CYCCNT = 0;
     DWT->CTRL |= DWT_CTRL_CYCCNTENA_Msk; // enable cycle counter
 #endif
-    printf("CONTROL 0x%x\n", __get_CONTROL());
+    kprintf("CONTROL 0x%x\n", __get_CONTROL());
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
-    printf("FPSCR 0x%x\n", __get_FPSCR());
-    printf("FPCCR 0x%x\n", FPU->FPCCR);
+    kprintf("FPSCR 0x%x\n", __get_FPSCR());
+    kprintf("FPCCR 0x%x\n", FPU->FPCCR);
 #endif
 }
```
```diff
@@ -16,52 +16,52 @@
 
 static void dump_frame(const struct arm_cm_exception_frame *frame) {
-    printf("exception frame at %p\n", frame);
-    printf("\tr0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x r4 0x%08x\n",
+    kprintf("exception frame at %p\n", frame);
+    kprintf("\tr0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x r4 0x%08x\n",
            frame->r0, frame->r1, frame->r2, frame->r3, frame->r4);
-    printf("\tr5 0x%08x r6 0x%08x r7 0x%08x r8 0x%08x r9 0x%08x\n",
+    kprintf("\tr5 0x%08x r6 0x%08x r7 0x%08x r8 0x%08x r9 0x%08x\n",
            frame->r5, frame->r6, frame->r7, frame->r8, frame->r9);
-    printf("\tr10 0x%08x r11 0x%08x r12 0x%08x\n",
+    kprintf("\tr10 0x%08x r11 0x%08x r12 0x%08x\n",
            frame->r10, frame->r11, frame->r12);
-    printf("\tlr 0x%08x pc 0x%08x psr 0x%08x\n",
+    kprintf("\tlr 0x%08x pc 0x%08x psr 0x%08x\n",
            frame->lr, frame->pc, frame->psr);
 }
 
 void hardfault(struct arm_cm_exception_frame *frame) {
-    printf("hardfault: ");
+    kprintf("hardfault: ");
     dump_frame(frame);
 
 #if (__CORTEX_M >= 0X03) || (__CORTEX_SC >= 300)
-    printf("HFSR 0x%x\n", SCB->HFSR);
+    kprintf("HFSR 0x%x\n", SCB->HFSR);
 #endif
 
     platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
 }
 
 void memmanage(struct arm_cm_exception_frame *frame) {
-    printf("memmanage: ");
+    kprintf("memmanage: ");
     dump_frame(frame);
 
 #if (__CORTEX_M >= 0X03) || (__CORTEX_SC >= 300)
     uint32_t mmfsr = SCB->CFSR & 0xff;
 
     if (mmfsr & (1<<0)) { // IACCVIOL
-        printf("instruction fault\n");
+        kprintf("instruction fault\n");
     }
     if (mmfsr & (1<<1)) { // DACCVIOL
-        printf("data fault\n");
+        kprintf("data fault\n");
     }
     if (mmfsr & (1<<3)) { // MUNSTKERR
-        printf("fault on exception return\n");
+        kprintf("fault on exception return\n");
     }
     if (mmfsr & (1<<4)) { // MSTKERR
-        printf("fault on exception entry\n");
+        kprintf("fault on exception entry\n");
     }
     if (mmfsr & (1<<5)) { // MLSPERR
-        printf("fault on lazy fpu preserve\n");
+        kprintf("fault on lazy fpu preserve\n");
     }
     if (mmfsr & (1<<7)) { // MMARVALID
-        printf("fault address 0x%x\n", SCB->MMFAR);
+        kprintf("fault address 0x%x\n", SCB->MMFAR);
     }
 #endif
     platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
@@ -69,32 +69,32 @@ void memmanage(struct arm_cm_exception_frame *frame) {
 
 void usagefault(struct arm_cm_exception_frame *frame) {
-    printf("usagefault: ");
+    kprintf("usagefault: ");
     dump_frame(frame);
 
 #if (__CORTEX_M >= 0x03)
     uint32_t ufsr = BITS_SHIFT(SCB->CFSR, 31, 16);
-    printf("UFSR 0x%x: ", ufsr);
+    kprintf("UFSR 0x%x: ", ufsr);
 
     if (ufsr & (1<<0))
-        printf("undefined instruction\n");
+        kprintf("undefined instruction\n");
     if (ufsr & (1<<1))
-        printf("ESPR invalid\n");
+        kprintf("ESPR invalid\n");
     if (ufsr & (1<<2))
-        printf("integrity check failed on EXC_RETURN\n");
+        kprintf("integrity check failed on EXC_RETURN\n");
     if (ufsr & (1<<3))
-        printf("coprocessor access error\n");
+        kprintf("coprocessor access error\n");
     if (ufsr & (1<<8))
-        printf("unaligned error\n");
+        kprintf("unaligned error\n");
    if (ufsr & (1<<9))
-        printf("division by zero\n");
+        kprintf("division by zero\n");
 #endif
 
     platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
 }
 
 void busfault(struct arm_cm_exception_frame *frame) {
-    printf("busfault: ");
+    kprintf("busfault: ");
     dump_frame(frame);
 
     platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
@@ -103,7 +103,7 @@ void busfault(struct arm_cm_exception_frame *frame) {
 /* raw exception vectors */
 
 void _nmi(void) {
-    printf("nmi\n");
+    kprintf("nmi\n");
     platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
 }
 
@@ -167,11 +167,11 @@ __NAKED void _usagefault(void) {
 
 /* systick handler */
 void __WEAK _systick(void) {
-    printf("systick\n");
+    kprintf("systick\n");
     platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
 }
 
 void __WEAK _debugmonitor(void) {
-    printf("debugmonitor\n");
+    kprintf("debugmonitor\n");
     platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
 }
```
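The `usagefault` decoder above pulls the UFSR out of the top half of `SCB->CFSR` with `BITS_SHIFT` and then tests individual status bits. A self-contained illustration of the bit-extraction semantics those checks assume; the real macros live in the tree's `bits.h`, and the example CFSR value is made up:

```c
#include <stdint.h>
#include <stdio.h>

// Assumed semantics of the helpers used above: BITS_SHIFT extracts
// bits [high:low] of x, BIT tests a single bit.
#define BITS_SHIFT(x, high, low) \
    (((x) >> (low)) & ((1ULL << ((high) - (low) + 1)) - 1))
#define BIT(x, bit) ((x) & (1ULL << (bit)))

int main(void) {
    uint32_t cfsr = 0x02000000;               // made-up CFSR: DIVBYZERO set
    uint32_t ufsr = BITS_SHIFT(cfsr, 31, 16); // UFSR lives in CFSR[31:16]
    printf("UFSR 0x%x, unaligned %d, div-by-zero %d\n",
           ufsr, !!BIT(ufsr, 8), !!BIT(ufsr, 9)); // UFSR 0x200, 0, 1
    return 0;
}
```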
```diff
@@ -36,10 +36,10 @@ static platform_timer_callback cb;
 static void *cb_args;
 
 static void arm_cm_systick_set_periodic(lk_time_t period) {
-    LTRACEF("clk_freq %u, period %u\n", tick_rate, (uint)period);
+    KLTRACEF("clk_freq %u, period %u\n", tick_rate, (uint)period);
 
     uint32_t ticks = tick_rate / (1000 / period);
-    LTRACEF("ticks %d\n", ticks);
+    KLTRACEF("ticks %d\n", ticks);
 
     SysTick->LOAD = (ticks & SysTick_LOAD_RELOAD_Msk) - 1;
     SysTick->VAL = 0;
@@ -67,7 +67,7 @@ void _systick(void) {
 }
 
 status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, lk_time_t interval) {
-    LTRACEF("callback %p, arg %p, interval %u\n", callback, arg, interval);
+    KLTRACEF("callback %p, arg %p, interval %u\n", callback, arg, interval);
 
     DEBUG_ASSERT(tick_rate != 0 && tick_rate_mhz != 0);
```
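`arm_cm_systick_set_periodic()` above turns a millisecond period into a SysTick reload count with two truncating integer divisions. A worked example of that arithmetic, with an assumed 48 MHz clock (the clock value is illustrative only):

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t tick_rate = 48000000;  // assumed clk_freq of 48 MHz
    uint32_t period = 10;           // desired period in ms
    // Same expression as above; both divisions truncate, so periods that
    // do not divide 1000 evenly pick up rounding error here.
    uint32_t ticks = tick_rate / (1000 / period); // 48000000 / 100 = 480000
    uint32_t load = (ticks & 0x00FFFFFFu) - 1;    // SysTick_LOAD_RELOAD_Msk; LOAD is ticks - 1
    printf("ticks %u, LOAD %u\n", ticks, load);   // ticks 480000, LOAD 479999
    return 0;
}
```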
```diff
@@ -99,7 +99,7 @@ static void initial_thread_func(void) __NO_RETURN;
 static void initial_thread_func(void) {
     int ret;
 
-    LTRACEF("thread %p calling %p with arg %p\n", _current_thread, _current_thread->entry, _current_thread->arg);
+    KLTRACEF("thread %p calling %p with arg %p\n", _current_thread, _current_thread->entry, _current_thread->arg);
 #if LOCAL_TRACE
     dump_thread(_current_thread);
 #endif
@@ -110,13 +110,13 @@ static void initial_thread_func(void) {
 
     ret = _current_thread->entry(_current_thread->arg);
 
-    LTRACEF("thread %p exiting with %d\n", _current_thread, ret);
+    KLTRACEF("thread %p exiting with %d\n", _current_thread, ret);
 
     thread_exit(ret);
 }
 
 void arch_thread_initialize(struct thread *t) {
-    LTRACEF("thread %p, stack %p\n", t, t->stack);
+    KLTRACEF("thread %p, stack %p\n", t, t->stack);
 
     /* find the top of the stack and align it on an 8 byte boundary */
     uint32_t *sp = (void *)ROUNDDOWN((vaddr_t)t->stack + t->stack_size, 8);
@@ -142,13 +142,13 @@ static volatile struct arm_cm_exception_frame_long *preempt_frame;
 static void pendsv(struct arm_cm_exception_frame_long *frame) {
     arch_disable_ints();
 
-    LTRACEF("preempting thread %p (%s)\n", _current_thread, _current_thread->name);
+    KLTRACEF("preempting thread %p (%s)\n", _current_thread, _current_thread->name);
 
     /* save the iframe the pendsv fired on and hit the preemption code */
     preempt_frame = frame;
     thread_preempt();
 
-    LTRACEF("fell through\n");
+    KLTRACEF("fell through\n");
 
     /* if we got here, there wasn't anything to switch to, so just fall through and exit */
     preempt_frame = NULL;
@@ -360,19 +360,19 @@ __NAKED static void _thread_mode_bounce(bool fpused) {
 */
 void arch_context_switch(struct thread *oldthread, struct thread *newthread) {
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
-    LTRACEF("FPCCR.LSPACT %lu, FPCAR 0x%x, CONTROL.FPCA %lu\n",
+    KLTRACEF("FPCCR.LSPACT %lu, FPCAR 0x%x, CONTROL.FPCA %lu\n",
             FPU->FPCCR & FPU_FPCCR_LSPACT_Msk, FPU->FPCAR, __get_CONTROL() & CONTROL_FPCA_Msk);
 #endif
 
     /* if preempt_frame is set, we are being preempted */
     if (preempt_frame) {
-        LTRACEF("we're preempted, old frame %p, old lr 0x%x, pc 0x%x, new preempted bool %d\n",
+        KLTRACEF("we're preempted, old frame %p, old lr 0x%x, pc 0x%x, new preempted bool %d\n",
                 preempt_frame, preempt_frame->lr, preempt_frame->pc, newthread->arch.was_preempted);
 
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
         /* see if extended fpu frame was pushed */
         if ((preempt_frame->lr & (1<<4)) == 0) {
-            LTRACEF("thread %s pushed fpu frame\n", oldthread->name);
+            KLTRACEF("thread %s pushed fpu frame\n", oldthread->name);
 
             /* save the top part of the context */
             /* note this should also trigger a lazy fpu save if it hasn't already done so */
@@ -394,7 +394,7 @@ void arch_context_switch(struct thread *oldthread, struct thread *newthread) {
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
         /* if new thread has saved fpu state, restore it */
         if (newthread->arch.fpused) {
-            LTRACEF("newthread FPCCR.LSPACT %lu, FPCAR 0x%x, CONTROL.FPCA %lu\n",
+            KLTRACEF("newthread FPCCR.LSPACT %lu, FPCAR 0x%x, CONTROL.FPCA %lu\n",
                     FPU->FPCCR & FPU_FPCCR_LSPACT_Msk, FPU->FPCAR, __get_CONTROL() & CONTROL_FPCA_Msk);
 
             /* enable the fpu manually */
@@ -412,7 +412,7 @@ void arch_context_switch(struct thread *oldthread, struct thread *newthread) {
         if (newthread->arch.was_preempted) {
             /* return directly to the preempted thread's iframe */
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
-            LTRACEF("newthread2 FPCCR.LSPACT %lu, FPCAR 0x%x, CONTROL.FPCA %lu\n",
+            KLTRACEF("newthread2 FPCCR.LSPACT %lu, FPCAR 0x%x, CONTROL.FPCA %lu\n",
                     FPU->FPCCR & FPU_FPCCR_LSPACT_Msk, FPU->FPCAR, __get_CONTROL() & CONTROL_FPCA_Msk);
 #endif
             __asm__ volatile(
@@ -438,9 +438,9 @@ void arch_context_switch(struct thread *oldthread, struct thread *newthread) {
 #endif
 
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
-            LTRACEF("iretting to user space, fpused %u\n", newthread->arch.fpused);
+            KLTRACEF("iretting to user space, fpused %u\n", newthread->arch.fpused);
 #else
-            LTRACEF("iretting to user space\n");
+            KLTRACEF("iretting to user space\n");
 #endif
 
             __asm__ volatile(
@@ -458,13 +458,13 @@ void arch_context_switch(struct thread *oldthread, struct thread *newthread) {
         /* see if we have fpu state we need to save */
         if (!oldthread->arch.fpused && __get_CONTROL() & CONTROL_FPCA_Msk) {
             /* mark this thread as using float */
-            LTRACEF("thread %s uses float\n", oldthread->name);
+            KLTRACEF("thread %s uses float\n", oldthread->name);
             oldthread->arch.fpused = true;
         }
 #endif
 
         if (newthread->arch.was_preempted) {
-            LTRACEF("not being preempted, but switching to preempted thread\n");
+            KLTRACEF("not being preempted, but switching to preempted thread\n");
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
             _half_save_and_svc(oldthread, newthread, oldthread->arch.fpused, newthread->arch.fpused);
 #else
@@ -472,7 +472,7 @@ void arch_context_switch(struct thread *oldthread, struct thread *newthread) {
 #endif
         } else {
             /* fast path, both sides did not preempt */
-            LTRACEF("both sides are not preempted newsp 0x%lx\n", newthread->arch.sp);
+            KLTRACEF("both sides are not preempted newsp 0x%lx\n", newthread->arch.sp);
 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
             _arch_non_preempt_context_switch(oldthread, newthread, oldthread->arch.fpused, newthread->arch.fpused);
 #else
```
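The preemption path above decides whether the old thread stacked FPU state by testing bit 4 of the saved EXC_RETURN value (`preempt_frame->lr`): on ARMv7-M, a cleared bit 4 means exception entry pushed the extended frame including FPU registers. A standalone illustration of that test; the sample values are the architecture's standard EXC_RETURN codes:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// ARMv7-M rule: EXC_RETURN bit 4 == 0 -> extended (FPU) frame was stacked.
static bool pushed_fpu_frame(uint32_t exc_return) {
    return (exc_return & (1u << 4)) == 0;
}

int main(void) {
    printf("0xFFFFFFED fpu frame? %d\n", pushed_fpu_frame(0xFFFFFFEDu)); // 1
    printf("0xFFFFFFFD fpu frame? %d\n", pushed_fpu_frame(0xFFFFFFFDu)); // 0
    return 0;
}
```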
```diff
@@ -167,9 +167,9 @@ void arm_secondary_entry(uint asm_cpu_num) {
 
     arch_mp_init_percpu();
 
-    LTRACEF("cpu num %d\n", cpu);
-    LTRACEF("sctlr 0x%x\n", arm_read_sctlr());
-    LTRACEF("actlr 0x%x\n", arm_read_actlr());
+    KLTRACEF("cpu num %d\n", cpu);
+    KLTRACEF("sctlr 0x%x\n", arm_read_sctlr());
+    KLTRACEF("actlr 0x%x\n", arm_read_actlr());
 
     /* we're done, tell the main cpu we're up */
     atomic_add(&secondaries_to_init, -1);
@@ -303,7 +303,7 @@ status_t arm_vtop(addr_t va, addr_t *pa) {
 #endif
 
 void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
-    LTRACEF("entry %p, args 0x%lx 0x%lx 0x%lx 0x%lx\n", entry, arg0, arg1, arg2, arg3);
+    KLTRACEF("entry %p, args 0x%lx 0x%lx 0x%lx 0x%lx\n", entry, arg0, arg1, arg2, arg3);
 
     /* we are going to shut down the system, start by disabling interrupts */
     arch_disable_ints();
@@ -326,7 +326,7 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
     /* add the low bits of the virtual address back */
     entry_pa |= ((addr_t)entry & 0xfff);
 
-    LTRACEF("entry pa 0x%lx\n", entry_pa);
+    KLTRACEF("entry pa 0x%lx\n", entry_pa);
 
     /* figure out the mapping for the chain load routine */
     if (arm_vtop((addr_t)&arm_chain_load, &loader_pa) < 0) {
@@ -338,7 +338,7 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
 
     paddr_t loader_pa_section = ROUNDDOWN(loader_pa, SECTION_SIZE);
 
-    LTRACEF("loader address %p, phys 0x%lx, surrounding large page 0x%lx\n",
+    KLTRACEF("loader address %p, phys 0x%lx, surrounding large page 0x%lx\n",
             &arm_chain_load, loader_pa, loader_pa_section);
 
     /* using large pages, map around the target location */
@@ -349,7 +349,7 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
     loader_pa = (paddr_t)&arm_chain_load;
 #endif
 
-    LTRACEF("disabling instruction/data cache\n");
+    KLTRACEF("disabling instruction/data cache\n");
     arch_disable_cache(UCACHE);
 #if WITH_DEV_CACHE_PL310
     pl310_set_enable(false);
@@ -358,7 +358,7 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
     /* put the booting cpu back into close to a default state */
     arch_quiesce();
 
-    LTRACEF("branching to physical address of loader\n");
+    KLTRACEF("branching to physical address of loader\n");
 
     /* branch to the physical address version of the chain loader routine */
     void (*loader)(paddr_t entry, ulong, ulong, ulong, ulong) __NO_RETURN = (void *)loader_pa;
```
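`arch_chain_load()` above translates the loader's virtual entry point to a physical address and then re-attaches the page offset, since the va-to-pa lookup only resolves at page granularity. A worked example of that low-bits merge; all addresses are made up:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uintptr_t entry    = 0x80105abc; // virtual entry point (made up)
    uintptr_t entry_pa = 0x00105000; // page-aligned va->pa result (made up)
    entry_pa |= entry & 0xfff;       // restore the offset within the 4 KB page
    printf("entry %#lx -> entry_pa %#lx\n",
           (unsigned long)entry, (unsigned long)entry_pa); // -> 0x105abc
    return 0;
}
```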
```diff
@@ -373,9 +373,9 @@ static void spinlock_test(void) {
     spin_lock_saved_state_t state;
     spin_lock_irqsave(&lock, state);
 
-    TRACEF("cpu0: i have the lock\n");
+    KTRACEF("cpu0: i have the lock\n");
     spin(1000000);
-    TRACEF("cpu0: releasing it\n");
+    KTRACEF("cpu0: releasing it\n");
 
     spin_unlock_irqrestore(&lock, state);
 
@@ -389,9 +389,9 @@ static void spinlock_test_secondary(void) {
     spin_lock_saved_state_t state;
     spin_lock_irqsave(&lock, state);
 
-    TRACEF("cpu1: i have the lock\n");
+    KTRACEF("cpu1: i have the lock\n");
     spin(250000);
-    TRACEF("cpu1: releasing it\n");
+    KTRACEF("cpu1: releasing it\n");
 
     spin_unlock_irqrestore(&lock, state);
 }
 
@@ -133,7 +133,7 @@ static void dcc_rx_callback(uint32_t val) {
     static int count = 0;
     count += 4;
     if ((count % 1000) == 0)
-        printf("count %d\n", count);
+        kprintf("count %d\n", count);
 }
 
 static int cmd_dcc(int argc, const console_cmd_args *argv) {
@@ -46,7 +46,7 @@ void arm_fpu_undefined_instruction(struct arm_iframe *frame) {
         panic("floating point code in irq context. pc 0x%x\n", frame->pc);
     }
 
-    LTRACEF("enabling fpu on thread %p\n", t);
+    KLTRACEF("enabling fpu on thread %p\n", t);
 
     t->arch.fpused = true;
     arm_fpu_thread_swap(NULL, t);
@@ -65,7 +65,7 @@ void arm_fpu_thread_initialize(struct thread *t) {
 }
 
 void arm_fpu_thread_swap(struct thread *oldthread, struct thread *newthread) {
-    LTRACEF("old %p (%d), new %p (%d)\n",
+    KLTRACEF("old %p (%d), new %p (%d)\n",
             oldthread, oldthread ? oldthread->arch.fpused : 0,
             newthread, newthread ? newthread->arch.fpused : 0);
```
```diff
@@ -139,7 +139,7 @@ static inline bool is_valid_vaddr(arch_aspace_t *aspace, vaddr_t vaddr) {
 static void arm_mmu_map_section(arch_aspace_t *aspace, addr_t paddr, addr_t vaddr, uint flags) {
     int index;
 
-    LTRACEF("aspace %p tt %p pa 0x%lx va 0x%lx flags 0x%x\n", aspace, aspace->tt_virt, paddr, vaddr, flags);
+    KLTRACEF("aspace %p tt %p pa 0x%lx va 0x%lx flags 0x%x\n", aspace, aspace->tt_virt, paddr, vaddr, flags);
 
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
@@ -218,7 +218,7 @@ void arch_disable_mmu(void) {
 
 void arch_mmu_context_switch(arch_aspace_t *aspace) {
     if (LOCAL_TRACE && TRACE_CONTEXT_SWITCH)
-        LTRACEF("aspace %p\n", aspace);
+        KLTRACEF("aspace %p\n", aspace);
 
     uint32_t ttbr;
     uint32_t ttbcr = arm_read_ttbcr();
@@ -231,13 +231,13 @@ void arch_mmu_context_switch(arch_aspace_t *aspace) {
     }
 
     if (LOCAL_TRACE && TRACE_CONTEXT_SWITCH)
-        LTRACEF("ttbr 0x%x, ttbcr 0x%x\n", ttbr, ttbcr);
+        KLTRACEF("ttbr 0x%x, ttbcr 0x%x\n", ttbr, ttbcr);
     arm_write_ttbr0(ttbr);
     arm_write_ttbcr(ttbcr);
 }
 
 status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) {
-    LTRACEF("aspace %p, vaddr 0x%lx\n", aspace, vaddr);
+    KLTRACEF("aspace %p, vaddr 0x%lx\n", aspace, vaddr);
 
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
@@ -405,7 +405,7 @@ static status_t get_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t *
 
     *ppa = pa + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));
 
-    LTRACEF("allocated pagetable at %p, pa 0x%lx, pa 0x%lx\n", l2_va, pa, *ppa);
+    KLTRACEF("allocated pagetable at %p, pa 0x%lx, pa 0x%lx\n", l2_va, pa, *ppa);
     return NO_ERROR;
 }
 
@@ -432,7 +432,7 @@ static void put_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t l2_pa
 
     list_delete(&page->node);
 
-    LTRACEF("freeing pagetable at 0x%lx\n", l2_pa);
+    KLTRACEF("freeing pagetable at 0x%lx\n", l2_pa);
     pmm_free_page(page);
 }
 
@@ -455,7 +455,7 @@ static inline bool are_regions_compatible(uint new_region_flags,
 vaddr_t arch_mmu_pick_spot(vaddr_t base, uint prev_region_flags,
                            vaddr_t end, uint next_region_flags,
                            vaddr_t align, size_t size, uint flags) {
-    LTRACEF("base 0x%lx, end 0x%lx, align %ld, size %zd, flags 0x%x\n",
+    KLTRACEF("base 0x%lx, end 0x%lx, align %ld, size %zd, flags 0x%x\n",
             base, end, align, size, flags);
 
     vaddr_t spot;
@@ -482,7 +482,7 @@ vaddr_t arch_mmu_pick_spot(vaddr_t base, uint prev_region_flags,
 
 int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count, uint flags) {
-    LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);
+    KLTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);
 
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
@@ -528,7 +528,7 @@ int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count,
         uint l1_index = vaddr / SECTION_SIZE;
         uint32_t tt_entry = aspace->tt_virt[l1_index];
 
-        LTRACEF("tt_entry 0x%x\n", tt_entry);
+        KLTRACEF("tt_entry 0x%x\n", tt_entry);
         switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
             case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                 // XXX will have to break L1 mapping into a L2 page table
@@ -549,7 +549,7 @@ int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count,
             /* fallthrough */
             case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                 uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
-                LTRACEF("l2_table at %p\n", l2_table);
+                KLTRACEF("l2_table at %p\n", l2_table);
 
                 DEBUG_ASSERT(l2_table);
 
@@ -592,7 +592,7 @@ int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
     if (!IS_PAGE_ALIGNED(vaddr))
         return ERR_INVALID_ARGS;
 
-    LTRACEF("vaddr 0x%lx count %u\n", vaddr, count);
+    KLTRACEF("vaddr 0x%lx count %u\n", vaddr, count);
 
     int unmapped = 0;
     while (count > 0) {
@@ -674,7 +674,7 @@ int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
 }
 
 status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
-    LTRACEF("aspace %p, base 0x%lx, size 0x%zx, flags 0x%x\n", aspace, base, size, flags);
+    KLTRACEF("aspace %p, base 0x%lx, size 0x%zx, flags 0x%x\n", aspace, base, size, flags);
 
     DEBUG_ASSERT(aspace);
 
@@ -706,18 +706,18 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
         aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
     }
 
-    LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);
+    KLTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);
 
     return NO_ERROR;
 }
 
 status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
-    LTRACEF("aspace %p\n", aspace);
+    KLTRACEF("aspace %p\n", aspace);
 
     // XXX free all of the pages allocated in aspace->pt_page_list
     vm_page_t *p;
     while ((p = list_remove_head_type(&aspace->pt_page_list, vm_page_t, node)) != NULL) {
-        LTRACEF("freeing page %p\n", p);
+        KLTRACEF("freeing page %p\n", p);
         pmm_free_page(p);
     }
```
```diff
@@ -27,7 +27,7 @@ extern void bcm28xx_send_ipi(uint irq, uint cpu_mask);
 #define GIC_IPI_BASE (14)
 
 status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
-    LTRACEF("target 0x%x, ipi %u\n", target, ipi);
+    KLTRACEF("target 0x%x, ipi %u\n", target, ipi);
 
 #if WITH_DEV_INTERRUPT_ARM_GIC
     uint gic_ipi_num = ipi + GIC_IPI_BASE;
@@ -35,7 +35,7 @@ status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
     /* filter out targets outside of the range of cpus we care about */
     target &= ((1UL << SMP_MAX_CPUS) - 1);
     if (target != 0) {
-        LTRACEF("target 0x%x, gic_ipi %u\n", target, gic_ipi_num);
+        KLTRACEF("target 0x%x, gic_ipi %u\n", target, gic_ipi_num);
         u_int flags = 0;
 #if WITH_LIB_SM
         flags |= ARM_GIC_SGI_FLAG_NS;
@@ -54,13 +54,13 @@ status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
 }
 
 enum handler_return arm_ipi_generic_handler(void *arg) {
-    LTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
+    KLTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
 
     return INT_NO_RESCHEDULE;
 }
 
 enum handler_return arm_ipi_reschedule_handler(void *arg) {
-    LTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
+    KLTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
 
     return mp_mbx_reschedule_irq();
 }
 
@@ -69,7 +69,7 @@ void arch_thread_initialize(thread_t *t) {
 }
 
 void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
-    // TRACEF("arch_context_switch: cpu %u old %p (%s), new %p (%s)\n", arch_curr_cpu_num(), oldthread, oldthread->name, newthread, newthread->name);
+    // KTRACEF("arch_context_switch: cpu %u old %p (%s), new %p (%s)\n", arch_curr_cpu_num(), oldthread, oldthread->name, newthread, newthread->name);
 #if ARM_WITH_VFP
     arm_fpu_thread_swap(oldthread, newthread);
 #endif
```
```diff
@@ -74,13 +74,13 @@ void arch_init(void) {
 #if WITH_SMP
     arch_mp_init_percpu();
 
-    LTRACEF("midr_el1 0x%llx\n", ARM64_READ_SYSREG(midr_el1));
+    KLTRACEF("midr_el1 0x%llx\n", ARM64_READ_SYSREG(midr_el1));
 
     secondaries_to_init = SMP_MAX_CPUS - 1; /* TODO: get count from somewhere else, or add cpus as they boot */
 
     lk_init_secondary_cpus(secondaries_to_init);
 
-    LTRACEF("releasing %d secondary cpus\n", secondaries_to_init);
+    KLTRACEF("releasing %d secondary cpus\n", secondaries_to_init);
 
     /* release the secondary cpus */
     spin_unlock(&arm_boot_cpu_lock);
@@ -151,7 +151,7 @@ void arm64_secondary_entry(ulong asm_cpu_num) {
 
     arch_mp_init_percpu();
 
-    LTRACEF("cpu num %d\n", cpu);
+    KLTRACEF("cpu num %d\n", cpu);
 
     /* we're done, tell the main cpu we're up */
     atomic_add(&secondaries_to_init, -1);
```
```diff
@@ -129,7 +129,7 @@ static void print_fault_msg(uint32_t fsc)
 
     for (i = 0; i < countof(fsc_map); i++) {
         if (fsc_map[i].fsc == fsc) {
-            printf("%s\n", fsc_map[i].fault_msg);
+            kprintf("%s\n", fsc_map[i].fault_msg);
             break;
         }
     }
@@ -139,17 +139,17 @@ extern struct fault_handler_table_entry __fault_handler_table_start[];
 extern struct fault_handler_table_entry __fault_handler_table_end[];
 
 static void dump_iframe(const struct arm64_iframe_long *iframe) {
-    printf("iframe %p:\n", iframe);
-    printf("x0 0x%16llx x1 0x%16llx x2 0x%16llx x3 0x%16llx\n", iframe->r[0], iframe->r[1], iframe->r[2], iframe->r[3]);
-    printf("x4 0x%16llx x5 0x%16llx x6 0x%16llx x7 0x%16llx\n", iframe->r[4], iframe->r[5], iframe->r[6], iframe->r[7]);
-    printf("x8 0x%16llx x9 0x%16llx x10 0x%16llx x11 0x%16llx\n", iframe->r[8], iframe->r[9], iframe->r[10], iframe->r[11]);
-    printf("x12 0x%16llx x13 0x%16llx x14 0x%16llx x15 0x%16llx\n", iframe->r[12], iframe->r[13], iframe->r[14], iframe->r[15]);
-    printf("x16 0x%16llx x17 0x%16llx x18 0x%16llx x19 0x%16llx\n", iframe->r[16], iframe->r[17], iframe->r[18], iframe->r[19]);
-    printf("x20 0x%16llx x21 0x%16llx x22 0x%16llx x23 0x%16llx\n", iframe->r[20], iframe->r[21], iframe->r[22], iframe->r[23]);
-    printf("x24 0x%16llx x25 0x%16llx x26 0x%16llx x27 0x%16llx\n", iframe->r[24], iframe->r[25], iframe->r[26], iframe->r[27]);
-    printf("x28 0x%16llx x29 0x%16llx lr 0x%16llx usp 0x%16llx\n", iframe->r[28], iframe->r[29], iframe->lr, iframe->usp);
-    printf("elr 0x%16llx\n", iframe->elr);
-    printf("spsr 0x%16llx\n", iframe->spsr);
+    kprintf("iframe %p:\n", iframe);
+    kprintf("x0 0x%16llx x1 0x%16llx x2 0x%16llx x3 0x%16llx\n", iframe->r[0], iframe->r[1], iframe->r[2], iframe->r[3]);
+    kprintf("x4 0x%16llx x5 0x%16llx x6 0x%16llx x7 0x%16llx\n", iframe->r[4], iframe->r[5], iframe->r[6], iframe->r[7]);
+    kprintf("x8 0x%16llx x9 0x%16llx x10 0x%16llx x11 0x%16llx\n", iframe->r[8], iframe->r[9], iframe->r[10], iframe->r[11]);
+    kprintf("x12 0x%16llx x13 0x%16llx x14 0x%16llx x15 0x%16llx\n", iframe->r[12], iframe->r[13], iframe->r[14], iframe->r[15]);
+    kprintf("x16 0x%16llx x17 0x%16llx x18 0x%16llx x19 0x%16llx\n", iframe->r[16], iframe->r[17], iframe->r[18], iframe->r[19]);
+    kprintf("x20 0x%16llx x21 0x%16llx x22 0x%16llx x23 0x%16llx\n", iframe->r[20], iframe->r[21], iframe->r[22], iframe->r[23]);
+    kprintf("x24 0x%16llx x25 0x%16llx x26 0x%16llx x27 0x%16llx\n", iframe->r[24], iframe->r[25], iframe->r[26], iframe->r[27]);
+    kprintf("x28 0x%16llx x29 0x%16llx lr 0x%16llx usp 0x%16llx\n", iframe->r[28], iframe->r[29], iframe->lr, iframe->usp);
+    kprintf("elr 0x%16llx\n", iframe->elr);
+    kprintf("spsr 0x%16llx\n", iframe->spsr);
     arch_stacktrace(iframe->r[29], iframe->elr);
 }
 
@@ -182,7 +182,7 @@ void arm64_sync_exception(struct arm64_iframe_long *iframe) {
 #endif
         case 0b100000: /* instruction abort from lower level */
         case 0b100001: /* instruction abort from same level */
-            printf("instruction abort: PC at 0x%llx\n", iframe->elr);
+            kprintf("instruction abort: PC at 0x%llx\n", iframe->elr);
             print_fault_msg(BITS(iss, 5, 0));
             break;
         case 0b100100: /* data abort from lower level */
@@ -199,29 +199,29 @@ void arm64_sync_exception(struct arm64_iframe_long *iframe) {
             /* read the FAR register */
             uint64_t far = ARM64_READ_SYSREG(far_el1);
 
-            printf("data fault: %s access from PC 0x%llx, FAR 0x%llx, iss 0x%x (DFSC 0x%lx)\n",
+            kprintf("data fault: %s access from PC 0x%llx, FAR 0x%llx, iss 0x%x (DFSC 0x%lx)\n",
                    BIT(iss, 6) ? "Write" : "Read", iframe->elr, far, iss, BITS(iss, 5, 0));
             print_fault_msg(BITS(iss, 5, 0));
             break;
         }
         case 0b111100: {
-            printf("BRK #0x%04lx instruction: PC at 0x%llx\n",
+            kprintf("BRK #0x%04lx instruction: PC at 0x%llx\n",
                    BITS_SHIFT(iss, 15, 0), iframe->elr);
             break;
         }
         default:
-            printf("unhandled synchronous exception\n");
+            kprintf("unhandled synchronous exception\n");
     }
 
     /* unhandled exception, die here */
-    printf("ESR 0x%x: ec 0x%x, il 0x%x, iss 0x%x\n", esr, ec, il, iss);
+    kprintf("ESR 0x%x: ec 0x%x, il 0x%x, iss 0x%x\n", esr, ec, il, iss);
     dump_iframe(iframe);
 
     panic("die\n");
 }
 
 void arm64_invalid_exception(struct arm64_iframe_long *iframe, unsigned int which) {
-    printf("invalid exception, which 0x%x\n", which);
+    kprintf("invalid exception, which 0x%x\n", which);
     dump_iframe(iframe);
 
     panic("die\n");
```
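The fall-through report above prints the ESR broken into `ec`/`il`/`iss`. The decode itself is not visible in this hunk, but the field layout is architecturally fixed, so a standalone sketch of the split; the sample ESR value is made up:

```c
#include <stdint.h>
#include <stdio.h>

// ARMv8-A ESR_EL1 layout: EC in [31:26], IL in [25], ISS in [24:0].
int main(void) {
    uint32_t esr = 0x96000045;             // made-up example value
    uint32_t ec  = (esr >> 26) & 0x3f;     // exception class
    uint32_t il  = (esr >> 25) & 1;        // instruction length bit
    uint32_t iss = esr & ((1u << 25) - 1); // syndrome; DFSC sits in its low bits
    printf("ESR 0x%x: ec 0x%x, il 0x%x, iss 0x%x\n", esr, ec, il, iss);
    // prints: ESR 0x96000045: ec 0x25, il 0x1, iss 0x45
    return 0;
}
```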
```diff
@@ -90,7 +90,7 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
     pte_t *page_table;
     vaddr_t vaddr_rem;
 
-    LTRACEF("aspace %p, vaddr 0x%lx\n", aspace, vaddr);
+    KLTRACEF("aspace %p, vaddr 0x%lx\n", aspace, vaddr);
 
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
@@ -127,7 +127,7 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
         descriptor_type = pte & MMU_PTE_DESCRIPTOR_MASK;
         pte_addr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
 
-        LTRACEF("va 0x%lx, index %d, index_shift %d, rem 0x%lx, pte 0x%llx\n",
+        KLTRACEF("va 0x%lx, index %d, index_shift %d, rem 0x%lx, pte 0x%llx\n",
                 vaddr, index, index_shift, vaddr_rem, pte);
 
         if (descriptor_type == MMU_PTE_DESCRIPTOR_INVALID)
@@ -183,7 +183,7 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
             *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
         }
     }
-    LTRACEF("va 0x%lx, paddr 0x%lx, flags 0x%x\n",
+    KLTRACEF("va 0x%lx, paddr 0x%lx, flags 0x%x\n",
             vaddr, paddr ? *paddr : ~0UL, flags ? *flags : ~0U);
     return 0;
 }
@@ -191,7 +191,7 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
 static int alloc_page_table(paddr_t *paddrp, uint page_size_shift) {
     size_t size = 1U << page_size_shift;
 
-    LTRACEF("page_size_shift %u\n", page_size_shift);
+    KLTRACEF("page_size_shift %u\n", page_size_shift);
 
     if (size == PAGE_SIZE) {
         vm_page_t *p = pmm_alloc_page();
@@ -215,12 +215,12 @@ static int alloc_page_table(paddr_t *paddrp, uint page_size_shift) {
         }
     }
 
-    LTRACEF("allocated 0x%lx\n", *paddrp);
+    KLTRACEF("allocated 0x%lx\n", *paddrp);
     return 0;
 }
 
 static void free_page_table(void *vaddr, paddr_t paddr, uint page_size_shift) {
-    LTRACEF("vaddr %p paddr 0x%lx page_size_shift %u\n", vaddr, paddr, page_size_shift);
+    KLTRACEF("vaddr %p paddr 0x%lx page_size_shift %u\n", vaddr, paddr, page_size_shift);
 
     size_t size = 1U << page_size_shift;
     vm_page_t *page;
@@ -246,24 +246,24 @@ static pte_t *arm64_mmu_get_page_table(vaddr_t index, uint page_size_shift, pte_
         case MMU_PTE_DESCRIPTOR_INVALID:
             ret = alloc_page_table(&paddr, page_size_shift);
             if (ret) {
-                TRACEF("failed to allocate page table\n");
+                KTRACEF("failed to allocate page table\n");
                 return NULL;
             }
             vaddr = paddr_to_kvaddr(paddr);
 
-            LTRACEF("allocated page table, vaddr %p, paddr 0x%lx\n", vaddr, paddr);
+            KLTRACEF("allocated page table, vaddr %p, paddr 0x%lx\n", vaddr, paddr);
             memset(vaddr, MMU_PTE_DESCRIPTOR_INVALID, 1U << page_size_shift);
 
             __asm__ volatile("dmb ishst" ::: "memory");
 
             pte = paddr | MMU_PTE_L012_DESCRIPTOR_TABLE;
             page_table[index] = pte;
-            LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
+            KLTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
             return vaddr;
 
         case MMU_PTE_L012_DESCRIPTOR_TABLE:
             paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
-            LTRACEF("found page table 0x%lx\n", paddr);
+            KLTRACEF("found page table 0x%lx\n", paddr);
             return paddr_to_kvaddr(paddr);
 
         case MMU_PTE_L012_DESCRIPTOR_BLOCK:
@@ -282,13 +282,13 @@ static bool page_table_is_clear(pte_t *page_table, uint page_size_shift) {
     for (i = 0; i < count; i++) {
         pte = page_table[i];
         if (pte != MMU_PTE_DESCRIPTOR_INVALID) {
-            LTRACEF("page_table at %p still in use, index %d is 0x%llx\n",
+            KLTRACEF("page_table at %p still in use, index %d is 0x%llx\n",
                     page_table, i, pte);
             return false;
         }
     }
 
-    LTRACEF("page table at %p is clear\n", page_table);
+    KLTRACEF("page table at %p is clear\n", page_table);
     return true;
 }
 
@@ -305,7 +305,7 @@ static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
     pte_t pte;
     paddr_t page_table_paddr;
 
-    LTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, size 0x%lx, index shift %d, page_size_shift %d, page_table %p\n",
+    KLTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, size 0x%lx, index shift %d, page_size_shift %d, page_table %p\n",
            vaddr, vaddr_rel, size, index_shift, page_size_shift, page_table);
 
     while (size) {
@@ -327,13 +327,13 @@ static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
                                next_page_table, asid);
             if (chunk_size == block_size ||
                     page_table_is_clear(next_page_table, page_size_shift)) {
-                LTRACEF("pte %p[0x%lx] = 0 (was page table)\n", page_table, index);
+                KLTRACEF("pte %p[0x%lx] = 0 (was page table)\n", page_table, index);
                 page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
                 __asm__ volatile("dmb ishst" ::: "memory");
                 free_page_table(next_page_table, page_table_paddr, page_size_shift);
             }
         } else if (pte) {
-            LTRACEF("pte %p[0x%lx] = 0\n", page_table, index);
+            KLTRACEF("pte %p[0x%lx] = 0\n", page_table, index);
             page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
             CF;
             if (asid == MMU_ARM64_GLOBAL_ASID)
@@ -341,7 +341,7 @@ static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
             else
                 ARM64_TLBI(vae1is, vaddr >> 12 | (vaddr_t)asid << 48);
         } else {
-            LTRACEF("pte %p[0x%lx] already clear\n", page_table, index);
+            KLTRACEF("pte %p[0x%lx] already clear\n", page_table, index);
         }
         vaddr += chunk_size;
         vaddr_rel += chunk_size;
```
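The walk routines above repeatedly log `index` and `index_shift`: at each translation level the table index is just the virtual address shifted down by that level's `index_shift` and masked to the number of entries in a table. A standalone illustration for a 4 KB granule; the values are assumed, and with 8-byte descriptors a 4 KB table holds 512 entries, so each level consumes 9 bits:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t vaddr = 0x76543210;   // made-up kernel-relative virtual address
    uint32_t page_size_shift = 12; // 4 KB granule assumed
    // 8-byte descriptors -> (1 << (12 - 3)) = 512 entries per table
    uint64_t index_mask = (1ULL << (page_size_shift - 3)) - 1;
    for (uint32_t index_shift = 30; index_shift >= 12; index_shift -= 9) {
        uint64_t index = (vaddr >> index_shift) & index_mask;
        printf("index_shift %2u -> index %llu\n",
               index_shift, (unsigned long long)index); // 1, 434, 323
    }
    return 0;
}
```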
```diff
@@ -367,12 +367,12 @@ static int arm64_mmu_map_pt(vaddr_t vaddr_in, vaddr_t vaddr_rel_in,
     vaddr_t block_mask;
     pte_t pte;
 
-    LTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, paddr 0x%lx, size 0x%lx, attrs 0x%llx, index shift %d, page_size_shift %d, page_table %p\n",
+    KLTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, paddr 0x%lx, size 0x%lx, attrs 0x%llx, index shift %d, page_size_shift %d, page_table %p\n",
            vaddr, vaddr_rel, paddr, size, attrs,
            index_shift, page_size_shift, page_table);
 
     if ((vaddr_rel | paddr | size) & ((1UL << page_size_shift) - 1)) {
-        TRACEF("not page aligned\n");
+        KTRACEF("not page aligned\n");
         return ERR_INVALID_ARGS;
     }
 
@@ -399,7 +399,7 @@ static int arm64_mmu_map_pt(vaddr_t vaddr_in, vaddr_t vaddr_rel_in,
         } else {
             pte = page_table[index];
             if (pte) {
-                TRACEF("page table entry already in use, index 0x%lx, 0x%llx\n",
+                KTRACEF("page table entry already in use, index 0x%lx, 0x%llx\n",
                        index, pte);
                 goto err;
             }
@@ -410,7 +410,7 @@ static int arm64_mmu_map_pt(vaddr_t vaddr_in, vaddr_t vaddr_rel_in,
             else
                 pte |= MMU_PTE_L3_DESCRIPTOR_PAGE;
 
-            LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
+            KLTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
             page_table[index] = pte;
         }
         vaddr += chunk_size;
@@ -436,17 +436,17 @@ int arm64_mmu_map(vaddr_t vaddr, paddr_t paddr, size_t size, pte_t attrs,
     vaddr_t vaddr_rel = vaddr - vaddr_base;
     vaddr_t vaddr_rel_max = 1UL << top_size_shift;
 
-    LTRACEF("vaddr 0x%lx, paddr 0x%lx, size 0x%lx, attrs 0x%llx, asid 0x%x\n",
+    KLTRACEF("vaddr 0x%lx, paddr 0x%lx, size 0x%lx, attrs 0x%llx, asid 0x%x\n",
            vaddr, paddr, size, attrs, asid);
 
     if (vaddr_rel > vaddr_rel_max - size || size > vaddr_rel_max) {
-        TRACEF("vaddr 0x%lx, size 0x%lx out of range vaddr 0x%lx, size 0x%lx\n",
+        KTRACEF("vaddr 0x%lx, size 0x%lx out of range vaddr 0x%lx, size 0x%lx\n",
                vaddr, size, vaddr_base, vaddr_rel_max);
         return ERR_INVALID_ARGS;
     }
 
     if (!top_page_table) {
-        TRACEF("page table is NULL\n");
+        KTRACEF("page table is NULL\n");
         return ERR_INVALID_ARGS;
     }
 
@@ -463,16 +463,16 @@ int arm64_mmu_unmap(vaddr_t vaddr, size_t size,
     vaddr_t vaddr_rel = vaddr - vaddr_base;
     vaddr_t vaddr_rel_max = 1UL << top_size_shift;
 
-    LTRACEF("vaddr 0x%lx, size 0x%lx, asid 0x%x\n", vaddr, size, asid);
+    KLTRACEF("vaddr 0x%lx, size 0x%lx, asid 0x%x\n", vaddr, size, asid);
 
     if (vaddr_rel > vaddr_rel_max - size || size > vaddr_rel_max) {
-        TRACEF("vaddr 0x%lx, size 0x%lx out of range vaddr 0x%lx, size 0x%lx\n",
+        KTRACEF("vaddr 0x%lx, size 0x%lx out of range vaddr 0x%lx, size 0x%lx\n",
                vaddr, size, vaddr_base, vaddr_rel_max);
         return ERR_INVALID_ARGS;
     }
 
     if (!top_page_table) {
-        TRACEF("page table is NULL\n");
+        KTRACEF("page table is NULL\n");
         return ERR_INVALID_ARGS;
     }
 
@@ -483,7 +483,7 @@ int arm64_mmu_unmap(vaddr_t vaddr, size_t size,
 }
 
 int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags) {
-    LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);
+    KLTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);
 
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
@@ -520,7 +520,7 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count
 }
 
 int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
-    LTRACEF("vaddr 0x%lx count %u\n", vaddr, count);
+    KLTRACEF("vaddr 0x%lx count %u\n", vaddr, count);
 
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
@@ -553,7 +553,7 @@ int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
 }
 
 status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
-    LTRACEF("aspace %p, base 0x%lx, size 0x%zx, flags 0x%x\n", aspace, base, size, flags);
+    KLTRACEF("aspace %p, base 0x%lx, size 0x%zx, flags 0x%x\n", aspace, base, size, flags);
 
     DEBUG_ASSERT(aspace);
 
@@ -590,13 +590,13 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
         memset(aspace->tt_virt, 0, PAGE_SIZE);
     }
 
-    LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);
+    KLTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);
 
     return NO_ERROR;
 }
 
 status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
-    LTRACEF("aspace %p\n", aspace);
+    KLTRACEF("aspace %p\n", aspace);
 
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT((aspace->flags & ARCH_ASPACE_FLAG_KERNEL) == 0);
@@ -612,7 +612,7 @@ status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
 
 void arch_mmu_context_switch(arch_aspace_t *aspace) {
     if (TRACE_CONTEXT_SWITCH)
-        TRACEF("aspace %p\n", aspace);
+        KTRACEF("aspace %p\n", aspace);
 
     uint64_t tcr;
     uint64_t ttbr;
@@ -624,13 +624,13 @@ void arch_mmu_context_switch(arch_aspace_t *aspace) {
         ARM64_WRITE_SYSREG(ttbr0_el1, ttbr);
 
         if (TRACE_CONTEXT_SWITCH)
-            TRACEF("ttbr 0x%llx, tcr 0x%llx\n", ttbr, tcr);
+            KTRACEF("ttbr 0x%llx, tcr 0x%llx\n", ttbr, tcr);
         ARM64_TLBI(aside1, (uint64_t)MMU_ARM64_USER_ASID << 48);
     } else {
        tcr = MMU_TCR_FLAGS_KERNEL;
 
        if (TRACE_CONTEXT_SWITCH)
-            TRACEF("tcr 0x%llx\n", tcr);
+            KTRACEF("tcr 0x%llx\n", tcr);
     }
 
     ARM64_WRITE_SYSREG(tcr_el1, tcr);
```
```diff
@@ -27,7 +27,7 @@ extern void bcm28xx_send_ipi(uint irq, uint cpu_mask);
 #define GIC_IPI_BASE (14)
 
 status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
-    LTRACEF("target 0x%x, ipi %u\n", target, ipi);
+    KLTRACEF("target 0x%x, ipi %u\n", target, ipi);
 
 #if WITH_DEV_INTERRUPT_ARM_GIC
     uint gic_ipi_num = ipi + GIC_IPI_BASE;
@@ -35,7 +35,7 @@ status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
     /* filter out targets outside of the range of cpus we care about */
     target &= ((1UL << SMP_MAX_CPUS) - 1);
     if (target != 0) {
-        LTRACEF("target 0x%x, gic_ipi %u\n", target, gic_ipi_num);
+        KLTRACEF("target 0x%x, gic_ipi %u\n", target, gic_ipi_num);
         arm_gic_sgi(gic_ipi_num, ARM_GIC_SGI_FLAG_NS, target);
     }
 #elif PLATFORM_BCM28XX
@@ -50,13 +50,13 @@ status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
 }
 
 enum handler_return arm_ipi_generic_handler(void *arg) {
-    LTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
+    KLTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
 
     return INT_NO_RESCHEDULE;
 }
 
 enum handler_return arm_ipi_reschedule_handler(void *arg) {
-    LTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
+    KLTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
 
     return mp_mbx_reschedule_irq();
 }
```
```diff
@@ -42,7 +42,7 @@ static void initial_thread_func(void) {
 
     thread_t *current_thread = get_current_thread();
 
-    LTRACEF("initial_thread_func: thread %p calling %p with arg %p\n", current_thread, current_thread->entry, current_thread->arg);
+    KLTRACEF("initial_thread_func: thread %p calling %p with arg %p\n", current_thread, current_thread->entry, current_thread->arg);
 
     /* release the thread lock that was implicitly held across the reschedule */
     spin_unlock(&thread_lock);
@@ -50,7 +50,7 @@ static void initial_thread_func(void) {
 
     ret = current_thread->entry(current_thread->arg);
 
-    LTRACEF("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);
+    KLTRACEF("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);
 
     thread_exit(ret);
 }
@@ -74,7 +74,7 @@ void arch_thread_initialize(thread_t *t) {
 }
 
 void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
-    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+    KLTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
     arm64_fpu_pre_context_switch(oldthread);
 #if WITH_SMP
     DSB; /* broadcast tlb operations in case the thread moves to another cpu */
```
```diff
@@ -13,7 +13,7 @@
 #define LOCAL_TRACE 0
 
 void arch_early_init(void) {
-    LTRACE;
+    KLTRACE;
 
     /* enable i/d cache */
     uint32_t val = mb_read_msr();
@@ -22,7 +22,7 @@ void arch_early_init(void) {
 }
 
 void arch_init(void) {
-    LTRACE;
+    KLTRACE;
 }
 
 void arch_idle(void) {
@@ -22,7 +22,7 @@ static void initial_thread_func(void) {
     thread_t *ct = get_current_thread();
 
 #if LOCAL_TRACE
-    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
+    KLTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
     dump_thread(ct);
 #endif
@@ -32,13 +32,13 @@ static void initial_thread_func(void) {
 
     int ret = ct->entry(ct->arg);
 
-    LTRACEF("thread %p exiting with %d\n", ct, ret);
+    KLTRACEF("thread %p exiting with %d\n", ct, ret);
 
     thread_exit(ret);
 }
 
 void arch_thread_initialize(thread_t *t) {
-    LTRACEF("t %p (%s)\n", t, t->name);
+    KLTRACEF("t %p (%s)\n", t, t->name);
 
     /* some registers we want to clone for the new thread */
     register uint32_t r2 asm("r2");
@@ -57,7 +57,7 @@ void arch_thread_initialize(thread_t *t) {
 }
 
 void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
-    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+    KLTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
 
     microblaze_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
 }
```
```diff
@@ -15,7 +15,7 @@
 #define LOCAL_TRACE 0
 
 void arch_early_init(void) {
-    LTRACE;
+    KLTRACE;
 
     /* configure the vector table */
     uint32_t temp = mips_read_c0_status();
@@ -56,32 +56,32 @@ void arch_early_init(void) {
 }
 
 void arch_init(void) {
-    LTRACE;
+    KLTRACE;
 
-    printf("MIPS registers:\n");
-    printf("\tPRId 0x%x\n", mips_read_c0_prid());
-    printf("\tconfig 0x%x\n", mips_read_c0_config());
-    printf("\tconfig1 0x%x\n", mips_read_c0_config1());
-    printf("\tconfig2 0x%x\n", mips_read_c0_config2());
-    printf("\tconfig3 0x%x\n", mips_read_c0_config3());
-    printf("\tconfig4 0x%x\n", mips_read_c0_config4());
-    printf("\tconfig5 0x%x\n", mips_read_c0_config5());
-    printf("\tconfig6 0x%x\n", mips_read_c0_config6());
-    printf("\tconfig7 0x%x\n", mips_read_c0_config7());
-    printf("\tstatus 0x%x\n", mips_read_c0_status());
+    dprintf(INFO, "MIPS registers:\n");
+    dprintf(INFO, "\tPRId 0x%x\n", mips_read_c0_prid());
+    dprintf(INFO, "\tconfig 0x%x\n", mips_read_c0_config());
+    dprintf(INFO, "\tconfig1 0x%x\n", mips_read_c0_config1());
+    dprintf(INFO, "\tconfig2 0x%x\n", mips_read_c0_config2());
+    dprintf(INFO, "\tconfig3 0x%x\n", mips_read_c0_config3());
+    dprintf(INFO, "\tconfig4 0x%x\n", mips_read_c0_config4());
+    dprintf(INFO, "\tconfig5 0x%x\n", mips_read_c0_config5());
+    dprintf(INFO, "\tconfig6 0x%x\n", mips_read_c0_config6());
+    dprintf(INFO, "\tconfig7 0x%x\n", mips_read_c0_config7());
+    dprintf(INFO, "\tstatus 0x%x\n", mips_read_c0_status());
 
     uint32_t intctl = mips_read_c0_intctl();
-    printf("\tintctl 0x%x\n", intctl);
-    printf("\t\tIPTI 0x%lx\n", BITS_SHIFT(intctl, 31, 29));
-    printf("\t\tIPPCI 0x%lx\n", BITS_SHIFT(intctl, 28, 26));
-    printf("\t\tIPFDC 0x%lx\n", BITS_SHIFT(intctl, 25, 23));
-    printf("\tsrsctl 0x%x\n", mips_read_c0_srsctl());
-    printf("\tebase 0x%x\n", mips_read_c0_ebase());
-    printf("\tcount 0x%x\n", mips_read_c0_count());
-    printf("\tcompare 0x%x\n", mips_read_c0_compare());
+    dprintf(INFO, "\tintctl 0x%x\n", intctl);
+    dprintf(INFO, "\t\tIPTI 0x%lx\n", BITS_SHIFT(intctl, 31, 29));
+    dprintf(INFO, "\t\tIPPCI 0x%lx\n", BITS_SHIFT(intctl, 28, 26));
+    dprintf(INFO, "\t\tIPFDC 0x%lx\n", BITS_SHIFT(intctl, 25, 23));
+    dprintf(INFO, "\tsrsctl 0x%x\n", mips_read_c0_srsctl());
+    dprintf(INFO, "\tebase 0x%x\n", mips_read_c0_ebase());
+    dprintf(INFO, "\tcount 0x%x\n", mips_read_c0_count());
+    dprintf(INFO, "\tcompare 0x%x\n", mips_read_c0_compare());
 
     __asm__ volatile("syscall");
 
-    LTRACE_EXIT;
+    KLTRACE_EXIT;
 }
 
 void arch_idle(void) {
```
```diff
@@ -21,13 +21,13 @@ extern enum handler_return platform_irq(struct mips_iframe *iframe, uint num);
 void mips_gen_exception(struct mips_iframe *iframe) {
     uint32_t excode = BITS_SHIFT(iframe->cause, 6, 2);
     if (excode == 0x8) {
-        LTRACEF("SYSCALL, EPC 0x%x\n", iframe->epc);
+        KLTRACEF("SYSCALL, EPC 0x%x\n", iframe->epc);
         iframe->epc += 4;
     } else {
-        LTRACEF("status 0x%x\n", iframe->status);
-        LTRACEF("cause 0x%x\n", iframe->cause);
-        LTRACEF("\texcode 0x%x\n", excode);
-        LTRACEF("epc 0x%x\n", iframe->epc);
+        KLTRACEF("status 0x%x\n", iframe->status);
+        KLTRACEF("cause 0x%x\n", iframe->cause);
+        KLTRACEF("\texcode 0x%x\n", excode);
+        KLTRACEF("epc 0x%x\n", iframe->epc);
         for (;;);
     }
 }
@@ -39,7 +39,7 @@ void mips_irq(struct mips_iframe *iframe, uint num) {
     THREAD_STATS_INC(interrupts);
     KEVLOG_IRQ_ENTER(num);
 
-    LTRACEF("IRQ %u, EPC 0x%x, old status 0x%x, status 0x%x\n",
+    KLTRACEF("IRQ %u, EPC 0x%x, old status 0x%x, status 0x%x\n",
             num, iframe->epc, iframe->status, mips_read_c0_status());
 
     enum handler_return ret = INT_NO_RESCHEDULE;
```
```diff
@@ -22,7 +22,7 @@ static void initial_thread_func(void) {
     thread_t *ct = get_current_thread();
 
 #if LOCAL_TRACE
-    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
+    KLTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
     dump_thread(ct);
 #endif
@@ -32,13 +32,13 @@ static void initial_thread_func(void) {
 
     int ret = ct->entry(ct->arg);
 
-    LTRACEF("thread %p exiting with %d\n", ct, ret);
+    KLTRACEF("thread %p exiting with %d\n", ct, ret);
 
     thread_exit(ret);
 }
 
 void arch_thread_initialize(thread_t *t) {
-    LTRACEF("t %p (%s)\n", t, t->name);
+    KLTRACEF("t %p (%s)\n", t, t->name);
 
     /* zero out the thread context */
     memset(&t->arch.cs_frame, 0, sizeof(t->arch.cs_frame));
@@ -48,7 +48,7 @@ void arch_thread_initialize(thread_t *t) {
 }
 
 void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
-    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+    KLTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
 
     mips_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
 }
```
```diff
@@ -32,8 +32,8 @@ static platform_timer_callback cb;
 static void *cb_args;
 
 enum handler_return mips_timer_irq(void) {
-    LTRACEF("count 0x%x\n", mips_read_c0_count());
-    LTRACEF("compare 0x%x\n", mips_read_c0_compare());
+    KLTRACEF("count 0x%x\n", mips_read_c0_count());
+    KLTRACEF("compare 0x%x\n", mips_read_c0_compare());
 
     /* reset it for the next interval */
 retry:
@@ -59,7 +59,7 @@ retry:
 }
 
 status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, lk_time_t interval) {
-    TRACEF("callback %p, arg %p, interval %u\n", callback, arg, interval);
+    KLTRACEF("callback %p, arg %p, interval %u\n", callback, arg, interval);
 
     DEBUG_ASSERT(interval > 0);
     DEBUG_ASSERT(tick_rate != 0 && tick_rate_mhz != 0);
```
```diff
@@ -23,7 +23,7 @@ static void initial_thread_func(void) {
     thread_t *ct = get_current_thread();
 
 #if LOCAL_TRACE
-    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
+    KLTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
     dump_thread(ct);
 #endif
@@ -33,13 +33,13 @@ static void initial_thread_func(void) {
 
     int ret = ct->entry(ct->arg);
 
-    LTRACEF("thread %p exiting with %d\n", ct, ret);
+    KLTRACEF("thread %p exiting with %d\n", ct, ret);
 
     thread_exit(ret);
 }
 
 void arch_thread_initialize(thread_t *t) {
-    LTRACEF("t %p (%s)\n", t, t->name);
+    KLTRACEF("t %p (%s)\n", t, t->name);
 
     /* some registers we want to clone for the new thread */
     register uint32_t r2 asm("r2");
@@ -53,7 +53,7 @@ void arch_thread_initialize(thread_t *t) {
 }
 
 void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
-    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+    KLTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
 
     or1k_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
 }
```
```diff
@@ -127,7 +127,7 @@ void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
     vaddr_t kernel_stack_top = (uintptr_t)ct->stack + ct->stack_size;
     kernel_stack_top = ROUNDDOWN(kernel_stack_top, 16);
 
-    printf("kernel sstatus %#lx\n", riscv_csr_read(sstatus));
+    kprintf("kernel sstatus %#lx\n", riscv_csr_read(sstatus));
 
     // build a user status register
     ulong status;
@@ -138,7 +138,7 @@ void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
     status |= (1ul << RISCV_CSR_XSTATUS_FS_SHIFT); // mark fpu state 'initial'
 #endif
 
-    printf("user sstatus %#lx\n", status);
+    kprintf("user sstatus %#lx\n", status);
 
     arch_disable_ints();
```
```diff
@@ -53,22 +53,22 @@ static const char *cause_to_string(long cause) {
 }
 
 static void dump_iframe(struct riscv_short_iframe *frame, bool kernel) {
-    printf("a0 %#16lx a1 %#16lx a2 %#16lx a3 %#16lx\n", frame->a0, frame->a1, frame->a2, frame->a3);
-    printf("a4 %#16lx a5 %#16lx a6 %#16lx a7 %#16lx\n", frame->a4, frame->a5, frame->a6, frame->a7);
-    printf("t0 %#16lx t1 %#16lx t2 %#16lx t3 %#16lx\n", frame->t0, frame->t1, frame->t2, frame->t3);
-    printf("t5 %#16lx t6 %#16lx\n", frame->t5, frame->t6);
+    kprintf("a0 %#16lx a1 %#16lx a2 %#16lx a3 %#16lx\n", frame->a0, frame->a1, frame->a2, frame->a3);
+    kprintf("a4 %#16lx a5 %#16lx a6 %#16lx a7 %#16lx\n", frame->a4, frame->a5, frame->a6, frame->a7);
+    kprintf("t0 %#16lx t1 %#16lx t2 %#16lx t3 %#16lx\n", frame->t0, frame->t1, frame->t2, frame->t3);
+    kprintf("t5 %#16lx t6 %#16lx\n", frame->t5, frame->t6);
     if (!kernel) {
-        printf("gp %#16lx tp %#16lx sp %#lx\n", frame->gp, frame->tp, frame->sp);
+        kprintf("gp %#16lx tp %#16lx sp %#lx\n", frame->gp, frame->tp, frame->sp);
     }
 }
 
 __NO_RETURN __NO_INLINE
 static void fatal_exception(long cause, ulong epc, struct riscv_short_iframe *frame, bool kernel) {
     if (cause < 0) {
-        printf("unhandled interrupt cause %#lx, epc %#lx, tval %#lx\n", cause, epc,
+        kprintf("unhandled interrupt cause %#lx, epc %#lx, tval %#lx\n", cause, epc,
                riscv_csr_read(RISCV_CSR_XTVAL));
     } else {
-        printf("unhandled exception cause %#lx (%s), epc %#lx, tval %#lx\n", cause,
+        kprintf("unhandled exception cause %#lx (%s), epc %#lx, tval %#lx\n", cause,
                cause_to_string(cause), epc, riscv_csr_read(RISCV_CSR_XTVAL));
     }
 
@@ -79,13 +79,13 @@ static void fatal_exception(long cause, ulong epc, struct riscv_short_iframe *fr
 // weak reference, can override this somewhere else
 __WEAK
 void riscv_syscall_handler(struct riscv_short_iframe *frame) {
-    printf("unhandled syscall handler\n");
+    kprintf("unhandled syscall handler\n");
     dump_iframe(frame, false);
     platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
 }
 
 void riscv_exception_handler(long cause, ulong epc, struct riscv_short_iframe *frame, bool kernel) {
-    LTRACEF("hart %u cause %#lx epc %#lx status %#lx kernel %d\n",
+    KLTRACEF("hart %u cause %#lx epc %#lx status %#lx kernel %d\n",
             riscv_current_hart(), cause, epc, frame->status, kernel);
 
     enum handler_return ret = INT_NO_RESCHEDULE;
```
@@ -629,7 +629,7 @@ void riscv_early_mmu_init() {
// called a bit later once on the boot cpu
extern "C"
void riscv_mmu_init() {
printf("RISCV: MMU ASID mask %#lx\n", riscv_asid_mask);
dprintf(INFO, "RISCV: MMU ASID mask %#lx\n", riscv_asid_mask);
}
@@ -39,14 +39,14 @@ volatile int secondaries_to_init = SMP_MAX_CPUS - 1;
uintptr_t _start_physical;

status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
LTRACEF("target 0x%x, ipi %u\n", target, ipi);
KLTRACEF("target 0x%x, ipi %u\n", target, ipi);

mp_cpu_mask_t m = target;
ulong hart_mask = 0;
for (uint c = 0; c < SMP_MAX_CPUS && m; c++, m >>= 1) {
if (m & 1) {
int h = cpu_to_hart_map[c];
LTRACEF("c %u h %d m %#x\n", c, h, m);
KLTRACEF("c %u h %d m %#x\n", c, h, m);

// record a pending hart to notify
hart_mask |= (1ul << h);
@@ -79,7 +79,7 @@ enum handler_return riscv_software_exception(void) {

rmb();
int reason = atomic_swap(&ipi_data[curr_cpu], 0);
LTRACEF("cpu %u reason %#x\n", curr_cpu, reason);
KLTRACEF("cpu %u reason %#x\n", curr_cpu, reason);

enum handler_return ret = INT_NO_RESCHEDULE;
if (reason & (1u << MP_IPI_RESCHEDULE)) {
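Aside: the loop above visits each set bit of the cpu mask by shifting it right once per cpu, translating cpu numbers to hart numbers as it goes. A standalone sketch of the same pattern (the SMP_MAX_CPUS value and the cpu_to_hart_map contents below are made up for illustration; the real map is populated from the platform's topology):

#include <stdio.h>

#define SMP_MAX_CPUS 4

/* hypothetical cpu->hart mapping, invented for the sketch */
static const int cpu_to_hart_map[SMP_MAX_CPUS] = { 1, 2, 3, 0 };

/* walk the target cpu mask one bit at a time, translating to a hart mask */
static unsigned long cpu_mask_to_hart_mask(unsigned int target) {
    unsigned int m = target;
    unsigned long hart_mask = 0;
    for (unsigned int c = 0; c < SMP_MAX_CPUS && m; c++, m >>= 1) {
        if (m & 1) {
            hart_mask |= (1ul << cpu_to_hart_map[c]);
        }
    }
    return hart_mask;
}

int main(void) {
    /* cpus 0 and 2 -> harts 1 and 3 -> mask 0xa */
    printf("hart mask %#lx\n", cpu_mask_to_hart_mask(0x5));
    return 0;
}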
@@ -25,7 +25,7 @@ static void initial_thread_func(void) {
thread_t *ct = get_current_thread();

#if LOCAL_TRACE
LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
KLTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
dump_thread(ct);
#endif

@@ -35,7 +35,7 @@ static void initial_thread_func(void) {

int ret = ct->entry(ct->arg);

LTRACEF("thread %p exiting with %d\n", ct, ret);
KLTRACEF("thread %p exiting with %d\n", ct, ret);

thread_exit(ret);
}
@@ -50,13 +50,13 @@ void arch_thread_initialize(thread_t *t) {
t->arch.cs_frame.sp = stack_top;
t->arch.cs_frame.ra = (vaddr_t)&initial_thread_func;

LTRACEF("t %p (%s) stack top %#lx entry %p arg %p\n", t, t->name, stack_top, t->entry, t->arg);
KLTRACEF("t %p (%s) stack top %#lx entry %p arg %p\n", t, t->name, stack_top, t->entry, t->arg);
}

void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
DEBUG_ASSERT(arch_ints_disabled());

LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
KLTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);

riscv_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
}
@@ -23,7 +23,7 @@ static platform_timer_callback timer_cb;
static void *timer_arg;

status_t platform_set_oneshot_timer (platform_timer_callback callback, void *arg, lk_time_t interval) {
LTRACEF("cb %p, arg %p, interval %u\n", callback, arg, interval);
KLTRACEF("cb %p, arg %p, interval %u\n", callback, arg, interval);

// disable timer
riscv_csr_clear(RISCV_CSR_XIE, RISCV_CSR_XIE_TIE);
@@ -64,7 +64,7 @@ void platform_stop_timer(void) {
}

enum handler_return riscv_timer_exception(void) {
LTRACEF("tick\n");
KLTRACEF("tick\n");

riscv_csr_clear(RISCV_CSR_XIE, RISCV_CSR_XIE_TIE);
@@ -488,7 +488,7 @@ status_t x86_mmu_map_range(map_addr_t init_table, struct map_range *range, arch_
status_t map_status;
uint32_t no_of_pages, index;

TRACEF("table 0x%x, range vaddr 0x%lx paddr 0x%lx size %u\n", init_table, range->start_vaddr, range->start_paddr, range->size);
KTRACEF("table 0x%x, range vaddr 0x%lx paddr 0x%lx size %u\n", init_table, range->start_vaddr, range->start_paddr, range->size);

DEBUG_ASSERT(init_table);
if (!range)
@@ -524,7 +524,7 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
arch_flags_t ret_flags;
status_t stat;

LTRACEF("aspace %p, vaddr 0x%lx, paddr %p, flags %p\n", aspace, vaddr, paddr, flags);
KLTRACEF("aspace %p, vaddr 0x%lx, paddr %p, flags %p\n", aspace, vaddr, paddr, flags);

DEBUG_ASSERT(aspace);
@@ -133,7 +133,7 @@ static inline uint64_t get_pfn_from_pde(uint64_t pde) {

pfn = (pde & X86_2MB_PAGE_FRAME);

LTRACEF_LEVEL(2, "pde 0x%llx, pfn 0x%llx\n", pde, pfn);
KLTRACEF_LEVEL(2, "pde 0x%llx, pfn 0x%llx\n", pde, pfn);

return pfn;
}
@@ -200,13 +200,13 @@ status_t x86_mmu_get_mapping(map_addr_t pml4, vaddr_t vaddr, uint32_t *ret_level
*last_valid_entry = pml4;
*mmu_flags = 0;

LTRACEF_LEVEL(2, "pml4 0x%llx\n", pml4);
KLTRACEF_LEVEL(2, "pml4 0x%llx\n", pml4);

pml4e = get_pml4_entry_from_pml4_table(vaddr, pml4);
if ((pml4e & X86_MMU_PG_P) == 0) {
return ERR_NOT_FOUND;
}
LTRACEF_LEVEL(2, "pml4e 0x%llx\n", pml4e);
KLTRACEF_LEVEL(2, "pml4e 0x%llx\n", pml4e);

pdpe = get_pdp_entry_from_pdp_table(vaddr, pml4e);
if ((pdpe & X86_MMU_PG_P) == 0) {
@@ -214,7 +214,7 @@ status_t x86_mmu_get_mapping(map_addr_t pml4, vaddr_t vaddr, uint32_t *ret_level
*last_valid_entry = pml4e;
return ERR_NOT_FOUND;
}
LTRACEF_LEVEL(2, "pdpe 0x%llx\n", pdpe);
KLTRACEF_LEVEL(2, "pdpe 0x%llx\n", pdpe);

pde = get_pd_entry_from_pd_table(vaddr, pdpe);
if ((pde & X86_MMU_PG_P) == 0) {
@@ -222,7 +222,7 @@ status_t x86_mmu_get_mapping(map_addr_t pml4, vaddr_t vaddr, uint32_t *ret_level
*last_valid_entry = pdpe;
return ERR_NOT_FOUND;
}
LTRACEF_LEVEL(2, "pde 0x%llx\n", pde);
KLTRACEF_LEVEL(2, "pde 0x%llx\n", pde);

/* 2 MB pages */
if (pde & X86_MMU_PG_PS) {
@@ -369,7 +369,7 @@ status_t x86_mmu_add_mapping(map_addr_t pml4, map_addr_t paddr,
map_addr_t *m = NULL;
status_t ret = NO_ERROR;

LTRACEF("pml4 0x%llx paddr 0x%llx vaddr 0x%lx flags 0x%llx\n", pml4, paddr, vaddr, mmu_flags);
KLTRACEF("pml4 0x%llx paddr 0x%llx vaddr 0x%lx flags 0x%llx\n", pml4, paddr, vaddr, mmu_flags);

DEBUG_ASSERT(pml4);
if ((!x86_mmu_check_vaddr(vaddr)) || (!x86_mmu_check_paddr(paddr)) )
@@ -468,42 +468,42 @@ static void x86_mmu_unmap_entry(vaddr_t vaddr, int level, vaddr_t table_entry) {
uint32_t offset = 0, next_level_offset = 0;
vaddr_t *table, *next_table_addr, value;

LTRACEF("vaddr 0x%lx level %d table_entry 0x%lx\n", vaddr, level, table_entry);
KLTRACEF("vaddr 0x%lx level %d table_entry 0x%lx\n", vaddr, level, table_entry);

next_table_addr = NULL;
table = (vaddr_t *)(table_entry & X86_PG_FRAME);
LTRACEF_LEVEL(2, "table %p\n", table);
KLTRACEF_LEVEL(2, "table %p\n", table);

switch (level) {
case PML4_L:
offset = (((uint64_t)vaddr >> PML4_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
LTRACEF_LEVEL(2, "offset %u\n", offset);
KLTRACEF_LEVEL(2, "offset %u\n", offset);
next_table_addr = (vaddr_t *)X86_PHYS_TO_VIRT(table[offset]);
LTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
KLTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
return;
break;
case PDP_L:
offset = (((uint64_t)vaddr >> PDP_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
LTRACEF_LEVEL(2, "offset %u\n", offset);
KLTRACEF_LEVEL(2, "offset %u\n", offset);
next_table_addr = (vaddr_t *)X86_PHYS_TO_VIRT(table[offset]);
LTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
KLTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
return;
break;
case PD_L:
offset = (((uint64_t)vaddr >> PD_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
LTRACEF_LEVEL(2, "offset %u\n", offset);
KLTRACEF_LEVEL(2, "offset %u\n", offset);
next_table_addr = (vaddr_t *)X86_PHYS_TO_VIRT(table[offset]);
LTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
KLTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
return;
break;
case PT_L:
offset = (((uint64_t)vaddr >> PT_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
LTRACEF_LEVEL(2, "offset %u\n", offset);
KLTRACEF_LEVEL(2, "offset %u\n", offset);
next_table_addr = (vaddr_t *)X86_PHYS_TO_VIRT(table[offset]);
LTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
KLTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
return;
break;
@@ -513,13 +513,13 @@ static void x86_mmu_unmap_entry(vaddr_t vaddr, int level, vaddr_t table_entry) {
return;
}

LTRACEF_LEVEL(2, "recursing\n");
KLTRACEF_LEVEL(2, "recursing\n");

level -= 1;
x86_mmu_unmap_entry(vaddr, level, (vaddr_t)next_table_addr);
level += 1;

LTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);
KLTRACEF_LEVEL(2, "next_table_addr %p\n", next_table_addr);

next_table_addr = (vaddr_t *)((vaddr_t)(next_table_addr) & X86_PG_FRAME);
if (level > PT_L) {
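Aside: every case in the switch above computes the same index math at a different shift. A hosted sketch with the x86-64 long-mode shift constants written out (the constant values are restated here as assumptions rather than taken from LK's headers):

#include <stdint.h>
#include <stdio.h>

#define ADDR_OFFSET 9   /* 9 index bits per paging level on x86-64 */
#define PT_SHIFT    12
#define PD_SHIFT    21
#define PDP_SHIFT   30
#define PML4_SHIFT  39

/* extract the table index for one paging level from a virtual address */
static uint32_t vaddr_to_index(uint64_t vaddr, int shift) {
    return (uint32_t)((vaddr >> shift) & ((1ul << ADDR_OFFSET) - 1));
}

int main(void) {
    uint64_t vaddr = 0xffff800012345000ull;
    printf("pml4 %u pdp %u pd %u pt %u\n",
           vaddr_to_index(vaddr, PML4_SHIFT), vaddr_to_index(vaddr, PDP_SHIFT),
           vaddr_to_index(vaddr, PD_SHIFT), vaddr_to_index(vaddr, PT_SHIFT));
    return 0;
}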
@@ -562,7 +562,7 @@ status_t x86_mmu_unmap(map_addr_t pml4, vaddr_t vaddr, uint count) {
int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
addr_t current_cr3_val;

LTRACEF("aspace %p, vaddr 0x%lx, count %u\n", aspace, vaddr, count);
KLTRACEF("aspace %p, vaddr 0x%lx, count %u\n", aspace, vaddr, count);

DEBUG_ASSERT(aspace);

@@ -588,7 +588,7 @@ status_t x86_mmu_map_range(map_addr_t pml4, struct map_range *range, arch_flags_
status_t map_status;
uint32_t no_of_pages, index;

LTRACEF("pml4 0x%llx, range v 0x%lx p 0x%llx size %u flags 0x%llx\n", pml4,
KLTRACEF("pml4 0x%llx, range v 0x%lx p 0x%llx size %u flags 0x%llx\n", pml4,
range->start_vaddr, range->start_paddr, range->size, flags);

DEBUG_ASSERT(pml4);
@@ -625,7 +625,7 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
arch_flags_t ret_flags;
status_t stat;

LTRACEF("aspace %p, vaddr 0x%lx, paddr %p, flags %p\n", aspace, vaddr, paddr, flags);
KLTRACEF("aspace %p, vaddr 0x%lx, paddr %p, flags %p\n", aspace, vaddr, paddr, flags);

DEBUG_ASSERT(aspace);

@@ -640,7 +640,7 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
return stat;

*paddr = (paddr_t)(last_valid_entry);
LTRACEF("paddr 0x%llx\n", last_valid_entry);
KLTRACEF("paddr 0x%llx\n", last_valid_entry);

/* converting x86 arch specific flags to arch mmu flags */
if (flags)
@@ -655,7 +655,7 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count

DEBUG_ASSERT(aspace);

LTRACEF("aspace %p, vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", aspace, vaddr, paddr, count, flags);
KLTRACEF("aspace %p, vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", aspace, vaddr, paddr, count, flags);

if ((!x86_mmu_check_paddr(paddr)))
return ERR_INVALID_ARGS;
@@ -704,7 +704,7 @@ void x86_mmu_early_init(void) {
g_paddr_width = (uint8_t)(addr_width & 0xFF);
g_vaddr_width = (uint8_t)((addr_width >> 8) & 0xFF);

LTRACEF("paddr_width %u vaddr_width %u\n", g_paddr_width, g_vaddr_width);
KLTRACEF("paddr_width %u vaddr_width %u\n", g_paddr_width, g_vaddr_width);

/* unmap the lower identity mapping */
kernel_pml4[0] = 0;
@@ -182,7 +182,7 @@ void x86_exception_handler(x86_iframe_t *frame) {
case INT_MF: { /* x87 floating point math fault */
uint16_t fsw;
__asm__ __volatile__("fnstsw %0" : "=m" (fsw));
TRACEF("fsw 0x%hx\n", fsw);
KTRACEF("fsw 0x%hx\n", fsw);
exception_die(frame, "x87 math fault\n");
//asm volatile("fnclex");
break;
@@ -190,7 +190,7 @@ void x86_exception_handler(x86_iframe_t *frame) {
case INT_XM: { /* simd math fault */
uint32_t mxcsr;
__asm__ __volatile__("stmxcsr %0" : "=m" (mxcsr));
TRACEF("mxcsr 0x%x\n", mxcsr);
KTRACEF("mxcsr 0x%x\n", mxcsr);
exception_die(frame, "simd math fault\n");
break;
}

@@ -157,7 +157,7 @@ void fpu_dev_na_handler(void) {

self = get_current_thread();

LTRACEF("owner %p self %p\n", fp_owner, self);
KLTRACEF("owner %p self %p\n", fp_owner, self);
if ((fp_owner != NULL) && (fp_owner != self)) {
__asm__ __volatile__("fxsave %0" : "=m" (*fp_owner->arch.fpu_states));
__asm__ __volatile__("fxrstor %0" : : "m" (*self->arch.fpu_states));
7
docs/notes.md
Normal file
@@ -0,0 +1,7 @@
sudo ip tuntap add name qemu0 mode tap user $USER

sudo ip link add name qemu0 type bridge
@@ -9,4 +9,8 @@ To fix:
+ arm arch_ops.h left over pre-armv6 stuff
+ see about expanding cycle count to a ulong
+ lib console make read only blocks

printf refactoring:
+ make dump_thread and dump_all_threads work inside regular context
@@ -27,6 +27,9 @@
#include <platform.h>
#include <stdio.h>

/* switch to control whether or not a KERN: prefix is printed in front of each kprintf message */
#define PREFIX_KPRINTF 1

static int cmd_threads(int argc, const console_cmd_args *argv);
static int cmd_threadstats(int argc, const console_cmd_args *argv);
static int cmd_threadload(int argc, const console_cmd_args *argv);
@@ -99,7 +102,7 @@ static enum handler_return threadload(struct timer *t, lk_time_t now, void *arg)
lk_bigtime_t busy_time = 1000000ULL - (delta_time > 1000000ULL ? 1000000ULL : delta_time);
uint busypercent = (busy_time * 10000) / (1000000);

printf("cpu %u LOAD: "
kprintf("cpu %u LOAD: "
"%u.%02u%%, "
"cs %lu, "
"pmpts %lu, "
@@ -209,3 +212,63 @@ static int cmd_kevlog(int argc, const console_cmd_args *argv) {
}

#endif // WITH_KERNEL_EVLOG

#if !DISABLE_DEBUG_OUTPUT

/* kprintf and friends
 *
 * k* print routines bypass stdio logic and directly output to the platform's notion
 * of a debug console.
 */
static void kprint_prefix(void) {
#if PREFIX_KPRINTF
const char *prefix = "KERN:";
for (; *prefix; prefix++) {
platform_dputc(*prefix);
}
#endif
}

void kputc(char c) {
platform_dputc(c);
}

void kputs(const char *str) {
kprint_prefix();
for (; *str; str++) {
platform_dputc(*str);
}
}

static int kprintf_output_func(const char *str, size_t len, void *state) {
for (size_t i = 0; i < len; i++) {
platform_dputc(str[i]);
}

return len;
}

int kprintf(const char *fmt, ...) {
int err;

kprint_prefix();

va_list ap;
va_start(ap, fmt);
err = _printf_engine(&kprintf_output_func, NULL, fmt, ap);
va_end(ap);

return err;
}

int kvprintf(const char *fmt, va_list ap) {
int err;

kprint_prefix();

err = _printf_engine(&kprintf_output_func, NULL, fmt, ap);

return err;
}

#endif // !DISABLE_DEBUG_OUTPUT
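Aside: kprintf hands _printf_engine a callback that pushes formatted bytes straight at the platform console, skipping the stdio FILE layer entirely. A hosted approximation of the same shape, with vsnprintf standing in for LK's internal _printf_engine and putchar standing in for platform_dputc (both stand-ins are assumptions of this sketch; the real kprintf formats without an intermediate buffer):

#include <stdarg.h>
#include <stdio.h>

/* stand-in for platform_dputc(): byte-at-a-time console output */
static void fake_dputc(char c) {
    putchar(c);
}

static void print_prefix(void) {
    for (const char *p = "KERN:"; *p; p++) {
        fake_dputc(*p);
    }
}

/* format into a small stack buffer, then emit char by char, bypassing
 * stdio buffering the way kprintf bypasses the stdio FILE layer */
static int fake_kprintf(const char *fmt, ...) {
    char buf[128];
    va_list ap;
    va_start(ap, fmt);
    int len = vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);

    print_prefix();
    for (const char *p = buf; *p; p++) {
        fake_dputc(*p);
    }
    return len;
}

int main(void) {
    fake_kprintf("CONTROL 0x%x\n", 0x2u);
    return 0;
}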
@@ -8,11 +8,11 @@
#pragma once

#include <lk/compiler.h>
#include <lk/debug.h>
#include <stdarg.h>

__BEGIN_CDECLS

#include <lk/debug.h>

/* kernel event log */
#if WITH_KERNEL_EVLOG
@@ -53,4 +53,22 @@ enum {
#define KEVLOG_IRQ_ENTER(irqn) kernel_evlog_add(KERNEL_EVLOG_IRQ_ENTER, (uintptr_t)irqn, 0)
#define KEVLOG_IRQ_EXIT(irqn) kernel_evlog_add(KERNEL_EVLOG_IRQ_EXIT, (uintptr_t)irqn, 0)

/*
 * kprintf and friends
 *
 * Is defined to always go directly to the current platform debug mechanism.
 * Safe to be called in any context.
 */
#if !DISABLE_DEBUG_OUTPUT
void kputc(char c);
void kputs(const char *str);
int kprintf(const char *fmt, ...) __PRINTFLIKE(1, 2);
int kvprintf(const char *fmt, va_list ap);
#else
static inline void kputc(char c) { }
static inline void kputs(const char *str) { }
static inline int __PRINTFLIKE(1, 2) kprintf(const char *fmt, ...) { return 0; }
static inline int kvprintf(const char *fmt, va_list ap) { return 0; }
#endif

__END_CDECLS
@@ -28,7 +28,7 @@ void mp_init(void) {
void mp_reschedule(mp_cpu_mask_t target, uint flags) {
uint local_cpu = arch_curr_cpu_num();

LTRACEF("local %d, target 0x%x\n", local_cpu, target);
KLTRACEF("local %d, target 0x%x\n", local_cpu, target);

/* mask out cpus that are not active and the local cpu */
target &= mp.active_cpus;
@@ -39,7 +39,7 @@ void mp_reschedule(mp_cpu_mask_t target, uint flags) {
}
target &= ~(1U << local_cpu);

LTRACEF("local %d, post mask target now 0x%x\n", local_cpu, target);
KLTRACEF("local %d, post mask target now 0x%x\n", local_cpu, target);

arch_mp_send_ipi(target, MP_IPI_RESCHEDULE);
}
@@ -51,7 +51,7 @@ void mp_set_curr_cpu_active(bool active) {
enum handler_return mp_mbx_reschedule_irq(void) {
uint cpu = arch_curr_cpu_num();

LTRACEF("cpu %u\n", cpu);
KLTRACEF("cpu %u\n", cpu);

THREAD_STATS_INC(reschedule_ipis);
@@ -126,14 +126,14 @@ static void novm_init_helper(struct novm_arena *n, const char *name,
}

void novm_add_arena(const char *name, uintptr_t arena_start, uintptr_t arena_size) {
LTRACEF("name '%s' start %#lx size %#lx\n", name, arena_start, arena_size);
KLTRACEF("name '%s' start %#lx size %#lx\n", name, arena_start, arena_size);
for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
if (arena[i].pages == 0) {
// if this arena covers where the kernel is, bump start to MEM_START
if (arena_start < END_OF_KERNEL && arena_start + arena_size > END_OF_KERNEL) {
arena_size -= END_OF_KERNEL - arena_start;
arena_start = END_OF_KERNEL;
LTRACEF("trimming arena to %#lx size %#lx\n", arena_start, arena_size);
KLTRACEF("trimming arena to %#lx size %#lx\n", arena_start, arena_size);
}

novm_init_helper(&arena[i], name, arena_start, arena_size, NULL, 0);
@@ -178,7 +178,7 @@ void *novm_alloc_helper(struct novm_arena *n, size_t pages) {
}

void *novm_alloc_pages(size_t pages, uint32_t arena_bitmap) {
LTRACEF("pages %zu\n", pages);
KLTRACEF("pages %zu\n", pages);

/* allocate from any arena */
for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
@@ -193,7 +193,7 @@ void *novm_alloc_pages(size_t pages, uint32_t arena_bitmap) {
}

void novm_free_pages(void *address, size_t pages) {
LTRACEF("address %p, pages %zu\n", address, pages);
KLTRACEF("address %p, pages %zu\n", address, pages);

struct novm_arena *n = NULL;
for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
@@ -216,7 +216,7 @@ void novm_free_pages(void *address, size_t pages) {
}

status_t novm_alloc_specific_pages(void *address, size_t pages) {
LTRACEF("address %p, pages %zu\n", address, pages);
KLTRACEF("address %p, pages %zu\n", address, pages);

struct novm_arena *n = NULL;
for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
@@ -27,7 +27,6 @@
#include <lk/list.h>
#include <malloc.h>
#include <platform.h>
#include <printf.h>
#include <string.h>
#include <target.h>
#if WITH_KERNEL_VM
@@ -385,8 +384,6 @@ void thread_exit(int retcode) {
DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
DEBUG_ASSERT(!thread_is_idle(current_thread));

// dprintf("thread_exit: current %p\n", current_thread);

THREAD_LOCK(state);

/* enter the dead state */
@@ -532,7 +529,7 @@ void thread_resched(void) {
/* if we're switching from a non real time to a real time, cancel
 * the preemption timer. */
#if DEBUG_THREAD_CONTEXT_SWITCH
dprintf(ALWAYS, "arch_context_switch: stop preempt, cpu %d, old %p (%s), new %p (%s)\n",
kprintf("arch_context_switch: stop preempt, cpu %d, old %p (%s), new %p (%s)\n",
cpu, oldthread, oldthread->name, newthread, newthread->name);
#endif
timer_cancel(&preempt_timer[cpu]);
@@ -541,7 +538,7 @@ void thread_resched(void) {
/* if we're switching from a real time (or idle thread) to a regular one,
 * set up a periodic timer to run our preemption tick. */
#if DEBUG_THREAD_CONTEXT_SWITCH
dprintf(ALWAYS, "arch_context_switch: start preempt, cpu %d, old %p (%s), new %p (%s)\n",
kprintf("arch_context_switch: start preempt, cpu %d, old %p (%s), new %p (%s)\n",
cpu, oldthread, oldthread->name, newthread, newthread->name);
#endif
timer_set_periodic(&preempt_timer[cpu], 10, thread_timer_tick, NULL);
@@ -555,7 +552,7 @@ void thread_resched(void) {
set_current_thread(newthread);

#if DEBUG_THREAD_CONTEXT_SWITCH
dprintf(ALWAYS, "arch_context_switch: cpu %d, old %p (%s, pri %d, flags 0x%x), new %p (%s, pri %d, flags 0x%x)\n",
kprintf("arch_context_switch: cpu %d, old %p (%s, pri %d, flags 0x%x), new %p (%s, pri %d, flags 0x%x)\n",
cpu, oldthread, oldthread->name, oldthread->priority,
oldthread->flags, newthread, newthread->name,
newthread->priority, newthread->flags);
@@ -958,32 +955,32 @@ static size_t thread_stack_used(thread_t *t) {
 * @brief Dump debugging info about the specified thread.
 */
void dump_thread(thread_t *t) {
dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
kprintf("dump_thread: t %p (%s)\n", t, t->name);
#if WITH_SMP
dprintf(INFO, "\tstate %s, curr_cpu %d, pinned_cpu %d, priority %d, remaining quantum %d\n",
kprintf("\tstate %s, curr_cpu %d, pinned_cpu %d, priority %d, remaining quantum %d\n",
thread_state_to_str(t->state), t->curr_cpu, t->pinned_cpu, t->priority, t->remaining_quantum);
#else
dprintf(INFO, "\tstate %s, priority %d, remaining quantum %d\n",
kprintf("\tstate %s, priority %d, remaining quantum %d\n",
thread_state_to_str(t->state), t->priority, t->remaining_quantum);
#endif
#ifdef THREAD_STACK_HIGHWATER
dprintf(INFO, "\tstack %p, stack_size %zd, stack_used %zd\n",
kprintf("\tstack %p, stack_size %zd, stack_used %zd\n",
t->stack, t->stack_size, thread_stack_used(t));
#else
dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
kprintf("\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
#endif
dprintf(INFO, "\tentry %p, arg %p, flags 0x%x\n", t->entry, t->arg, t->flags);
dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
kprintf("\tentry %p, arg %p, flags 0x%x\n", t->entry, t->arg, t->flags);
kprintf("\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
#if WITH_KERNEL_VM
dprintf(INFO, "\taspace %p\n", t->aspace);
kprintf("\taspace %p\n", t->aspace);
#endif
#if (MAX_TLS_ENTRY > 0)
dprintf(INFO, "\ttls:");
kprintf("\ttls:");
int i;
for (i=0; i < MAX_TLS_ENTRY; i++) {
dprintf(INFO, " 0x%lx", t->tls[i]);
kprintf(" 0x%lx", t->tls[i]);
}
dprintf(INFO, "\n");
kprintf("\n");
#endif
arch_dump_thread(t);
}
@@ -997,7 +994,7 @@ void dump_all_threads(void) {
THREAD_LOCK(state);
list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
if (t->magic != THREAD_MAGIC) {
dprintf(INFO, "bad magic on thread struct %p, aborting.\n", t);
kprintf("bad magic on thread struct %p, aborting.\n", t);
hexdump(t, sizeof(thread_t));
break;
}
@@ -30,7 +30,7 @@ void *boot_alloc_mem(size_t len) {
ptr = ALIGN(boot_alloc_end, 8);
boot_alloc_end = (ptr + ALIGN(len, 8));

LTRACEF("len %zu, ptr %p\n", len, (void *)ptr);
KLTRACEF("len %zu, ptr %p\n", len, (void *)ptr);

return (void *)ptr;
}
@@ -60,7 +60,7 @@ vm_page_t *paddr_to_vm_page(paddr_t addr) {
}

status_t pmm_add_arena(pmm_arena_t *arena) {
LTRACEF("arena %p name '%s' base 0x%lx size 0x%zx\n", arena, arena->name, arena->base, arena->size);
KLTRACEF("arena %p name '%s' base 0x%lx size 0x%zx\n", arena, arena->name, arena->base, arena->size);

DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->base));
DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->size));
@@ -104,7 +104,7 @@ done_add:
}

size_t pmm_alloc_pages(uint count, struct list_node *list) {
LTRACEF("count %u\n", count);
KLTRACEF("count %u\n", count);

/* list must be initialized prior to calling this */
DEBUG_ASSERT(list);
@@ -151,7 +151,7 @@ vm_page_t *pmm_alloc_page(void) {
}

size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list) {
LTRACEF("address 0x%lx, count %u\n", address, count);
KLTRACEF("address 0x%lx, count %u\n", address, count);

DEBUG_ASSERT(list);

@@ -197,7 +197,7 @@ size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list) {
}

size_t pmm_free(struct list_node *list) {
LTRACEF("list %p\n", list);
KLTRACEF("list %p\n", list);

DEBUG_ASSERT(list);

@@ -239,7 +239,7 @@ size_t pmm_free_page(vm_page_t *page) {

/* physically allocate a run from arenas marked as KMAP */
void *pmm_alloc_kpages(uint count, struct list_node *list) {
LTRACEF("count %u\n", count);
KLTRACEF("count %u\n", count);

/* fast path for single page */
if (count == 1) {
@@ -260,7 +260,7 @@ void *pmm_alloc_kpages(uint count, struct list_node *list) {
}

size_t pmm_free_kpages(void *_ptr, uint count) {
LTRACEF("ptr %p, count %u\n", _ptr, count);
KLTRACEF("ptr %p, count %u\n", _ptr, count);

uint8_t *ptr = (uint8_t *)_ptr;

@@ -281,7 +281,7 @@ size_t pmm_free_kpages(void *_ptr, uint count) {
}

size_t pmm_alloc_contiguous(uint count, uint8_t alignment_log2, paddr_t *pa, struct list_node *list) {
LTRACEF("count %u, align %u\n", count, alignment_log2);
KLTRACEF("count %u, align %u\n", count, alignment_log2);

if (count == 0)
return 0;
@@ -305,8 +305,8 @@ size_t pmm_alloc_contiguous(uint count, uint8_t alignment_log2, paddr_t *pa, str

uint aligned_offset = (rounded_base - a->base) / PAGE_SIZE;
uint start = aligned_offset;
LTRACEF("starting search at aligned offset %u\n", start);
LTRACEF("arena base 0x%lx size %zu\n", a->base, a->size);
KLTRACEF("starting search at aligned offset %u\n", start);
KLTRACEF("arena base 0x%lx size %zu\n", a->base, a->size);

retry:
/* search while we're still within the arena and have a chance of finding a slot
@@ -326,7 +326,7 @@ retry:
}

/* we found a run */
LTRACEF("found run from pn %u to %u\n", start, start + count);
KLTRACEF("found run from pn %u to %u\n", start, start + count);

/* remove the pages from the run out of the free list */
for (uint i = start; i < start + count; i++) {
@@ -354,7 +354,7 @@ retry:

mutex_release(&lock);

LTRACEF("couldn't find run\n");
KLTRACEF("couldn't find run\n");
return 0;
}
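Aside: the retry: loop above scans an arena for count consecutive free pages starting at an aligned offset, skipping past any used page it trips over and realigning before trying again. A toy version of that search over a boolean free map (the map contents are invented for the sketch, and align is assumed to be a power of two):

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16

/* toy free map: true = page free */
static bool page_free[NPAGES] = {
    true, false, true, true, true, true, false, true,
    true, true, true, true, true, true, true, true,
};

/* find `count` consecutive free pages at an offset aligned to `align`
 * pages; on a miss, jump past the conflicting page and round back up
 * to alignment, like the retry loop above */
static int find_run(unsigned count, unsigned align) {
    unsigned start = 0;
    while (start + count <= NPAGES) {
        unsigned i;
        for (i = 0; i < count; i++) {
            if (!page_free[start + i]) {
                break;
            }
        }
        if (i == count) {
            return (int)start;  /* found a run */
        }
        /* skip past the used page and realign */
        start = ((start + i + 1) + align - 1) & ~(align - 1);
    }
    return -1;
}

int main(void) {
    printf("run of 4 aligned to 2 starts at page %d\n", find_run(4, 2));
    return 0;
}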
@@ -71,18 +71,18 @@ static size_t trim_to_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t s

size_t offset = vaddr - aspace->base;

//LTRACEF("vaddr 0x%lx size 0x%zx offset 0x%zx aspace base 0x%lx aspace size 0x%zx\n",
//KLTRACEF("vaddr 0x%lx size 0x%zx offset 0x%zx aspace base 0x%lx aspace size 0x%zx\n",
//        vaddr, size, offset, aspace->base, aspace->size);

if (offset + size < offset)
size = ULONG_MAX - offset - 1;

//LTRACEF("size now 0x%zx\n", size);
//KLTRACEF("size now 0x%zx\n", size);

if (offset + size >= aspace->size - 1)
size = aspace->size - offset;

//LTRACEF("size now 0x%zx\n", size);
//KLTRACEF("size now 0x%zx\n", size);

return size;
}
@@ -111,12 +111,12 @@ static status_t add_region_to_aspace(vmm_aspace_t *aspace, vmm_region_t *r) {
DEBUG_ASSERT(aspace);
DEBUG_ASSERT(r);

LTRACEF("aspace %p base 0x%lx size 0x%zx r %p base 0x%lx size 0x%zx\n",
KLTRACEF("aspace %p base 0x%lx size 0x%zx r %p base 0x%lx size 0x%zx\n",
aspace, aspace->base, aspace->size, r, r->base, r->size);

/* only try if the region will at least fit in the address space */
if (r->size == 0 || !is_region_inside_aspace(aspace, r->base, r->size)) {
LTRACEF("region was out of range\n");
KLTRACEF("region was out of range\n");
return ERR_OUT_OF_RANGE;
}

@@ -145,7 +145,7 @@ static status_t add_region_to_aspace(vmm_aspace_t *aspace, vmm_region_t *r) {
}
}

LTRACEF("couldn't find spot\n");
KLTRACEF("couldn't find spot\n");
return ERR_NO_MEMORY;
}

@@ -212,7 +212,7 @@ static vaddr_t alloc_spot(vmm_aspace_t *aspace, size_t size, uint8_t align_pow2,
DEBUG_ASSERT(aspace);
DEBUG_ASSERT(size > 0 && IS_PAGE_ALIGNED(size));

LTRACEF("aspace %p size 0x%zx align %hhu\n", aspace, size, align_pow2);
KLTRACEF("aspace %p size 0x%zx align %hhu\n", aspace, size, align_pow2);

if (align_pow2 < PAGE_SIZE_SHIFT)
align_pow2 = PAGE_SIZE_SHIFT;
@@ -266,10 +266,10 @@ static vmm_region_t *alloc_region(vmm_aspace_t *aspace, const char *name, size_t
struct list_node *before = NULL;

vaddr = alloc_spot(aspace, size, align_pow2, arch_mmu_flags, &before);
LTRACEF("alloc_spot returns 0x%lx, before %p\n", vaddr, before);
KLTRACEF("alloc_spot returns 0x%lx, before %p\n", vaddr, before);

if (vaddr == (vaddr_t)-1) {
LTRACEF("failed to find spot\n");
KLTRACEF("failed to find spot\n");
free(r);
return NULL;
}
@@ -286,7 +286,7 @@ static vmm_region_t *alloc_region(vmm_aspace_t *aspace, const char *name, size_t
}

status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr) {
LTRACEF("aspace %p name '%s' size 0x%zx vaddr 0x%lx\n", aspace, name, size, vaddr);
KLTRACEF("aspace %p name '%s' size 0x%zx vaddr 0x%lx\n", aspace, name, size, vaddr);

DEBUG_ASSERT(aspace);
DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
@@ -326,7 +326,7 @@ status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,
void **ptr, uint8_t align_log2, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags) {
status_t ret;

LTRACEF("aspace %p name '%s' size 0x%zx ptr %p paddr 0x%lx vmm_flags 0x%x arch_mmu_flags 0x%x\n",
KLTRACEF("aspace %p name '%s' size 0x%zx ptr %p paddr 0x%lx vmm_flags 0x%x arch_mmu_flags 0x%x\n",
aspace, name, size, ptr ? *ptr : 0, paddr, vmm_flags, arch_mmu_flags);

DEBUG_ASSERT(aspace);
@@ -370,7 +370,7 @@ status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,

/* map all of the pages */
int err = arch_mmu_map(&aspace->arch_aspace, r->base, paddr, size / PAGE_SIZE, arch_mmu_flags);
LTRACEF("arch_mmu_map returns %d\n", err);
KLTRACEF("arch_mmu_map returns %d\n", err);

ret = NO_ERROR;

@@ -383,7 +383,7 @@ status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t siz
uint8_t align_pow2, uint vmm_flags, uint arch_mmu_flags) {
status_t err = NO_ERROR;

LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
KLTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
aspace, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);

DEBUG_ASSERT(aspace);
@@ -457,7 +457,7 @@ status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **p
uint8_t align_pow2, uint vmm_flags, uint arch_mmu_flags) {
status_t err = NO_ERROR;

LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
KLTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
aspace, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);

DEBUG_ASSERT(aspace);
@@ -490,7 +490,7 @@ status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **p
size_t count = pmm_alloc_pages(size / PAGE_SIZE, &page_list);
DEBUG_ASSERT(count <= size);
if (count < size / PAGE_SIZE) {
LTRACEF("failed to allocate enough pages (asked for %zu, got %zu)\n", size / PAGE_SIZE, count);
KLTRACEF("failed to allocate enough pages (asked for %zu, got %zu)\n", size / PAGE_SIZE, count);
pmm_free(&page_list);
err = ERR_NO_MEMORY;
goto err;
@@ -680,7 +680,7 @@ void vmm_context_switch(vmm_aspace_t *oldspace, vmm_aspace_t *newaspace) {
}

void vmm_set_active_aspace(vmm_aspace_t *aspace) {
LTRACEF("aspace %p\n", aspace);
KLTRACEF("aspace %p\n", aspace);

thread_t *t = get_current_thread();
DEBUG_ASSERT(t);
@@ -29,7 +29,7 @@ void panic(const char *fmt, ...) {

va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
kvprintf(fmt, ap);
va_end(ap);

platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
@@ -41,14 +41,14 @@ void assert_fail_msg(const char* file, int line, const char* expression, const c
printf("ASSERT FAILED at (%s:%d): %s\n", file, line, expression);
va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
kvprintf(fmt, ap);
va_end(ap);

platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
}

void assert_fail(const char* file, int line, const char* expression) {
printf("ASSERT FAILED at (%s:%d): %s\n", file, line, expression);
kprintf("ASSERT FAILED at (%s:%d): %s\n", file, line, expression);
platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
}
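Aside: panic() now forwards its va_list to kvprintf so the message reaches the raw console regardless of stdio state. The forwarding pattern in isolation (sink below is a stand-in for kvprintf, not LK's API):

#include <stdarg.h>
#include <stdio.h>

/* stand-in for kvprintf: any vprintf-style sink works */
static int sink(const char *fmt, va_list ap) {
    return vfprintf(stderr, fmt, ap);
}

/* a varargs wrapper can only hand its arguments onward via a va_list;
 * C has no way to forward (...) arguments to another (...) function */
static void fake_panic(const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    sink(fmt, ap);
    va_end(ap);
    /* the real panic() would halt the platform here */
}

int main(void) {
    fake_panic("oops: code %d\n", 42);
    return 0;
}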
@@ -98,7 +98,7 @@ FILE *get_panic_fd(void) {
return &panic_fd;
}

void hexdump(const void *ptr, size_t len) {
static void hexdump_print(int (*print)(const char *, ...), const void *ptr, size_t len) {
addr_t address = (addr_t)ptr;
size_t count;

@@ -110,30 +110,38 @@ void hexdump(const void *ptr, size_t len) {
size_t s = ROUNDUP(MIN(len - count, 16), 4);
size_t i;

printf("0x%08lx: ", address);
print("0x%08lx: ", address);
for (i = 0; i < s / 4; i++) {
u.buf[i] = ((const uint32_t *)address)[i];
printf("%08x ", u.buf[i]);
print("%08x ", u.buf[i]);
}
for (; i < 4; i++) {
printf("         ");
print("         ");
}
printf("|");
print("|");

for (i=0; i < 16; i++) {
char c = u.cbuf[i];
if (i < s && isprint(c)) {
printf("%c", c);
print("%c", c);
} else {
printf(".");
print(".");
}
}
printf("|\n");
print("|\n");
address += 16;
}
}

void hexdump8_ex(const void *ptr, size_t len, uint64_t disp_addr) {
void hexdump(const void *ptr, size_t len) {
if (arch_ints_disabled()) {
hexdump_print(&kprintf, ptr, len);
} else {
hexdump_print(&printf, ptr, len);
}
}

static void hexdump8_ex_print(int (*print)(const char *, ...), const void *ptr, size_t len, uint64_t disp_addr) {
addr_t address = (addr_t)ptr;
size_t count;
size_t i;
@@ -142,26 +150,34 @@ void hexdump8_ex(const void *ptr, size_t len, uint64_t disp_addr) {
: "0x%08llx: ";

for (count = 0 ; count < len; count += 16) {
printf(addr_fmt, disp_addr + count);
print(addr_fmt, disp_addr + count);

for (i=0; i < MIN(len - count, 16); i++) {
printf("%02hhx ", *(const uint8_t *)(address + i));
print("%02hhx ", *(const uint8_t *)(address + i));
}

for (; i < 16; i++) {
printf("   ");
print("   ");
}

printf("|");
print("|");

for (i=0; i < MIN(len - count, 16); i++) {
char c = ((const char *)address)[i];
printf("%c", isprint(c) ? c : '.');
print("%c", isprint(c) ? c : '.');
}

printf("\n");
print("\n");
address += 16;
}
}

void hexdump8_ex(const void *ptr, size_t len, uint64_t disp_addr) {
if (arch_ints_disabled()) {
hexdump8_ex_print(&kprintf, ptr, len, disp_addr);
} else {
hexdump8_ex_print(&printf, ptr, len, disp_addr);
}
}

#endif // !DISABLE_DEBUG_OUTPUT
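Aside: the refactor threads a printf-shaped function pointer through the dump workers so one body serves both sinks, chosen once at the entry point. That dispatch recreated in a hosted sketch (both sinks and the interrupts-disabled flag are stand-ins for LK's printf, kprintf, and arch_ints_disabled()):

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* two printf-shaped sinks; in LK these would be printf and kprintf */
static int sink_a(const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    int n = vprintf(fmt, ap);
    va_end(ap);
    return n;
}

static int sink_b(const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    int n = vfprintf(stderr, fmt, ap);
    va_end(ap);
    return n;
}

/* the worker takes the printer as a parameter, like hexdump_print() */
static void dump_byte(int (*print)(const char *, ...), unsigned char b) {
    print("%02x\n", b);
}

static bool fake_ints_disabled; /* stand-in for arch_ints_disabled() */

int main(void) {
    dump_byte(fake_ints_disabled ? &sink_b : &sink_a, 0xab);
    return 0;
}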
@@ -11,6 +11,7 @@
#include <platform/debug.h>
#include <stddef.h>
#include <stdio.h>
#include <kernel/debug.h>

#if !defined(LK_DEBUGLEVEL)
#define LK_DEBUGLEVEL 0
@@ -48,7 +49,9 @@ static inline void hexdump8(const void *ptr, size_t len) {
hexdump8_ex(ptr, len, (uint64_t)((addr_t)ptr));
}

#define dprintf(level, x...) do { if ((level) <= LK_DEBUGLEVEL) { printf(x); } } while (0)
/* dprintf is defined as a wrapper around kprintf that is conditionally enabled
 * based on LK_DEBUGLEVEL. */
#define dprintf(level, x...) do { if ((level) <= LK_DEBUGLEVEL) { kprintf(x); } } while (0)

/* systemwide halts */
void panic(const char *fmt, ...) __PRINTFLIKE(1, 2) __NO_RETURN;
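Aside: a self-contained illustration of the level gate, using printf in place of kprintf so it runs hosted; the level names and LK_DEBUGLEVEL value are arbitrary stand-ins for the real definitions in lk/debug.h. Messages above the compile-time level disappear entirely:

#include <stdio.h>

#define ALWAYS 0
#define INFO   1
#define SPEW   2

#define LK_DEBUGLEVEL 1

/* same shape as the new dprintf: gate on level, then hand off to the printer */
#define dprintf(level, x...) \
    do { if ((level) <= LK_DEBUGLEVEL) { printf(x); } } while (0)

int main(void) {
    dprintf(ALWAYS, "always printed\n");
    dprintf(INFO, "printed when LK_DEBUGLEVEL >= 1\n");
    dprintf(SPEW, "compiled out at LK_DEBUGLEVEL 1\n");
    return 0;
}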
@@ -8,6 +8,7 @@
#pragma once

#include <stdio.h>
#include <kernel/debug.h>

/* trace routines */
#define TRACE_ENTRY printf("%s: entry\n", __PRETTY_FUNCTION__)
@@ -23,3 +24,17 @@
#define LTRACE do { if (LOCAL_TRACE) { TRACE; } } while (0)
#define LTRACEF(x...) do { if (LOCAL_TRACE) { TRACEF(x); } } while (0)
#define LTRACEF_LEVEL(level, x...) do { if (LOCAL_TRACE >= (level)) { TRACEF(x); } } while (0)

/* kprintf versions of the above */
#define KTRACE_ENTRY kprintf("%s: entry\n", __PRETTY_FUNCTION__)
#define KTRACE_EXIT kprintf("%s: exit\n", __PRETTY_FUNCTION__)
#define KTRACE_ENTRY_OBJ kprintf("%s: entry obj %p\n", __PRETTY_FUNCTION__, this)
#define KTRACE_EXIT_OBJ kprintf("%s: exit obj %p\n", __PRETTY_FUNCTION__, this)
#define KTRACE kprintf("%s:%d\n", __PRETTY_FUNCTION__, __LINE__)
#define KTRACEF(str, x...) do { kprintf("%s:%d: " str, __PRETTY_FUNCTION__, __LINE__, ## x); } while (0)

#define KLTRACE_ENTRY do { if (LOCAL_TRACE) { KTRACE_ENTRY; } } while (0)
#define KLTRACE_EXIT do { if (LOCAL_TRACE) { KTRACE_EXIT; } } while (0)
#define KLTRACE do { if (LOCAL_TRACE) { KTRACE; } } while (0)
#define KLTRACEF(x...) do { if (LOCAL_TRACE) { KTRACEF(x); } } while (0)
#define KLTRACEF_LEVEL(level, x...) do { if (LOCAL_TRACE >= (level)) { KTRACEF(x); } } while (0)
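Aside: usage of the K-variants mirrors the existing LTRACEF discipline: define LOCAL_TRACE per file and the macros follow it. A compressed, hosted sketch (kprintf is stubbed with printf, and the standard __func__ stands in for __PRETTY_FUNCTION__):

#include <stdio.h>

#define LOCAL_TRACE 1

/* kprintf stand-in so the sketch runs hosted */
#define kprintf(x...) printf(x)

#define KTRACEF(str, x...) \
    do { kprintf("%s:%d: " str, __func__, __LINE__, ## x); } while (0)
#define KLTRACEF(x...) do { if (LOCAL_TRACE) { KTRACEF(x); } } while (0)

int main(void) {
    KLTRACEF("pages %zu\n", (size_t)4); /* prints; compiles out if LOCAL_TRACE is 0 */
    return 0;
}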
11
top/init.c
@@ -20,7 +20,6 @@
#include <lk/trace.h>

#define LOCAL_TRACE 0
#define TRACE_INIT (LK_DEBUGLEVEL >= 2)
#ifndef EARLIEST_TRACE_LEVEL
#define EARLIEST_TRACE_LEVEL LK_INIT_LEVEL_TARGET_EARLY
#endif
@@ -29,7 +28,7 @@ extern const struct lk_init_struct __start_lk_init __WEAK;
extern const struct lk_init_struct __stop_lk_init __WEAK;

void lk_init_level(enum lk_init_flags required_flag, uint start_level, uint stop_level) {
LTRACEF("flags %#x, start_level %#x, stop_level %#x\n",
KLTRACEF("flags %#x, start_level %#x, stop_level %#x\n",
required_flag, start_level, stop_level);

ASSERT(start_level > 0);
@@ -37,12 +36,12 @@ void lk_init_level(enum lk_init_flags required_flag, uint start_level, uint stop
const struct lk_init_struct *last = NULL;
for (;;) {
/* search for the lowest uncalled hook to call */
LTRACEF("last %p, last_called_level %#x\n", last, last_called_level);
KLTRACEF("last %p, last_called_level %#x\n", last, last_called_level);

const struct lk_init_struct *found = NULL;
bool seen_last = false;
for (const struct lk_init_struct *ptr = &__start_lk_init; ptr != &__stop_lk_init; ptr++) {
LTRACEF("looking at %p (%s) level %#x, flags %#x, seen_last %d\n", ptr, ptr->name, ptr->level, ptr->flags, seen_last);
KLTRACEF("looking at %p (%s) level %#x, flags %#x, seen_last %d\n", ptr, ptr->name, ptr->level, ptr->flags, seen_last);

if (ptr == last)
seen_last = true;
@@ -76,12 +75,10 @@ void lk_init_level(enum lk_init_flags required_flag, uint start_level, uint stop
if (!found)
break;

#if TRACE_INIT
if (found->level >= EARLIEST_TRACE_LEVEL) {
printf("INIT: cpu %d, calling hook %p (%s) at level %#x, flags %#x\n",
dprintf(SPEW, "INIT: cpu %d, calling hook %p (%s) at level %#x, flags %#x\n",
arch_curr_cpu_num(), found->hook, found->name, found->level, found->flags);
}
#endif
found->hook(found->level);
last_called_level = found->level;
last = found;