2018-10-14 17:12:01 -07:00
|
|
|
/*
|
|
|
|
|
* Copyright (c) 2015 Travis Geiselbrecht
|
|
|
|
|
*
|
2019-07-05 17:22:23 -07:00
|
|
|
* Use of this source code is governed by a MIT-style
|
|
|
|
|
* license that can be found in the LICENSE file or at
|
|
|
|
|
* https://opensource.org/licenses/MIT
|
2018-10-14 17:12:01 -07:00
|
|
|
*/
|
2019-02-18 22:05:44 -08:00
|
|
|
#include <assert.h>
|
2019-06-17 18:28:51 -07:00
|
|
|
#include <lk/trace.h>
|
|
|
|
|
#include <lk/debug.h>
|
2018-10-14 17:12:01 -07:00
|
|
|
#include <stdint.h>
|
2021-03-29 03:04:12 -07:00
|
|
|
#include <stdlib.h>
|
2018-10-14 17:12:01 -07:00
|
|
|
#include <arch/riscv.h>
|
2019-02-18 22:05:44 -08:00
|
|
|
#include <arch/ops.h>
|
2019-12-04 09:08:57 -08:00
|
|
|
#include <arch/mp.h>
|
|
|
|
|
#include <lk/init.h>
|
|
|
|
|
#include <lk/main.h>
|
2019-12-04 09:35:56 -08:00
|
|
|
#include <platform.h>
|
2021-10-21 23:08:38 -07:00
|
|
|
#include <arch.h>
|
2018-10-14 17:12:01 -07:00
|
|
|
|
2024-06-01 16:56:19 -07:00
|
|
|
#include "arch/riscv/feature.h"
|
2020-03-28 20:21:25 -07:00
|
|
|
#include "riscv_priv.h"
|
2018-10-14 17:12:01 -07:00
|
|
|
|
2020-03-28 20:21:25 -07:00
|
|
|
// local tracing switch for this file (see lk/trace.h)
#define LOCAL_TRACE 0

// Per cpu structure, one slot per possible cpu.
// NOTE(review): comment said "pointed to by xscratch", but the code below
// installs it in the tp register via riscv_set_percpu() — confirm which
// register actually carries it in the current exception code.
struct riscv_percpu percpu[SMP_MAX_CPUS];

// called extremely early from start.S prior to getting into any other C code on
// both the boot cpu and the secondaries
|
2021-10-21 23:08:38 -07:00
|
|
|
void riscv_configure_percpu_early(uint hart_id, uint __unused, uint cpu_num);

// Record this cpu's identity in its percpu slot and make the slot reachable
// from the tp register. Runs before any other C code on every cpu.
void riscv_configure_percpu_early(uint hart_id, uint __unused, uint cpu_num) {
    struct riscv_percpu *const slot = &percpu[cpu_num];

    // aim the tp register at this cpu's structure
    riscv_set_percpu(slot);

    // stash the logical cpu number and the hardware hart id
    slot->cpu_num = cpu_num;
    slot->hart_id = hart_id;
    wmb();

#if WITH_SMP
    // do any MP percpu config
    riscv_configure_percpu_mp_early(hart_id, cpu_num);
#endif
}
|
|
|
|
|
|
2020-03-28 20:21:25 -07:00
|
|
|
// first C level code to initialize each cpu
|
|
|
|
|
// First C level code to initialize each cpu: installs the exception vector,
// masks interrupts, and (if configured) brings up the FPU.
// The statement order is deliberate; do not reorder.
void riscv_early_init_percpu(void) {
    // clear the scratch register in case we take an exception early
    riscv_csr_write(RISCV_CSR_XSCRATCH, 0);

    // set the top level exception handler
    riscv_csr_write(RISCV_CSR_XTVEC, (uintptr_t)&riscv_exception_entry);

    // mask all exceptions, just in case: global IE plus the individual
    // software, timer, and external interrupt enables
    riscv_csr_clear(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_IE);
    riscv_csr_clear(RISCV_CSR_XIE, RISCV_CSR_XIE_SIE | RISCV_CSR_XIE_TIE | RISCV_CSR_XIE_EIE);

#if RISCV_FPU
    // enable the fpu and zero it out: clear the FS field, then set it to
    // 'initial' so fpu instructions are permitted before zeroing the state
    riscv_csr_clear(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_FS_MASK);
    riscv_csr_set(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_FS_INITIAL);

    riscv_fpu_zero();
#endif

    // enable cycle counter (disabled for now, unimplemented on sifive-e)
    //riscv_csr_set(mcounteren, 1);
}
|
|
|
|
|
|
2020-03-28 20:21:25 -07:00
|
|
|
// called very early just after entering C code on boot processor
|
|
|
|
|
// Called very early just after entering C code on the boot processor:
// per-cpu init first, then feature detection, then SBI and MMU bring-up
// where those are configured in.
void arch_early_init(void) {
    riscv_early_init_percpu();

    // early pass of ISA/extension feature detection
    riscv_feature_early_init();

#if RISCV_S_MODE
    // probe the SBI firmware interface before anything relies on it
    sbi_early_init();
#endif

#if RISCV_MMU
    riscv_early_mmu_init();
#endif
}
|
|
|
|
|
|
|
|
|
|
// later init per cpu
|
|
|
|
|
// Later per-cpu init, run once the kernel is up: unmask the interrupt
// sources this cpu should take.
void riscv_init_percpu(void) {
#if WITH_SMP
    // enable software interrupts, used for inter-processor-interrupts
    riscv_csr_set(RISCV_CSR_XIE, RISCV_CSR_XIE_SIE);
#endif

    // enable external interrupts
    riscv_csr_set(RISCV_CSR_XIE, RISCV_CSR_XIE_EIE);
}
|
|
|
|
|
|
|
|
|
|
// called later once the kernel is running before platform and target init
|
2018-10-14 17:12:01 -07:00
|
|
|
// Called later once the kernel is running, before platform and target init:
// finishes per-cpu setup, prints arch info, completes feature/SBI/MMU init,
// and releases the secondary cpus.
void arch_init(void) {
    riscv_init_percpu();

    // print some arch info
    const char *mode_string;
#if RISCV_M_MODE
    mode_string = "Machine";
#elif RISCV_S_MODE
    mode_string = "Supervisor";
#else
#error need to define M or S mode
#endif

    dprintf(INFO, "RISCV: %s mode\n", mode_string);
    // NOTE(review): in S mode the m* id registers are presumably fetched
    // indirectly (e.g. via SBI) by these accessors — confirm in riscv_priv.h
    dprintf(INFO, "RISCV: mvendorid %#lx marchid %#lx mimpid %#lx mhartid %#x\n",
            riscv_get_mvendorid(), riscv_get_marchid(),
            riscv_get_mimpid(), riscv_current_hart());

    // second pass of feature init now that output is available
    riscv_feature_init();

#if RISCV_M_MODE
    // misa is only directly readable in machine mode
    dprintf(INFO, "RISCV: misa %#lx\n", riscv_csr_read(RISCV_CSR_MISA));
#elif RISCV_S_MODE
    sbi_init();
#if RISCV_MMU
    dprintf(INFO, "RISCV: MMU enabled sv%u\n", RISCV_MMU);
    riscv_mmu_init();
#endif
#endif

#if WITH_SMP
    // start the secondary cpus
    riscv_boot_secondaries();
#endif
}
|
|
|
|
|
|
2018-10-14 17:12:01 -07:00
|
|
|
// Idle the cpu until the next interrupt.
void arch_idle(void) {
    // let the platform/target disable wfi
#if !RISCV_DISABLE_WFI
    __asm__ volatile("wfi");
#endif
}
|
|
|
|
|
|
|
|
|
|
// Chain-load another image at entry with four arguments.
// Not implemented for riscv; panics if called.
void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
    PANIC_UNIMPLEMENTED;
}
|
|
|
|
|
|
2021-03-29 03:04:12 -07:00
|
|
|
#if RISCV_S_MODE
/* switch to user mode, set the user stack pointer to user_stack_top, get into user space */
void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
    DEBUG_ASSERT(IS_ALIGNED(user_stack_top, 8));

    thread_t *ct = get_current_thread();

    // compute this thread's kernel stack top, 16 byte aligned, to be
    // restored by the exception code on traps from user space
    vaddr_t kernel_stack_top = (uintptr_t)ct->stack + ct->stack_size;
    kernel_stack_top = ROUNDDOWN(kernel_stack_top, 16);

    printf("kernel sstatus %#lx\n", riscv_csr_read(sstatus));

    // build a user status register: previous-interrupt-enable so interrupts
    // are on after sret, and SUM so the kernel may touch user memory
    ulong status;
    status = RISCV_CSR_XSTATUS_PIE |
             RISCV_CSR_XSTATUS_SUM;

#if RISCV_FPU
    // mark fpu state 'initial'. BUGFIX: this must be folded into status
    // *before* status is written to sstatus below; previously it was OR'd in
    // after the csr write, so the FS bits never reached sstatus (user fpu
    // access would trap) and riscv_fpu_zero() ran with the fpu disabled.
    status |= RISCV_CSR_XSTATUS_FS_INITIAL;
#endif

    printf("user sstatus %#lx\n", status);

    arch_disable_ints();

    riscv_csr_write(sstatus, status);
    riscv_csr_write(sepc, entry_point);
    riscv_csr_write(sscratch, kernel_stack_top);

#if RISCV_FPU
    // zero the fpu register state handed to user space
    // (fpu access is now enabled by the sstatus write above)
    riscv_fpu_zero();
#endif

    // put the current tp (percpu pointer) just below the top of the stack
    // the exception code will recover it when coming from user space
    ((uintptr_t *)kernel_stack_top)[-1] = (uintptr_t)riscv_get_percpu();

    asm volatile(
        // set the user stack pointer
        "mv sp, %0\n"
        // zero out the rest of the integer state
        "li a0, 0\n"
        "li a1, 0\n"
        "li a2, 0\n"
        "li a3, 0\n"
        "li a4, 0\n"
        "li a5, 0\n"
        "li a6, 0\n"
        "li a7, 0\n"
        "li t0, 0\n"
        "li t1, 0\n"
        "li t2, 0\n"
        "li t3, 0\n"
        "li t4, 0\n"
        "li t5, 0\n"
        "li t6, 0\n"
        "li s0, 0\n"
        "li s1, 0\n"
        "li s2, 0\n"
        "li s3, 0\n"
        "li s4, 0\n"
        "li s5, 0\n"
        "li s6, 0\n"
        "li s7, 0\n"
        "li s8, 0\n"
        "li s9, 0\n"
        "li s10, 0\n"
        "li s11, 0\n"
        "li ra, 0\n"
        "li gp, 0\n"
        "li tp, 0\n"
        // return to user mode at sepc with sstatus as configured above
        "sret"
        :: "r" (user_stack_top)
    );

    __UNREACHABLE;
}
#endif
|
|
|
|
|
|
2018-10-14 17:12:01 -07:00
|
|
|
/* unimplemented cache operations */
#if RISCV_NO_CACHE_OPS
// Platform declares caches are coherent/absent: all ops are silent no-ops.
void arch_disable_cache(uint flags) { }
void arch_enable_cache(uint flags) { }

void arch_clean_cache_range(addr_t start, size_t len) { }
void arch_clean_invalidate_cache_range(addr_t start, size_t len) { }
void arch_invalidate_cache_range(addr_t start, size_t len) { }
void arch_sync_cache_range(addr_t start, size_t len) { }
#else
// No cache-maintenance implementation yet: panic loudly rather than
// silently skipping a required flush/invalidate.
void arch_disable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
void arch_enable_cache(uint flags) { PANIC_UNIMPLEMENTED; }

void arch_clean_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_clean_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_sync_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
#endif
|