[kernel] tweak a few thread apis to take a const pointer

A bit of reformatting on some ARM code while I was touching it.
Travis Geiselbrecht
2025-09-20 13:40:10 -07:00
parent f5999d5a40
commit e739abc490
14 changed files with 110 additions and 92 deletions
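
The interface change itself is small: the dump routines only read the thread they are given, so the pointer parameter can be const-qualified. A minimal sketch of what callers see after the change (the array-dumping helper below is hypothetical, purely for illustration):

    #include <stddef.h>
    #include <kernel/thread.h>   /* declares dump_thread() / arch_dump_thread() */

    /* After this change the dump entry points read, but never modify, the
     * thread they are handed:
     *   void dump_thread(const thread_t *t);
     *   void arch_dump_thread(const thread_t *t);
     *
     * Hypothetical caller (name and array are illustrative only): a
     * const thread_t * now flows through without a cast. */
    static void dump_thread_array(const thread_t *threads, size_t count) {
        for (size_t i = 0; i < count; i++) {
            dump_thread(&threads[i]);
        }
    }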

View File

@@ -207,7 +207,7 @@ void arch_context_switch(struct thread *oldthread, struct thread *newthread) {
}
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
dprintf(INFO, "sp 0x%lx", t->arch.sp);

View File

@@ -75,7 +75,7 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
arm_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
dprintf(INFO, "sp 0x%lx\n", t->arch.sp);

View File

@@ -5,20 +5,20 @@
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lk/debug.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/atomic.h>
#include <arch/ops.h>
#include <arch/arm64.h>
#include <arch/arm64/mmu.h>
#include <arch/atomic.h>
#include <arch/mp.h>
#include <arch/ops.h>
#include <assert.h>
#include <kernel/thread.h>
#include <lk/debug.h>
#include <lk/init.h>
#include <lk/main.h>
#include <platform.h>
#include <lk/trace.h>
#include <assert.h>
#include <platform.h>
#include <stdlib.h>
#define LOCAL_TRACE 0
@@ -40,8 +40,7 @@ void arch_early_init(void) {
platform_init_mmu_mappings();
}
void arch_stacktrace(uint64_t fp, uint64_t pc)
{
void arch_stacktrace(uint64_t fp, uint64_t pc) {
struct arm64_stackframe frame;
if (!fp) {
@@ -57,8 +56,9 @@ void arch_stacktrace(uint64_t fp, uint64_t pc)
printf("0x%llx\n", frame.pc);
/* Stack frame pointer should be 16 bytes aligned */
if (frame.fp & 0xF)
if (frame.fp & 0xF) {
break;
}
frame.pc = *((uint64_t *)(frame.fp + 8));
frame.fp = *((uint64_t *)frame.fp);
@@ -111,7 +111,7 @@ void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
* all interrupts enabled
* mode 0: EL0t
*/
uint32_t spsr = 0;
uint64_t spsr = 0;
arch_disable_ints();
@@ -122,10 +122,10 @@ void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
"msr spsr_el1, %[spsr];"
"eret;"
:
: [ustack]"r"(user_stack_top),
[kstack]"r"(kernel_stack_top),
[entry]"r"(entry_point),
[spsr]"r"(spsr)
: [ustack] "r"(user_stack_top),
[kstack] "r"(kernel_stack_top),
[entry] "r"(entry_point),
[spsr] "r"(spsr)
: "memory");
__UNREACHABLE;
}
@@ -135,8 +135,9 @@ void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
void arm64_secondary_entry(ulong);
void arm64_secondary_entry(ulong asm_cpu_num) {
uint cpu = arch_curr_cpu_num();
if (cpu != asm_cpu_num)
if (cpu != asm_cpu_num) {
return;
}
arm64_cpu_early_init();
@@ -157,4 +158,3 @@ void arm64_secondary_entry(ulong asm_cpu_num) {
lk_secondary_cpu_entry();
}
#endif
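
The arch_stacktrace hunk above only reflows the signature and adds braces; for context, the underlying frame-pointer walk it touches looks roughly like this self-contained sketch (simplified: plain printf instead of the kernel's output path, and it assumes every saved frame is mapped and readable):

    #include <stdint.h>
    #include <stdio.h>

    struct arm64_stackframe {
        uint64_t fp;
        uint64_t pc;
    };

    /* Walk the AArch64 fp chain: each frame record is {previous fp, saved lr}.
     * AAPCS64 keeps the frame pointer 16-byte aligned, so a misaligned fp
     * ends the walk. */
    static void stacktrace_sketch(uint64_t fp, uint64_t pc) {
        struct arm64_stackframe frame = { .fp = fp, .pc = pc };
        while (frame.fp) {
            printf("0x%llx\n", (unsigned long long)frame.pc);
            if (frame.fp & 0xF) {
                break;
            }
            frame.pc = *(const uint64_t *)(uintptr_t)(frame.fp + 8);
            frame.fp = *(const uint64_t *)(uintptr_t)frame.fp;
        }
    }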

View File

@@ -8,17 +8,17 @@
#include <arch/arm64/mmu.h>
#include <assert.h>
#include <kernel/vm.h>
#include <lib/heap.h>
#include <lk/bits.h>
#include <lk/debug.h>
#include <lk/err.h>
#include <kernel/vm.h>
#include <lib/heap.h>
#include <lk/trace.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <lk/trace.h>
#define LOCAL_TRACE 0
#define LOCAL_TRACE 0
#define TRACE_CONTEXT_SWITCH 0
STATIC_ASSERT(((long)KERNEL_BASE >> MMU_KERNEL_SIZE_SHIFT) == -1);
@@ -27,9 +27,8 @@ STATIC_ASSERT(MMU_KERNEL_SIZE_SHIFT <= 48);
STATIC_ASSERT(MMU_KERNEL_SIZE_SHIFT >= 25);
/* the main translation table */
pte_t arm64_kernel_translation_table[MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP]
__ALIGNED(MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP * 8)
__SECTION(".bss.prebss.translation_table");
pte_t arm64_kernel_translation_table[MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP] __ALIGNED(MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP * 8)
__SECTION(".bss.prebss.translation_table");
/* the base TCR flags, computed from early init code in start.S */
uint64_t arm64_mmu_tcr_flags __SECTION(".bss.prebss.tcr_flags");
@@ -107,8 +106,9 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
DEBUG_ASSERT(aspace->tt_virt);
DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
if (!is_valid_vaddr(aspace, vaddr))
if (!is_valid_vaddr(aspace, vaddr)) {
return ERR_OUT_OF_RANGE;
}
/* compute shift values based on if this address space is for kernel or user space */
if (aspace->flags & ARCH_ASPACE_FLAG_KERNEL) {
@@ -141,17 +141,16 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
LTRACEF("va 0x%lx, index %d, index_shift %d, rem 0x%lx, pte 0x%llx\n",
vaddr, index, index_shift, vaddr_rem, pte);
if (descriptor_type == MMU_PTE_DESCRIPTOR_INVALID)
if (descriptor_type == MMU_PTE_DESCRIPTOR_INVALID) {
return ERR_NOT_FOUND;
}
if (descriptor_type == ((index_shift > page_size_shift) ?
MMU_PTE_L012_DESCRIPTOR_BLOCK :
MMU_PTE_L3_DESCRIPTOR_PAGE)) {
if (descriptor_type == ((index_shift > page_size_shift) ? MMU_PTE_L012_DESCRIPTOR_BLOCK : MMU_PTE_L3_DESCRIPTOR_PAGE)) {
break;
}
if (index_shift <= page_size_shift ||
descriptor_type != MMU_PTE_L012_DESCRIPTOR_TABLE) {
descriptor_type != MMU_PTE_L012_DESCRIPTOR_TABLE) {
PANIC_UNIMPLEMENTED;
}
@@ -159,12 +158,14 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
index_shift -= page_size_shift - 3;
}
if (paddr)
if (paddr) {
*paddr = pte_addr + vaddr_rem;
}
if (flags) {
*flags = 0;
if (pte & MMU_PTE_ATTR_NON_SECURE)
if (pte & MMU_PTE_ATTR_NON_SECURE) {
*flags |= ARCH_MMU_FLAG_NS;
}
switch (pte & MMU_PTE_ATTR_ATTR_INDEX_MASK) {
case MMU_PTE_ATTR_STRONGLY_ORDERED:
*flags |= ARCH_MMU_FLAG_UNCACHED;
@@ -222,12 +223,14 @@ static int alloc_page_table(paddr_t *paddrp, uint page_size_shift) {
} else if (size > PAGE_SIZE) {
size_t count = size / PAGE_SIZE;
size_t ret = pmm_alloc_contiguous(count, page_size_shift, paddrp, NULL);
if (ret != count)
if (ret != count) {
return ERR_NO_MEMORY;
}
} else {
void *vaddr = memalign(size, size);
if (!vaddr)
if (!vaddr) {
return ERR_NO_MEMORY;
}
*paddrp = vaddr_to_paddr(vaddr);
if (*paddrp == 0) {
free(vaddr);
@@ -247,8 +250,9 @@ static void free_page_table(void *vaddr, paddr_t paddr, uint page_size_shift) {
if (size >= PAGE_SIZE) {
page = paddr_to_vm_page(paddr);
if (!page)
if (!page) {
panic("bad page table paddr 0x%lx\n", paddr);
}
pmm_free_page(page);
} else {
free(vaddr);
@@ -295,14 +299,11 @@ static pte_t *arm64_mmu_get_page_table(vaddr_t index, uint page_size_shift, pte_
}
static bool page_table_is_clear(pte_t *page_table, uint page_size_shift) {
int i;
int count = 1U << (page_size_shift - 3);
pte_t pte;
for (i = 0; i < count; i++) {
pte = page_table[i];
const size_t count = 1UL << (page_size_shift - 3);
for (size_t i = 0; i < count; i++) {
const pte_t pte = page_table[i];
if (pte != MMU_PTE_DESCRIPTOR_INVALID) {
LTRACEF("page_table at %p still in use, index %d is 0x%llx\n",
LTRACEF("page_table at %p still in use, index %zu is %#llx\n",
page_table, i, pte);
return false;
}
@@ -338,7 +339,7 @@ static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
pte = page_table[index];
if (index_shift > page_size_shift &&
(pte & MMU_PTE_DESCRIPTOR_MASK) == MMU_PTE_L012_DESCRIPTOR_TABLE) {
(pte & MMU_PTE_DESCRIPTOR_MASK) == MMU_PTE_L012_DESCRIPTOR_TABLE) {
page_table_paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
next_page_table = paddr_to_kvaddr(page_table_paddr);
arm64_mmu_unmap_pt(vaddr, vaddr_rem, chunk_size,
@@ -346,7 +347,7 @@ static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
page_size_shift,
next_page_table, asid);
if (chunk_size == block_size ||
page_table_is_clear(next_page_table, page_size_shift)) {
page_table_is_clear(next_page_table, page_size_shift)) {
LTRACEF("pte %p[0x%lx] = 0 (was page table)\n", page_table, index);
page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
__asm__ volatile("dmb ishst" ::: "memory");
@@ -356,10 +357,11 @@ static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
LTRACEF("pte %p[0x%lx] = 0\n", page_table, index);
page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
CF;
if (asid == MMU_ARM64_GLOBAL_ASID)
if (asid == MMU_ARM64_GLOBAL_ASID) {
ARM64_TLBI(vaae1is, BITS_SHIFT(vaddr, 55, 12));
else
} else {
ARM64_TLBI(vae1is, BITS_SHIFT(vaddr, 55, 12) | (vaddr_t)asid << 48);
}
} else {
LTRACEF("pte %p[0x%lx] already clear\n", page_table, index);
}
@@ -404,18 +406,20 @@ static int arm64_mmu_map_pt(vaddr_t vaddr_in, vaddr_t vaddr_rel_in,
index = vaddr_rel >> index_shift;
if (((vaddr_rel | paddr) & block_mask) ||
(chunk_size != block_size) ||
(index_shift > MMU_PTE_DESCRIPTOR_BLOCK_MAX_SHIFT)) {
(chunk_size != block_size) ||
(index_shift > MMU_PTE_DESCRIPTOR_BLOCK_MAX_SHIFT)) {
next_page_table = arm64_mmu_get_page_table(index, page_size_shift,
page_table);
if (!next_page_table)
page_table);
if (!next_page_table) {
goto err;
}
ret = arm64_mmu_map_pt(vaddr, vaddr_rem, paddr, chunk_size, attrs,
index_shift - (page_size_shift - 3),
page_size_shift, next_page_table, asid);
if (ret)
if (ret) {
goto err;
}
} else {
pte = page_table[index];
if (pte) {
@@ -425,10 +429,11 @@ static int arm64_mmu_map_pt(vaddr_t vaddr_in, vaddr_t vaddr_rel_in,
}
pte = paddr | attrs;
if (index_shift > page_size_shift)
if (index_shift > page_size_shift) {
pte |= MMU_PTE_L012_DESCRIPTOR_BLOCK;
else
} else {
pte |= MMU_PTE_L3_DESCRIPTOR_PAGE;
}
LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
page_table[index] = pte;
@@ -509,17 +514,20 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count
DEBUG_ASSERT(aspace->tt_virt);
DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
if (!is_valid_vaddr(aspace, vaddr))
if (!is_valid_vaddr(aspace, vaddr)) {
return ERR_OUT_OF_RANGE;
}
/* paddr and vaddr must be aligned */
DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr)) {
return ERR_INVALID_ARGS;
}
if (count == 0)
if (count == 0) {
return NO_ERROR;
}
int ret;
if (aspace->flags & ARCH_ASPACE_FLAG_KERNEL) {
@@ -547,12 +555,14 @@ int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
if (!is_valid_vaddr(aspace, vaddr))
if (!is_valid_vaddr(aspace, vaddr)) {
return ERR_OUT_OF_RANGE;
}
DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
if (!IS_PAGE_ALIGNED(vaddr))
if (!IS_PAGE_ALIGNED(vaddr)) {
return ERR_INVALID_ARGS;
}
int ret;
if (aspace->flags & ARCH_ASPACE_FLAG_KERNEL) {
@@ -592,15 +602,16 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
aspace->tt_virt = arm64_kernel_translation_table;
aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
} else {
//DEBUG_ASSERT(base >= 0);
// DEBUG_ASSERT(base >= 0);
DEBUG_ASSERT(base + size <= 1UL << MMU_USER_SIZE_SHIFT);
aspace->base = base;
aspace->size = size;
pte_t *va = pmm_alloc_kpages(1, NULL);
if (!va)
if (!va) {
return ERR_NO_MEMORY;
}
aspace->tt_virt = va;
aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
@@ -631,8 +642,9 @@ status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
}
void arch_mmu_context_switch(arch_aspace_t *aspace) {
if (TRACE_CONTEXT_SWITCH)
if (TRACE_CONTEXT_SWITCH) {
TRACEF("aspace %p\n", aspace);
}
uint64_t tcr = arm64_mmu_tcr_flags;
uint64_t ttbr;
@@ -643,19 +655,27 @@ void arch_mmu_context_switch(arch_aspace_t *aspace) {
ttbr = ((uint64_t)MMU_ARM64_USER_ASID << 48) | aspace->tt_phys;
ARM64_WRITE_SYSREG(ttbr0_el1, ttbr);
if (TRACE_CONTEXT_SWITCH)
if (TRACE_CONTEXT_SWITCH) {
TRACEF("ttbr 0x%llx, tcr 0x%llx\n", ttbr, tcr);
}
ARM64_TLBI(aside1, (uint64_t)MMU_ARM64_USER_ASID << 48);
} else {
tcr |= MMU_TCR_FLAGS_KERNEL;
if (TRACE_CONTEXT_SWITCH)
if (TRACE_CONTEXT_SWITCH) {
TRACEF("tcr 0x%llx\n", tcr);
}
}
ARM64_WRITE_SYSREG(tcr_el1, tcr);
}
bool arch_mmu_supports_nx_mappings(void) { return true; }
bool arch_mmu_supports_ns_mappings(void) { return true; }
bool arch_mmu_supports_user_aspaces(void) { return true; }
bool arch_mmu_supports_nx_mappings(void) {
return true;
}
bool arch_mmu_supports_ns_mappings(void) {
return true;
}
bool arch_mmu_supports_user_aspaces(void) {
return true;
}
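
One of the non-cosmetic cleanups in the mmu.c hunks above is page_table_is_clear: the index and count move from int to size_t and the fetched pte becomes const. A standalone sketch of the same scan, assuming (as in the real headers) that an invalid descriptor encodes as 0 and that entries are 8 bytes; the in-kernel version also traces the first in-use entry:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t pte_t;
    #define MMU_PTE_DESCRIPTOR_INVALID ((pte_t)0)   /* assumed encoding */

    /* A table of 2^page_size_shift bytes holds 2^(page_size_shift - 3)
     * eight-byte entries; the table is clear when every entry is invalid. */
    static bool page_table_is_clear(pte_t *page_table, unsigned int page_size_shift) {
        const size_t count = (size_t)1 << (page_size_shift - 3);
        for (size_t i = 0; i < count; i++) {
            const pte_t pte = page_table[i];
            if (pte != MMU_PTE_DESCRIPTOR_INVALID) {
                return false;
            }
        }
        return true;
    }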

View File

@@ -7,11 +7,10 @@
*/
#include <arch/mp.h>
#include <assert.h>
#include <lk/trace.h>
#include <lk/err.h>
#include <platform/interrupts.h>
#include <arch/ops.h>
#include <lk/err.h>
#include <lk/trace.h>
#include <platform/interrupts.h>
#if WITH_DEV_INTERRUPT_ARM_GIC
#include <dev/interrupt/arm_gic.h>
@@ -65,7 +64,6 @@ void arch_mp_init_percpu(void) {
register_int_handler(MP_IPI_GENERIC + GIC_IPI_BASE, &arm_ipi_generic_handler, 0);
register_int_handler(MP_IPI_RESCHEDULE + GIC_IPI_BASE, &arm_ipi_reschedule_handler, 0);
//unmask_interrupt(MP_IPI_GENERIC + GIC_IPI_BASE);
//unmask_interrupt(MP_IPI_RESCHEDULE + GIC_IPI_BASE);
// unmask_interrupt(MP_IPI_GENERIC + GIC_IPI_BASE);
// unmask_interrupt(MP_IPI_RESCHEDULE + GIC_IPI_BASE);
}

View File

@@ -5,20 +5,20 @@
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <arch/arm64.h>
#include <kernel/thread.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <kernel/thread.h>
#include <arch/arm64.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#define LOCAL_TRACE 0
struct context_switch_frame {
vaddr_t lr;
vaddr_t pad; // Padding to keep frame size a multiple of
vaddr_t tpidr_el0; // sp alignment requirements (16 bytes)
vaddr_t pad; // Padding to keep frame size a multiple of
vaddr_t tpidr_el0; // sp alignment requirements (16 bytes)
vaddr_t tpidrro_el0;
vaddr_t r18;
vaddr_t r19;
@@ -80,7 +80,7 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
arm64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
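
The reworded comment in context_switch_frame above encodes a real constraint: the saved frame is pushed onto the stack, so its size must stay a multiple of 16 to preserve AArch64 sp alignment. A hedged sketch of how that invariant could be checked at compile time (abbreviated field list, names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t vaddr_t;

    /* Abbreviated frame: the pad field keeps the total size a multiple of 16.
     * The real frame also saves the remaining callee-saved registers. */
    struct context_switch_frame_sketch {
        vaddr_t lr;
        vaddr_t pad;
        vaddr_t tpidr_el0;
        vaddr_t tpidrro_el0;
        vaddr_t r18;
        vaddr_t r19;
        vaddr_t r20;
        vaddr_t r21;
    };

    static_assert(sizeof(struct context_switch_frame_sketch) % 16 == 0,
                  "context switch frame must keep sp 16-byte aligned");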

View File

@@ -53,7 +53,7 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
m68k_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
#if 0
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");

View File

@@ -62,7 +62,7 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
microblaze_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
dprintf(INFO, "sp 0x%x\n", t->arch.cs_frame.r1);

View File

@@ -53,7 +53,7 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
mips_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
dprintf(INFO, "sp 0x%x\n", t->arch.cs_frame.sp);

View File

@@ -58,7 +58,7 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
or1k_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
dprintf(INFO, "sp 0x%x\n", t->arch.cs_frame.r1);

View File

@@ -120,7 +120,7 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
riscv_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
#if RISCV_FPU

View File

@@ -77,7 +77,7 @@ void arch_thread_initialize(thread_t *t) {
t->arch.sp = (vaddr_t)frame;
}
void arch_dump_thread(thread_t *t) {
void arch_dump_thread(const thread_t *t) {
if (t->state != THREAD_RUNNING) {
dprintf(INFO, "\tarch: ");
dprintf(INFO, "sp 0x%lx\n", t->arch.sp);

View File

@@ -172,8 +172,8 @@ status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout);
status_t thread_detach_and_resume(thread_t *t);
status_t thread_set_real_time(thread_t *t);
void dump_thread(thread_t *t);
void arch_dump_thread(thread_t *t);
void dump_thread(const thread_t *t);
void arch_dump_thread(const thread_t *t);
void dump_all_threads(void);
void dump_all_threads_unlocked(void);
void dump_threads_stats(void);

View File

@@ -944,7 +944,7 @@ static const char *thread_state_to_str(enum thread_state state) {
}
}
static size_t thread_stack_used(thread_t *t) {
static size_t thread_stack_used(const thread_t *t) {
#ifdef THREAD_STACK_HIGHWATER
uint8_t *stack_base;
size_t stack_size;
@@ -965,7 +965,7 @@ static size_t thread_stack_used(thread_t *t) {
/**
* @brief Dump debugging info about the specified thread.
*/
void dump_thread(thread_t *t) {
void dump_thread(const thread_t *t) {
dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
#if WITH_SMP
dprintf(INFO, "\tstate %s, curr_cpu %d, pinned_cpu %d, priority %d, remaining quantum %d\n",