[arch][x86] get SMP working on x86-32

- Added very basic user page table support (needed to bootstrap the
  secondary CPUs); the per-aspace top-level table is sketched below.
- Added MP bootup code for 32-bit.
Travis Geiselbrecht
2025-04-06 19:09:32 -07:00
parent 8fdadd9b33
commit 71e795de19
6 changed files with 121 additions and 40 deletions
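
The common thread in the mmu changes below: each address space now carries its
own top-level page table (the cr3/cr3_phys fields) instead of the
map/unmap/query paths reading whatever happens to be in the current CPU's CR3
register, which stops being meaningful once secondary CPUs are running. A rough
sketch of the resulting per-aspace state, with field names taken from the diff
(the exact struct layout is an assumption, not the committed definition):

    /* sketch only: fields as used by this commit, layout assumed */
    typedef struct arch_aspace {
        vaddr_t base;       /* base of the virtual address range */
        size_t size;        /* size of the range */
        uint flags;         /* ARCH_ASPACE_FLAG_KERNEL for the kernel aspace */
        map_addr_t *cr3;    /* kernel-virtual pointer to the top-level table */
        paddr_t cr3_phys;   /* physical address of it, suitable for CR3 */
    } arch_aspace_t;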


@@ -23,7 +23,12 @@
 #include <string.h>
 #include <sys/types.h>
 
+// TODO:
+// - proper tlb flush (local and SMP)
+// - synchronization of top level page tables for user space aspaces
+
 #define LOCAL_TRACE 0
+#define TRACE_CONTEXT_SWITCH 0
 
 /* top level kernel page tables, initialized in start.S */
 #if X86_LEGACY
@@ -309,8 +314,6 @@ static status_t x86_mmu_unmap(map_addr_t * const init_table, const vaddr_t vaddr
 }
 
 int arch_mmu_unmap(arch_aspace_t * const aspace, const vaddr_t vaddr, const uint count) {
-    map_addr_t init_table_from_cr3;
-
     LTRACEF("aspace %p, vaddr %#lx, count %u\n", aspace, vaddr, count);
 
     DEBUG_ASSERT(aspace);
@@ -321,10 +324,7 @@ int arch_mmu_unmap(arch_aspace_t * const aspace, const vaddr_t vaddr, const uint
     if (count == 0)
         return NO_ERROR;
 
-    DEBUG_ASSERT(x86_get_cr3());
-    init_table_from_cr3 = x86_get_cr3();
-
-    return (x86_mmu_unmap(paddr_to_kvaddr(init_table_from_cr3), vaddr, count));
+    return (x86_mmu_unmap(aspace->cr3, vaddr, count));
 }
 
 /**
@@ -372,12 +372,9 @@ status_t arch_mmu_query(arch_aspace_t * const aspace, const vaddr_t vaddr, paddr
     if (!paddr)
        return ERR_INVALID_ARGS;
 
-    DEBUG_ASSERT(x86_get_cr3());
-    uint32_t current_cr3_val = (map_addr_t)x86_get_cr3();
-
     arch_flags_t ret_flags;
     uint32_t ret_level;
-    status_t stat = x86_mmu_get_mapping(paddr_to_kvaddr(current_cr3_val), vaddr, &ret_level, &ret_flags, paddr);
+    status_t stat = x86_mmu_get_mapping(aspace->cr3, vaddr, &ret_level, &ret_flags, paddr);
     if (stat)
         return stat;
@@ -404,15 +401,12 @@ int arch_mmu_map(arch_aspace_t * const aspace, const vaddr_t vaddr, const paddr_
     if (count == 0)
         return NO_ERROR;
 
-    DEBUG_ASSERT(x86_get_cr3());
-    uint32_t current_cr3_val = (map_addr_t)x86_get_cr3();
-
     struct map_range range;
     range.start_vaddr = vaddr;
     range.start_paddr = (map_addr_t)paddr;
     range.size = count * PAGE_SIZE;
 
-    return (x86_mmu_map_range(paddr_to_kvaddr(current_cr3_val), &range, flags));
+    return (x86_mmu_map_range(aspace->cr3, &range, flags));
 }
 
 bool arch_mmu_supports_nx_mappings(void) { return false; }
@@ -447,8 +441,43 @@ void x86_mmu_init(void) {
 status_t arch_mmu_init_aspace(arch_aspace_t * const aspace, const vaddr_t base, const size_t size, const uint flags) {
     DEBUG_ASSERT(aspace);
 
-    if ((flags & ARCH_ASPACE_FLAG_KERNEL) == 0) {
-        return ERR_NOT_SUPPORTED;
-    }
+    TRACEF("aspace %p, base %#lx, size %#zx, flags %#x\n", aspace, base, size, flags);
+
+    /* validate that the base + size is sane and doesn't wrap */
+    DEBUG_ASSERT(size > PAGE_SIZE);
+    DEBUG_ASSERT(base + size - 1 > base);
 
     aspace->flags = flags;
+
+    if (flags & ARCH_ASPACE_FLAG_KERNEL) {
+        /* at the moment we can only deal with address spaces as globally defined */
+        DEBUG_ASSERT(base == KERNEL_ASPACE_BASE);
+        DEBUG_ASSERT(size == KERNEL_ASPACE_SIZE);
+
+        aspace->base = base;
+        aspace->size = size;
+        aspace->cr3 = kernel_pd;
+        aspace->cr3_phys = vaddr_to_paddr(aspace->cr3);
+    } else {
+        DEBUG_ASSERT(base == USER_ASPACE_BASE);
+        DEBUG_ASSERT(size == USER_ASPACE_SIZE);
+
+        aspace->base = base;
+        aspace->size = size;
+
+        map_addr_t *va = pmm_alloc_kpages(1, NULL);
+        if (!va) {
+            return ERR_NO_MEMORY;
+        }
+
+        aspace->cr3 = va;
+        aspace->cr3_phys = vaddr_to_paddr(aspace->cr3);
+
+        /* copy the top entries from the kernel top table */
+        memcpy(aspace->cr3 + NO_OF_PT_ENTRIES/2, kernel_pd + NO_OF_PT_ENTRIES/2, PAGE_SIZE/2);
+
+        /* zero out the rest */
+        memset(aspace->cr3, 0, PAGE_SIZE/2);
+    }
 
     return NO_ERROR;
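
The memcpy/memset sizes above are consistent with a non-PAE x86-32 page
directory: one 4 KiB page holding 1024 four-byte entries, where (assuming the
usual 2 GB/2 GB kernel/user split) the upper 512 entries map the kernel half of
the address space. A quick arithmetic check, under those assumptions about
map_addr_t and the constants:

    /* 1024 PDEs * 4 bytes = 4096 bytes = one page */
    _Static_assert(NO_OF_PT_ENTRIES * sizeof(map_addr_t) == PAGE_SIZE,
                   "page directory fills exactly one page");
    /* copying PAGE_SIZE/2 bytes starting at entry NO_OF_PT_ENTRIES/2 thus
       duplicates exactly the kernel's 512 PDEs into the new table, and
       zeroing the low PAGE_SIZE/2 bytes leaves the user half empty */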
@@ -459,8 +488,22 @@ status_t arch_mmu_destroy_aspace(arch_aspace_t * const aspace) {
 }
 
 void arch_mmu_context_switch(arch_aspace_t * const aspace) {
-    if (aspace != NULL) {
-        PANIC_UNIMPLEMENTED;
+    if (TRACE_CONTEXT_SWITCH)
+        TRACEF("aspace %p\n", aspace);
+
+    uint64_t cr3;
+    if (aspace) {
+        DEBUG_ASSERT((aspace->flags & ARCH_ASPACE_FLAG_KERNEL) == 0);
+        cr3 = aspace->cr3_phys;
+    } else {
+        // TODO save copy of this
+        cr3 = vaddr_to_paddr(kernel_pd);
     }
+
+    if (TRACE_CONTEXT_SWITCH) {
+        TRACEF("cr3 %#llx\n", cr3);
+    }
+
+    x86_set_cr3(cr3);
 }
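
The context switch itself is just a CR3 load: the page-table walker treats CR3
as a physical address, which is why cr3_phys is precomputed in
arch_mmu_init_aspace rather than translated on every switch. Writing CR3 also
flushes all non-global TLB entries as a side effect, which is what makes the
switch safe before the proper TLB shootdown noted in the TODO exists.
x86_set_cr3 is presumably a thin wrapper of this shape (a sketch, not the
tree's actual definition):

    static inline void x86_set_cr3(uintptr_t cr3) {
        __asm__ volatile("mov %0, %%cr3" :: "r"(cr3) : "memory");
    }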


@@ -26,6 +26,10 @@
 #define LOCAL_TRACE 0
 #define TRACE_CONTEXT_SWITCH 0
 
+// TODO:
+// - proper tlb flush (local and SMP)
+// - synchronization of top level page tables for user space aspaces
+
 /* Address width including virtual/physical address*/
 static uint8_t vaddr_width = 0;
 static uint8_t paddr_width = 0;

@@ -672,7 +676,7 @@ void x86_mmu_init(void) {
 status_t arch_mmu_init_aspace(arch_aspace_t * const aspace, const vaddr_t base, const size_t size, const uint flags) {
     DEBUG_ASSERT(aspace);
 
-    LTRACEF("aspace %p, base %#lx, size %zu, flags %#x\n", aspace, base, size, flags);
+    LTRACEF("aspace %p, base %#lx, size %#zx, flags %#x\n", aspace, base, size, flags);
 
     /* validate that the base + size is sane and doesn't wrap */
     DEBUG_ASSERT(size > PAGE_SIZE);


@@ -26,6 +26,7 @@
 extern enum handler_return platform_irq(x86_iframe_t *frame);
 
 static void dump_fault_frame(x86_iframe_t *frame) {
+    dprintf(CRITICAL, "cpu %u:\n", arch_curr_cpu_num());
 #if ARCH_X86_32
     dprintf(CRITICAL, " CS: %04hx EIP: %08x EFL: %08x CR2: %08lx\n",
             frame->cs, frame->ip, frame->flags, x86_get_cr2());


@@ -52,7 +52,7 @@ void x86_configure_percpu_early(uint cpu_num, uint apic_id) {
     write_msr(X86_MSR_IA32_GS_BASE, (uint64_t)percpu);
 #else
     // set up a gs descriptor for this cpu
-    uint16_t selector = PERCPU_SELECTOR_BASE + cpu_num;
+    uint16_t selector = PERCPU_SELECTOR_BASE + cpu_num * 8;
     x86_set_gdt_descriptor(selector, percpu, sizeof(*percpu), 1, 0, 1, SEG_TYPE_DATA_RW, 0, 1);
     x86_set_gs(selector);
 #endif
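
The "* 8" is the real fix in this hunk: GDT descriptors are 8 bytes, and a
segment selector's descriptor index lives in bits 3 and up (bits 0-2 are the
RPL and table-indicator bits). Consecutive per-cpu selectors therefore have to
step by 8; with the old "+ cpu_num", cpus 1 through 7 decoded to the same
descriptor index as cpu 0, just with a different RPL. Roughly:

    /* selector = (index << 3) | TI | RPL, so one 8-byte GDT slot per cpu */
    /* cpu 0 -> PERCPU_SELECTOR_BASE + 0,  cpu 1 -> + 8,  cpu 2 -> + 16 */
    uint16_t selector = PERCPU_SELECTOR_BASE + cpu_num * 8;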
@@ -84,8 +84,7 @@ status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
     return NO_ERROR;
 }
 
-void arch_mp_init_percpu(void) {
-}
+void arch_mp_init_percpu(void) {}
 
 uint32_t x86_get_apic_id_from_hardware(void) {
     // read the apic id out of cpuid leaf 1, which should be present if SMP is enabled.