[arch][m68k] start to enable the VM on m68k

- Wire up the make plumbing for setting the kernel VM bits.
- Implement stubbed-out arch_mmu routines.
- Set up a high mapping in start.S that should be compatible with the MMU code.
Travis Geiselbrecht
2025-09-07 18:20:52 -07:00
parent 5926fb1cc8
commit 6c7e6a1796
5 changed files with 256 additions and 22 deletions


@@ -0,0 +1,20 @@
/*
* Copyright (c) 2025 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <arch/m68k/mmu.h>
#include <sys/types.h>
struct arch_aspace {
/* pointer to the translation table */
paddr_t pgtable_phys;
void *pgtable_virt;
uint32_t flags;
};
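The arch_mmu routines added in this commit (see mmu.c below) are still stubs, so nothing fills this struct in yet. A hypothetical sketch of what arch_mmu_init_aspace might eventually do with it; alloc_page_table() is a placeholder, not an existing LK API:

// Hypothetical sketch; alloc_page_table() is a placeholder helper.
status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
    aspace->flags = flags;
    // allocate a root table, recording both its physical and virtual addresses
    aspace->pgtable_virt = alloc_page_table(&aspace->pgtable_phys);
    if (!aspace->pgtable_virt)
        return ERR_NO_MEMORY;
    return NO_ERROR;
}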


@@ -7,7 +7,10 @@
*/
#include "arch/m68k/mmu.h"
#if M68K_MMU
#include <assert.h>
#include <kernel/vm.h>
#include <lk/err.h>
#include <lk/trace.h>
#include <stdint.h>
@@ -15,7 +18,18 @@
#define LOCAL_TRACE 1
#if M68K_MMU
// initial mappings set up in start.S using the DTTR and ITTR registers
struct mmu_initial_mapping mmu_initial_mappings[] = {
// all of memory, mapped in start.S
{
.phys = 0,
.virt = KERNEL_ASPACE_BASE,
.size = ROUNDUP(MEMSIZE, 32 * 1024 * 1024), // round up to next 32MB
.flags = 0,
.name = "physmap"},
// null entry to terminate the list
{}};
#if M68K_MMU == 68040
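With the physmap entry above, early physical-to-virtual translation inside the kernel aspace is a constant offset. Roughly what the VM layer derives from mmu_initial_mappings (a sketch, not the actual LK helper):

// sketch: the physmap makes phys <-> virt a fixed KERNEL_ASPACE_BASE offset
static inline void *phys_to_virt_early(paddr_t pa) {
    return (void *)((uintptr_t)pa + KERNEL_ASPACE_BASE);
}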
@@ -343,6 +357,7 @@ static status_t map_range(vaddr_t va, paddr_t pa, size_t len_minus_one) {
void m68k_mmu_init(void) {
LTRACE_ENTRY;
#if 0
// set up some helpful maps for qemu virt
map_range(0, 0, 64 * 1024 * 1024 - 1);
map_range(0xff000000, 0xff000000, 0 - 0xff000000 - 1);
@@ -355,10 +370,64 @@ void m68k_mmu_init(void) {
set_srp((uint32_t)(uintptr_t)kernel_pgtable);
set_urp((uint32_t)(uintptr_t)kernel_pgtable);
set_tc((1 << 15)); // enable, 4K pages
#endif
dump_mmu_regs();
LTRACE_EXIT;
}
// arch mmu routines
#endif // M68K_MMU
#if ARCH_HAS_MMU
#include <arch/mmu.h>
// Default stub implementations for arch_mmu routines
bool arch_mmu_supports_nx_mappings(void) {
return false;
}
bool arch_mmu_supports_ns_mappings(void) {
return false;
}
bool arch_mmu_supports_user_aspaces(void) {
return false;
}
status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
return ERR_NOT_SUPPORTED;
}
status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
return ERR_NOT_SUPPORTED;
}
int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags) {
return ERR_NOT_SUPPORTED;
}
int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
return ERR_NOT_SUPPORTED;
}
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) {
return ERR_NOT_SUPPORTED;
}
vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace,
vaddr_t base, uint prev_region_arch_mmu_flags,
vaddr_t end, uint next_region_arch_mmu_flags,
vaddr_t align, size_t size, uint arch_mmu_flags) {
return (vaddr_t)NULL;
}
void arch_mmu_context_switch(arch_aspace_t *aspace) {
// no-op
}
#endif // ARCH_HAS_MMU
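For orientation, one possible shape for a real arch_mmu_query against the three-level 68040 table built in start.S (va[31:25] / va[24:18] / va[17:12] indices, 4K pages). This is a sketch, not the committed implementation: the descriptor masks assume 68040 short-format descriptors (512-byte-aligned pointer tables, 256-byte-aligned page tables), and paddr_to_kvaddr() is assumed to resolve table pointers through the physmap:

status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) {
    const uint32_t *l0 = aspace->pgtable_virt;
    uint32_t d = l0[(vaddr >> 25) & 0x7f];              // root (L0) descriptor
    if ((d & 0x3) == 0) return ERR_NOT_FOUND;           // UDT: invalid
    const uint32_t *l1 = paddr_to_kvaddr(d & ~0x1ffu);  // L1 pointer table
    d = l1[(vaddr >> 18) & 0x7f];
    if ((d & 0x3) == 0) return ERR_NOT_FOUND;
    const uint32_t *l2 = paddr_to_kvaddr(d & ~0xffu);   // L2 page table
    d = l2[(vaddr >> 12) & 0x3f];
    if ((d & 0x3) == 0) return ERR_NOT_FOUND;           // PDT: invalid
    if (paddr) *paddr = (d & ~0xfffu) | (vaddr & 0xfff);
    if (flags) *flags = 0;                              // S/W bit decoding omitted
    return NO_ERROR;
}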


@@ -49,16 +49,31 @@ ifeq (true, $(call TOBOOL, $(WITH_MMU)))
ifeq ($(M68K_MMU),)
$(error WITH_MMU is set but no M68K_MMU is set)
endif
# we have an mmu and want the vmm/pmm
WITH_KERNEL_VM := 1
GLOBAL_DEFINES += ARCH_HAS_MMU=1
GLOBAL_DEFINES += M68K_MMU=$(M68K_MMU)
# Have the kernel occupy the top 2GB of the address space.
# This puts the kernel at 0x8000.0000
GLOBAL_DEFINES += \
KERNEL_ASPACE_BASE=0x80000000 \
KERNEL_ASPACE_SIZE=0x80000000
KERNEL_BASE ?= 0x80000000
KERNEL_LOAD_OFFSET ?= 0
else
KERNEL_BASE ?= $(MEMBASE)
KERNEL_LOAD_OFFSET ?= 0
endif
ARCH_OPTFLAGS := -O2
KERNEL_BASE ?= $(MEMBASE)
KERNEL_LOAD_OFFSET ?= 0
GLOBAL_DEFINES += MEMBASE=$(MEMBASE)
GLOBAL_DEFINES += MEMSIZE=$(MEMSIZE)
GLOBAL_DEFINES += KERNEL_BASE=$(KERNEL_BASE)
GLOBAL_DEFINES += KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET)
GLOBAL_DEFINES += M68K_CPU=$(M68K_CPU)
GLOBAL_DEFINES += M68K_CPU_$(M68K_CPU)=1
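For reference, the split these defines produce (read directly from the values set above):

// 32-bit address-space layout with WITH_MMU enabled:
//   [0x00000000, 0x80000000)  below the kernel aspace
//   [0x80000000, 0xffffffff]  kernel aspace (KERNEL_ASPACE_BASE/SIZE);
//                             the kernel links at KERNEL_BASE = 0x80000000, load offset 0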


@@ -11,10 +11,10 @@
FUNCTION(_start)
// load the first 4 args that were pushed on whatever stack we have
// NOTE: assumes the stack points at memory that is at least readable
movl %sp@(4),%d0
movl %sp@(8),%d1
movl %sp@(12),%d2
movl %sp@(16),%d3
movl %sp@(4),%d4
movl %sp@(8),%d5
movl %sp@(12),%d6
movl %sp@(16),%d7
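// (kept in d4-d7 rather than d0-d3 so the MMU setup below is free to clobber d0/d1)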
#if ARCH_DO_RELOCATION
lea %pc@(_start),%a0 // load the current address using PC relative addressing mode
@@ -39,8 +39,8 @@ FUNCTION(_start)
// clear bss
bss_clear:
lea __bss_start,%a0
lea __bss_end,%a1
lea %pc@(__bss_start),%a0
lea %pc@(__bss_end),%a1
cmpl %a0,%a1
beqs 1f
// zero 4 bytes at a time
@@ -50,14 +50,81 @@ bss_clear:
bne 0b
1:
#if M68K_MMU == 68040
init_mmu_68040:
// Set up DTTR0 and ITTR0 to map 0x00000000 - 0x3FFFFFFF (1GB) to 0x00000000
// Logical address base: 0x00000000, mask 0x3f000000, enable, supervisor, cacheable, copyback
movl #0x003fa020,%d0
movec %d0,%dtt0
movec %d0,%itt0
// Set up a mapping of [0, MEMSIZE) to [KERNEL_ASPACE_BASE, KERNEL_ASPACE_BASE + MEMSIZE)
// Set up L0 entries
lea %pc@(_root_page_table),%a0
addl #(KERNEL_ASPACE_BASE / L0_ENTRY_RANGE * 4),%a0 // offset into the middle of the L0 table for KERNEL_ASPACE_BASE
movl #L0_ENTRIES,%d0
lea %pc@(_l1_tables),%a1 // get pointer to L1 tables
addl #0x00000003,%a1 // mark it valid
.Ll0_loop:
movl %a1,%a0@ // store it in the L0 table
addl #4,%a0 // advance to next L0 entry
addl #L1_PGTABLE_ENTRIES * 4,%a1 // advance to next L1 table
subl #1,%d0
bne .Ll0_loop
// Set up L1 entries
lea %pc@(_l1_tables),%a0
movl #L1_ENTRIES,%d0
lea %pc@(_l2_tables),%a1 // get pointer to L2 tables
addl #0x00000003,%a1 // mark it valid
.Ll1_loop:
movl %a1,%a0@
addl #4,%a0
addl #L2_PGTABLE_ENTRIES * 4,%a1 // advance to next L2 table
subl #1,%d0
bne .Ll1_loop
// Set up L2 entries
lea %pc@(_l2_tables),%a0
movl #L2_ENTRIES,%d0
movl #0x00000083,%d1 // address 0, supervisor, writable, present
.Ll2_loop:
movl %d1,%a0@ // store the descriptor in the L2 table
addl #4,%a0 // advance to next L2 entry
addl #PAGE_SIZE,%d1 // advance to next page
subl #1,%d0
bne .Ll2_loop
// set the supervisor root pointer
lea %pc@(_root_page_table),%a0
movec %a0,%srp
movec %a0,%urp
// enable the mmu
movl #(1<<15),%d0
movec %d0,%tc
// Branch to the high memory area
movl #.Lhigh_target,%a0
jmp %a0@
.Lhigh_target:
// Turn off DTTR0 and ITTR0
clrl %d0
movec %d0,%dtt0
movec %d0,%itt0
#endif
// load the initial stack pointer
lea _default_stack_top,%sp
// branch into C land with 4 args off the previous stack
movl %d3,%sp@-
movl %d2,%sp@-
movl %d1,%sp@-
movl %d0,%sp@-
movl %d7,%sp@-
movl %d6,%sp@-
movl %d5,%sp@-
movl %d4,%sp@-
jsr lk_main
// if we return from main just loop forever
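For readability, a C rendition of the three table-filling loops above (illustration only; the real work is the assembly, and the extern declarations below just mirror the asm labels and .equ constants):

extern uint32_t _root_page_table[], _l1_tables[], _l2_tables[];

// L0: point the root entries covering KERNEL_ASPACE_BASE at the L1 tables
uint32_t *l0 = _root_page_table + KERNEL_ASPACE_BASE / L0_ENTRY_RANGE;
uint32_t desc = (uint32_t)(uintptr_t)_l1_tables + 0x3;  // mark valid
for (int i = 0; i < L0_ENTRIES; i++, desc += L1_PGTABLE_ENTRIES * 4)
    l0[i] = desc;

// L1: point every L1 entry at its L2 table
desc = (uint32_t)(uintptr_t)_l2_tables + 0x3;
for (int i = 0; i < L1_ENTRIES; i++, desc += L2_PGTABLE_ENTRIES * 4)
    _l1_tables[i] = desc;

// L2: map pages starting at physical 0: supervisor, writable, present (0x83)
desc = 0x83;
for (int i = 0; i < L2_ENTRIES; i++, desc += PAGE_SIZE)
    _l2_tables[i] = desc;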
@@ -65,8 +132,35 @@ bss_clear:
END_FUNCTION(_start)
.bss
.align 4
.balign 4
_default_stack_base:
.skip 4096
_default_stack_top:
#if M68K_MMU == 68040
// Define space for page tables to set up a mapping of MEMSIZE bytes of memory at KERNEL_ASPACE_BASE
.equ PAGE_SIZE, 4096
.equ L0_PGTABLE_ENTRIES, 128 // 7 bits
.equ L0_ENTRY_RANGE, (1<<25) // each L0 entry covers 32MB
.equ L1_PGTABLE_ENTRIES, 128 // 7 bits
.equ L1_ENTRY_RANGE, (1<<18) // each L1 entry covers 256KB
.equ L2_PGTABLE_ENTRIES, 64 // 6 bits
// Number of entries at each level to fill in order to cover MEMSIZE,
// rounded up to the next L0 entry range so all of the L1 and L2 page tables are fully used.
.equ MEMSIZE_ROUNDED, (MEMSIZE + L0_ENTRY_RANGE - 1) & ~(L0_ENTRY_RANGE - 1)
.equ L0_ENTRIES, MEMSIZE_ROUNDED / L0_ENTRY_RANGE
.equ L1_ENTRIES, MEMSIZE_ROUNDED / L1_ENTRY_RANGE
.equ L2_ENTRIES, MEMSIZE_ROUNDED / PAGE_SIZE
.balign 4096
_root_page_table:
.skip L0_PGTABLE_ENTRIES * 4 // 128 entries, 4 bytes each
.balign 4096
_l1_tables:
.skip L1_ENTRIES * 4 // 4 bytes each, one per 256KB section of memory
.balign 4096
_l2_tables:
.skip L2_ENTRIES * 4 // 4 bytes each, one per page of memory
#endif // M68K_MMU == 68040
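Worked example of the geometry above for MEMSIZE = 128MB (MEMSIZE always rounds up to a whole number of 32MB L0 ranges, so every L1 and L2 table is fully populated):

MEMSIZE_ROUNDED = 128MB
L0_ENTRIES = 128MB / 32MB  = 4        root table: fixed 128 entries = 512 bytes
L1_ENTRIES = 128MB / 256KB = 512      _l1_tables: 512 * 4 bytes   = 2KB
L2_ENTRIES = 128MB / 4KB   = 32768    _l2_tables: 32768 * 4 bytes = 128KB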


@@ -5,17 +5,18 @@
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <dev/virtio.h>
#include <dev/virtio/net.h>
#include <kernel/thread.h>
#include <lk/err.h>
#include <lk/reg.h>
#include <lk/trace.h>
#include <kernel/thread.h>
#include <platform.h>
#include <platform/interrupts.h>
#include <platform/debug.h>
#include <platform/interrupts.h>
#include <platform/timer.h>
#include <platform/virt.h>
#include <sys/types.h>
#include <dev/virtio.h>
#include <dev/virtio/net.h>
#if WITH_LIB_MINIP
#include <lib/minip.h>
#endif
@@ -30,7 +31,38 @@
#define LOCAL_TRACE 0
// Add the one memory region we have detected from the bootinfo
static status_t add_memory_region(paddr_t base, size_t size, uint flags) {
#if WITH_KERNEL_VM
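// static, not stack-allocated: pmm_add_arena() keeps a pointer to the arena struct, so it must persist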
static pmm_arena_t arena;
arena.name = "mem";
arena.base = base;
arena.size = size;
arena.priority = 1;
arena.flags = PMM_ARENA_FLAG_KMAP | flags;
status_t err = pmm_add_arena(&arena);
if (err < 0) {
panic("pmm_add_arena failed\n");
}
return err;
#else
novm_add_arena("mem", base, size);
return NO_ERROR;
#endif
}
void platform_early_init(void) {
#if M68K_MMU == 68040
// use DTTR1 to map in all of peripheral space
// map 0xff000000 - 0xffffffff (16MB) to 0xff000000
// Logical address base: 0xff000000, mask 0x00000000, enable, supervisor, noncacheable, serialized
uint32_t dtt1 = 0xff00a040;
asm volatile("movec %0, %%dtt1" ::"r"(dtt1) : "memory");
#endif
goldfish_tty_early_init();
pic_early_init();
goldfish_rtc_early_init();
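The DTT1 value above decomposes as follows (bit layout per the 68040 transparent-translation registers):

0xff00a040: base = 0xff  (va[31:24] must equal 0xff)
            mask = 0x00  (compare all base bits -> one 16MB window)
            E    = 1     (bit 15: enable)
            S    = 01    (bits 14:13: supervisor accesses only)
            CM   = 10    (bits 6:5: noncacheable, serialized)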
@@ -55,7 +87,7 @@ void platform_early_init(void) {
uint32_t memsize = *(const uint32_t *)((uintptr_t)ptr + 4);
dprintf(INFO, "VIRT: memory base %#x size %#x\n", membase, memsize);
novm_add_arena("mem", membase, memsize);
add_memory_region((paddr_t)membase, (size_t)memsize, 0);
// TODO: read the rest of the device bootinfo records and dynamically locate devices
}
@@ -65,6 +97,10 @@ void platform_init(void) {
goldfish_tty_init();
goldfish_rtc_init();
#if M68K_MMU == 68040
// TODO: create a VM reservation for peripheral space that's using DTTR1
#endif
/* detect any virtio devices */
uint virtio_irqs[NUM_VIRT_VIRTIO];
for (int i = 0; i < NUM_VIRT_VIRTIO; i++) {
@@ -90,7 +126,7 @@ void platform_init(void) {
virtio_net_start();
//minip_start_static(ip_addr, ip_mask, ip_gateway);
// minip_start_static(ip_addr, ip_mask, ip_gateway);
minip_start_dhcp();
}
#endif