4 Commits

Author SHA1 Message Date
Travis Geiselbrecht
b433d4582d [arch][m68k] careful of PC relative addressing on 68000 and 68010
These cpus only support a simple 16 bit offset from PC, so cannot use it
in start.S to compute a large offset. This is okay, because the code
that needs it is only for cpus with an MMU, which these dont have.
2025-09-22 21:52:48 -07:00
Travis Geiselbrecht
bb5db64b4c [arch][m68k] add arch_mmu_query so the kernel boots completely
Side effect of editing: reformat all of the code in
platform/qemu-virt-m68k
2025-09-22 21:52:48 -07:00
Travis Geiselbrecht
6c7e6a1796 [arch][m68k] start to enable the VM on m68k
-Wire up the make plumbing for setting the kernel vm bits
-Implement stubbed out arch_mmu routines.
-Set up a high mapping in start.S that should be compatible with the mmu
code.
2025-09-22 21:52:48 -07:00
Travis Geiselbrecht
5926fb1cc8 [arch][m68k] Initial support for 68040 mmu
Currently just sets up most of an identity map and some test mappings.
Not fully wired up to the VM yet.
2025-09-07 16:23:16 -07:00
17 changed files with 837 additions and 97 deletions

View File

@@ -10,6 +10,7 @@
#include <stdint.h>
#include <arch/ops.h>
#include <arch/m68k.h>
#include <arch/m68k/mmu.h>
#include <kernel/spinlock.h>
#define LOCAL_TRACE 0
@@ -24,10 +25,16 @@ void arch_early_init(void) {
extern uint32_t exc_vectors[256];
asm volatile("movec %0, %%vbr" :: "r"(exc_vectors));
#endif
#if M68K_MMU
m68k_mmu_early_init();
#endif
}
// Later-stage architecture init, called once kernel services are up.
void arch_init(void) {
    LTRACE;
#if M68K_MMU
    // Finish MMU bring-up (currently dumps mmu state; table setup is still stubbed).
    m68k_mmu_init();
#endif
}
void arch_idle(void) {

View File

@@ -9,6 +9,7 @@
#include <inttypes.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <assert.h>
#include <kernel/thread.h>
#include <target.h>
@@ -24,6 +25,27 @@ typedef struct m68k_iframe {
uint16_t vector_offset : 12;
} m68k_iframe_t;
// Extended exception frame pushed by the cpu for access errors (bus faults).
// Extends the base iframe with the fault/writeback state needed to diagnose
// or restart the faulting access.
// NOTE(review): field layout assumed to follow the 68040 format-7 access
// error stack frame — confirm against the MC68040 User's Manual.
typedef struct m68k_iframe_format_7 {
    m68k_iframe_t base;               // common exception frame
    uint32_t effective_address;       // effective address of the faulting access
    uint16_t ssw;                     // special status word describing the fault
    uint16_t wb3_status;              // writeback 3 status
    uint16_t wb2_status;              // writeback 2 status
    uint16_t wb1_status;              // writeback 1 status
    uint32_t fault_address;           // address that caused the fault
    uint32_t wb3_address;             // pending writeback 3 address
    uint32_t wb3_data;                // pending writeback 3 data
    uint32_t wb2_address;             // pending writeback 2 address
    uint32_t wb2_data;                // pending writeback 2 data
    uint32_t wb1_address;             // pending writeback 1 address
    uint32_t wb1_data;                // pending writeback 1 data
    uint32_t push_data_1;             // internal cpu push data
    uint32_t push_data_2;
    uint32_t push_data_3;
} m68k_iframe_format_7_t;
// The extended portion must match the hardware frame size (0x8..0x3c).
static_assert(sizeof(m68k_iframe_format_7_t) - sizeof(m68k_iframe_t) == (0x3c - 0x8), "");
void dump_iframe(const m68k_iframe_t *iframe) {
printf("pc 0x%08x sr 0x%04x format %#x vector %#x\n", iframe->pc_low | iframe->pc_high << 16, iframe->sr,
iframe->format, iframe->vector_offset / 4);
@@ -33,10 +55,28 @@ void dump_iframe(const m68k_iframe_t *iframe) {
printf("a4 0x%08x a5 0x%08x a6 0x%08x\n", iframe->a[4], iframe->a[5], iframe->a[6]);
}
// Terminal handler for vector 2 (access fault / bus error): dump the frame
// plus the format-7 extension and halt.
static void access_fault(m68k_iframe_t *frame) {
    printf("access fault\n");
    dump_iframe(frame);
    // dump additional frame 7 stuff
    // NOTE(review): cast assumes the cpu pushed a format 7 (access error)
    // frame here — confirm frame->format == 7 before trusting these fields.
    m68k_iframe_format_7_t *f7 = (m68k_iframe_format_7_t *)frame;
    printf("effective address %#" PRIx32 "\n", f7->effective_address);
    printf("special status word %#" PRIx16 "\n", f7->ssw);
    printf("halting\n");
    // Access faults are currently unrecoverable; spin forever.
    for (;;);
}
void m68k_exception(m68k_iframe_t *frame) {
uint8_t code = frame->vector_offset / 4;
LTRACEF("frame %p, code %#hhx\n", frame, code);
TRACEF("frame %p, code %#hhx\n", frame, code);
switch (code ) {
case 2:
access_fault(frame);
break;
}
dump_iframe(frame);

View File

@@ -0,0 +1,20 @@
/*
* Copyright (c) 2025 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <arch/m68k/mmu.h>
#include <sys/types.h>
struct arch_aspace {
/* pointer to the translation table */
paddr_t pgtable_phys;
void *pgtable_virt;
uint32_t flags;
};

View File

@@ -0,0 +1,15 @@
/*
* Copyright (c) 2024 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#if M68K_MMU
void m68k_mmu_early_init(void);
void m68k_mmu_init(void);
#endif // M68K_MMU

467
arch/m68k/mmu.c Normal file
View File

@@ -0,0 +1,467 @@
/*
* Copyright (c) 2024 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include "arch/m68k/mmu.h"
#if M68K_MMU
#include <arch/mmu.h>
#include <arch/spinlock.h>
#include <assert.h>
#include <kernel/vm.h>
#include <lk/err.h>
#include <lk/trace.h>
#include <stdint.h>
#include <stdlib.h>
#define LOCAL_TRACE 0
// Initial mappings set up in start.S, consumed by the VM layer at boot.
struct mmu_initial_mapping mmu_initial_mappings[] = {
    // all of memory, mapped in start.S
    {
        .phys = 0,
        .virt = KERNEL_ASPACE_BASE,
        // round up to next 32MB: matches the per-L0-entry coverage the
        // start.S page tables are sized for
        .size = ROUNDUP(MEMSIZE, 32 * 1024 * 1024),
        .flags = 0,
        .name = "physmap"},
    // null entry to terminate the list
    {}};
#if M68K_MMU == 68040
// 68040's layout is
// 4 or 8K pages. only affects the bottom level
// 32 bit entries at all levels
// L0, L1, L2
// bits: 7, 7, 6, 12 (4K pages)
// entries: 128, 128, 64
// bytes/table: 512, 512, 256
//
// if using 4K page tables for L1 and L2, and using a small root table (L0):
// L0, L1, L2
// entries: 128, 8*128 (1024), 16*64 (1024)
// usable entries per level: 128/8, 1024/16, 1024
// 68040 L2 table entry: leaf descriptor for a 4K page.
// NOTE(review): relies on the m68k ABI packing bitfields from the most
// significant bit down so the struct matches the hardware word — confirm.
typedef struct pte {
    uint32_t page_address : 20; // physical page frame number (paddr >> 12)
    uint32_t ur : 1;            // user reserved bit — TODO confirm semantics
    uint32_t g : 1;             // global
    uint32_t u1 : 1;            // user-defined bit 1
    uint32_t u0 : 1;            // user-defined bit 0
    uint32_t s : 1;             // supervisor-only
    uint32_t cm : 2;            // cache mode
    uint32_t m : 1;             // modified
    uint32_t u : 1;             // used
    uint32_t w : 1;             // write protected
    uint32_t pdt : 2;           // page descriptor type (see is_l2_entry_valid)
} pte_t;
static_assert(sizeof(pte_t) == 4, "");
// 68040 L1 table entry: pointer descriptor referencing an L2 page table.
typedef struct ptp {
    uint32_t table_address : 24; // physical L2 table address >> 8 (see map_l1)
    uint32_t _1 : 4;             // unused
    uint32_t u : 1;              // used
    uint32_t w : 1;              // write protected
    uint32_t udt : 2;            // upper descriptor type (see is_l1_entry_valid)
} ptp_t;
static_assert(sizeof(ptp_t) == 4, "");
// 68040 L0 (root) table entry: pointer descriptor referencing an L1 table.
typedef struct root_ptp {
    uint32_t table_address : 23; // physical L1 table address >> 9 (see map_l0)
    uint32_t _1 : 5;             // unused
    uint32_t u : 1;              // used
    uint32_t w : 1;              // write protected
    uint32_t udt : 2;            // upper descriptor type (see is_l0_entry_valid)
} root_ptp_t;
static_assert(sizeof(root_ptp_t) == 4, "");
// some constants based on this
#define L0_SHIFT_RAW 7
#define L1_SHIFT_RAW 7
#define L2_SHIFT_RAW 6
// number of entries to repeat per level to get our emulated tables
#define L0_REPEAT_SHIFT 3
#define L1_REPEAT_SHIFT 4
#define L0_REPEATS (1 << L0_REPEAT_SHIFT)
#define L1_REPEATS (1 << L1_REPEAT_SHIFT)
static_assert(L0_REPEATS == 8, "");
static_assert(L1_REPEATS == 16, "");
// number of entries per level
#define L0_ENTRIES_RAW (1 << L0_SHIFT_RAW)
#define L1_ENTRIES_RAW (1 << (L1_SHIFT_RAW + L0_REPEAT_SHIFT))
#define L2_ENTRIES_RAW (1 << (L2_SHIFT_RAW + L1_REPEAT_SHIFT))
static_assert(L0_ENTRIES_RAW == 128, "");
static_assert(L1_ENTRIES_RAW == 1024, "");
static_assert(L2_ENTRIES_RAW == 1024, "");
// number of bytes per level
#define L0_BYTES (L0_ENTRIES_RAW * sizeof(root_ptp_t))
#define L1_BYTES (L1_ENTRIES_RAW * sizeof(ptp_t))
#define L2_BYTES (L2_ENTRIES_RAW * sizeof(pte_t))
static_assert(L0_BYTES == 512, "");
static_assert(L1_BYTES == 4096, "");
static_assert(L2_BYTES == 4096, "");
// number of unique entries per level
// pow2 4+6+10
#define L0_ENTRIES (1u << (L0_SHIFT_RAW - L0_REPEAT_SHIFT))
#define L1_ENTRIES (1u << (L1_SHIFT_RAW - L1_REPEAT_SHIFT + L0_REPEAT_SHIFT))
#define L2_ENTRIES (1u << (L2_SHIFT_RAW + L1_REPEAT_SHIFT))
static_assert(L0_ENTRIES == 16, "");
static_assert(L1_ENTRIES == 64, "");
static_assert(L2_ENTRIES == 1024, "");
// for a given virtual address, which bits correspond to what layer of the table
#define L0_VADDR_SHIFT (32 - L0_SHIFT_RAW)
#define L1_VADDR_SHIFT (L0_VADDR_SHIFT - L1_SHIFT_RAW)
#define L2_VADDR_SHIFT (L1_VADDR_SHIFT - L2_SHIFT_RAW)
static_assert(L0_VADDR_SHIFT == 25, "");
static_assert(L1_VADDR_SHIFT == 18, "");
static_assert(L2_VADDR_SHIFT == 12, "");
static volatile root_ptp_t kernel_pgtable[L0_ENTRIES_RAW] __ALIGNED(L0_BYTES);
#else
// TODO: support 68030 in the future, probably using identical page table sizes
#error "unsupported m68k mmu"
#endif
// Allocate one physical page to hold a page table.
// On success *paddrp receives the physical address of the new table.
// Returns NO_ERROR or ERR_NO_MEMORY.
static status_t alloc_pgtable(paddr_t *paddrp) {
#if WITH_KERNEL_VM
    // Grab a page from the PMM; its physical address backs the table.
    vm_page_t *p = pmm_alloc_page();
    if (!p) {
        return ERR_NO_MEMORY;
    }
    *paddrp = vm_page_to_paddr(p);
#else
    // XXX hack for now before we allocate from PMM
    // Statically reserved pool, handed out linearly and never freed.
    // NOTE(review): next_pgtable is not bounds-checked against the pool —
    // confirm callers cannot exhaust it.
    static uint32_t pgtables[L0_ENTRIES * L1_ENTRIES * L2_ENTRIES] __ALIGNED(PAGE_SIZE);
    static size_t next_pgtable = 0;
    *paddrp = (paddr_t)&pgtables[next_pgtable * L2_ENTRIES];
    next_pgtable++;
    LTRACEF("returning %#lx\n", *paddrp);
#endif
    return NO_ERROR;
}
// given a vaddr, generate the virtual index of the page table at that level.
// Needs to be shifted by Ln_REPEAT_SHIFT to get the raw entry
// Extract the L0 (root) index for vaddr in terms of the emulated,
// deduplicated table; the raw hardware index is this << L0_REPEAT_SHIFT.
static uint get_l0_index(vaddr_t vaddr) {
    return (vaddr >> (L0_VADDR_SHIFT + L0_REPEAT_SHIFT)) & (L0_ENTRIES - 1);
}
// Extract the L1 index for vaddr in terms of the emulated table; the raw
// hardware index is this << L1_REPEAT_SHIFT.
static uint get_l1_index(vaddr_t vaddr) {
    return (vaddr >> (L1_VADDR_SHIFT + L1_REPEAT_SHIFT)) & (L1_ENTRIES - 1);
}
// Extract the L2 (leaf) index for vaddr; L2 entries are not repeated, so
// this is the raw hardware index.
static uint get_l2_index(vaddr_t vaddr) {
    return (vaddr >> L2_VADDR_SHIFT) & (L2_ENTRIES - 1);
}
// Return a pointer to the first of the repeated L0 descriptors covering
// vaddr. Any updates will need to be written L0_REPEATS times (see map_l0).
static volatile root_ptp_t *get_l0_ptp_base_ptr(volatile root_ptp_t *table, vaddr_t vaddr) {
    const unsigned int idx = get_l0_index(vaddr) << L0_REPEAT_SHIFT;
    LTRACEF_LEVEL(3, "vaddr %#lx shifted idx: %u\n", vaddr, idx);
    return &table[idx];
}
// Return a pointer to the first of the repeated L1 descriptors covering
// vaddr. Any updates will need to be written L1_REPEATS times (see map_l1).
static volatile ptp_t *get_l1_ptp_base_ptr(volatile ptp_t *table, vaddr_t vaddr) {
    const unsigned int idx = get_l1_index(vaddr) << L1_REPEAT_SHIFT;
    LTRACEF_LEVEL(3, "vaddr %#lx shifted idx: %u\n", vaddr, idx);
    return &table[idx];
}
// Install the L0 (root) descriptors for vaddr pointing at the L1 table at
// physical address paddr. The hardware root table has L0_REPEATS times more
// entries than our emulated one, so the entry is replicated, each repeat
// pointing at the next raw 128-entry slice of the L1 table.
__NO_INLINE static void map_l0(volatile root_ptp_t *root_table, vaddr_t vaddr, paddr_t paddr) {
    LTRACEF("vaddr %#lx paddr %#lx\n", vaddr, paddr);
    volatile root_ptp_t *entry = get_l0_ptp_base_ptr(root_table, vaddr);
    for (uint i = 0; i < L0_REPEATS; i++) {
        // each repeat advances one raw L1 table's worth of bytes
        const paddr_t pa = paddr + i * (1u << L1_SHIFT_RAW) * sizeof(ptp_t);
        const root_ptp_t ptp = {
            .table_address = pa >> 9, // descriptor drops the low 9 address bits
            .u = 0, // not used
            .w = 0, // not write protected
            .udt = 3, // resident
        };
        entry[i] = ptp;
        LTRACEF_LEVEL(2, "real addr: %lx, index %u, shifted index %u\n", pa, get_l0_index(vaddr), (get_l0_index(vaddr) << L0_REPEAT_SHIFT) + i);
    }
}
// Install the L1 descriptors for vaddr pointing at the L2 table at physical
// address paddr, replicated L1_REPEATS times across the raw hardware
// entries, each repeat pointing at the next raw 64-entry L2 slice.
__NO_INLINE static void map_l1(volatile ptp_t *table, vaddr_t vaddr, paddr_t paddr) {
    LTRACEF("vaddr %#lx paddr %#lx\n", vaddr, paddr);
    volatile ptp_t *entry = get_l1_ptp_base_ptr(table, vaddr);
    for (unsigned int i = 0; i < L1_REPEATS; i++) {
        // each repeat advances one raw L2 table's worth of bytes
        const paddr_t pa = paddr + i * (1u << L2_SHIFT_RAW) * sizeof(ptp_t);
        const ptp_t ptp = {
            .table_address = pa >> 8, // descriptor drops the low 8 address bits
            .u = 0, // not used
            .w = 0, // not write protected
            .udt = 3, // resident
        };
        entry[i] = ptp;
        LTRACEF_LEVEL(2, "real addr: %lx, index %u, shifted index %u\n", pa, get_l1_index(vaddr), (get_l1_index(vaddr) << L1_REPEAT_SHIFT) + i);
    }
}
// Fill in a single L2 (leaf) entry mapping vaddr to the physical page at
// addr as a supervisor, writable, cacheable 4K page.
__NO_INLINE static void map_l2(volatile pte_t *table, vaddr_t vaddr, paddr_t addr) {
    const unsigned int idx = get_l2_index(vaddr);
    LTRACEF_LEVEL(2, "vaddr %#lx paddr %#lx, shifted idx: %u\n", vaddr, addr, idx);
    DEBUG_ASSERT(idx < L2_ENTRIES);
    const pte_t pte = {
        .page_address = addr >> 12, // 4K page frame number
        .g = 0, // not global
        .s = 1, // supervisor
        .cm = 0, // cache mode, cacheable
        .m = 0, // not modified
        .u = 0, // not used
        .w = 0, // not write protected
        .pdt = 1, // resident
    };
    table[idx] = pte;
}
#define MMU_REG_ACCESSOR(reg) \
static uint32_t get_##reg(void) { \
uint32_t reg; \
asm volatile("movec %%" #reg ", %0" : "=r"(reg)::"memory"); \
return reg; \
} \
static void set_##reg(uint32_t val) { \
asm volatile("movec %0, %%" #reg ::"r"(val) : "memory"); \
}
// Control register accessors
MMU_REG_ACCESSOR(tc);
MMU_REG_ACCESSOR(itt0);
MMU_REG_ACCESSOR(itt1);
MMU_REG_ACCESSOR(dtt0);
MMU_REG_ACCESSOR(dtt1);
MMU_REG_ACCESSOR(mmusr);
MMU_REG_ACCESSOR(urp);
MMU_REG_ACCESSOR(srp);
// Print the current contents of every 68040 MMU control register.
static void dump_mmu_regs(void) {
    // Snapshot all the registers first, then print them.
    const uint32_t tc = get_tc();
    const uint32_t itt0 = get_itt0();
    const uint32_t itt1 = get_itt1();
    const uint32_t dtt0 = get_dtt0();
    const uint32_t dtt1 = get_dtt1();
    const uint32_t mmusr = get_mmusr();
    const uint32_t urp = get_urp();
    const uint32_t srp = get_srp();

    printf("TC %#x\n", tc);
    printf("ITT0 %#x\n", itt0);
    printf("ITT1 %#x\n", itt1);
    printf("DTT0 %#x\n", dtt0);
    printf("DTT1 %#x\n", dtt1);
    printf("MMUSR %#x\n", mmusr);
    printf("URP %#x\n", urp);
    printf("SRP %#x\n", srp);
}
// Returns true if the L0 (root) descriptor is resident.
// 68040 UDT encoding: 0, 1 == invalid; 2, 3 == resident (valid).
static bool is_l0_entry_valid(root_ptp_t entry) {
    // 0, 1 == invalid
    // 2, 3 == valid
    // BUG FIX: the previous 'udt > 2' only accepted 3 and wrongly rejected
    // descriptors with udt == 2, which the 68040 also treats as resident.
    return entry.udt >= 2;
}
// Returns true if the L1 descriptor is resident.
// 68040 UDT encoding: 0, 1 == invalid; 2, 3 == resident (valid).
static bool is_l1_entry_valid(ptp_t entry) {
    // 0, 1 == invalid
    // 2, 3 == valid
    // BUG FIX: the previous 'udt > 2' only accepted 3 and wrongly rejected
    // descriptors with udt == 2, which the 68040 also treats as resident.
    return entry.udt >= 2;
}
// Returns true if the L2 page descriptor maps a resident page.
// PDT encoding: 0 == invalid, 1 and 2 == resident, 3 == indirect (unused here).
static bool is_l2_entry_valid(pte_t entry) {
    // pdt is a 2-bit field, so rejecting 0 and 3 accepts exactly 1 and 2.
    return entry.pdt != 0 && entry.pdt != 3;
}
void m68k_mmu_early_init(void) {}
// Map the physical range [pa, pa + len_minus_one] at [va, va + len_minus_one],
// allocating intermediate page tables on demand. The length is passed as
// len-1 so a range ending exactly at the top of the 32-bit address space can
// be expressed without overflow.
static status_t map_range(vaddr_t va, paddr_t pa, size_t len_minus_one) {
    DEBUG_ASSERT(IS_ALIGNED(va, PAGE_SIZE));
    DEBUG_ASSERT(IS_ALIGNED(pa, PAGE_SIZE));
    DEBUG_ASSERT(IS_ALIGNED(len_minus_one + 1, PAGE_SIZE));
    // One past the last virtual address to map; may wrap to 0 at the very top
    // of the address space, which the equality test below still handles.
    const vaddr_t terminal_va = va + len_minus_one + 1;
    // iterate over the L0 level
    for (;;) {
        const root_ptp_t l0_entry = *get_l0_ptp_base_ptr(kernel_pgtable, va);
        volatile ptp_t *l1_pgtable;
        if (!is_l0_entry_valid(l0_entry)) {
            // allocate a page table
            paddr_t pgtable;
            status_t err = alloc_pgtable(&pgtable);
            if (err < 0) {
                TRACEF("error allocating L1 page table\n");
                return err;
            }
            map_l0(kernel_pgtable, va, pgtable);
            l1_pgtable = (volatile ptp_t *)pgtable;
        } else {
            // Recover the table pointer from the descriptor (stored >> 9).
            // NOTE(review): treats the physical table address as directly
            // dereferenceable — confirm this holds once the VM is enabled.
            l1_pgtable = (volatile ptp_t *)((uintptr_t)l0_entry.table_address << 9);
        }
        // iterate over the L1 level
        do {
            const ptp_t l1_entry = *get_l1_ptp_base_ptr(l1_pgtable, va);
            volatile pte_t *l2_pgtable;
            if (!is_l1_entry_valid(l1_entry)) {
                // allocate a page table
                paddr_t pgtable;
                status_t err = alloc_pgtable(&pgtable);
                if (err < 0) {
                    TRACEF("error allocating L2 page table\n");
                    return err;
                }
                map_l1(l1_pgtable, va, pgtable);
                l2_pgtable = (volatile pte_t *)pgtable;
            } else {
                // L1 descriptors store the L2 table address shifted right 8.
                l2_pgtable = (volatile pte_t *)((uintptr_t)l1_entry.table_address << 8);
            }
            // for every L2 page table entry, map a page
            do {
                map_l2(l2_pgtable, va, pa);
                va += PAGE_SIZE;
                pa += PAGE_SIZE;
                // If we hit the terminal address, stop
                if (va == terminal_va) {
                    return NO_ERROR;
                }
            } while (get_l2_index(va) != 0); // until we cross into the next L2 table
        } while (get_l1_index(va) != 0); // until we cross into the next L1 table
    }
    return NO_ERROR;
}
// Main MMU init. Currently only dumps the MMU register state; the table
// construction and enable sequence below is disabled pending full VM wiring.
void m68k_mmu_init(void) {
    LTRACE_ENTRY;
#if 0
    // set up some helpful maps for qemu virt
    map_range(0, 0, 64 * 1024 * 1024 - 1);
    map_range(0xff000000, 0xff000000, 0 - 0xff000000 - 1);
    // a few test mappings to stress the mapper
    map_range(0xc0104000, 0x12345000, 0x268000 - 1);
    map_range(0xc0371000, 0x6789a000, 0x440000 - 1);
    // set the root pointers
    set_srp((uint32_t)(uintptr_t)kernel_pgtable);
    set_urp((uint32_t)(uintptr_t)kernel_pgtable);
    set_tc((1 << 15)); // enable, 4K pages
#endif
    dump_mmu_regs();
    LTRACE_EXIT;
}
// Default stub implementations for arch_mmu routines.
// The 68040 MMU does not support no-execute or non-secure mappings, and user
// address spaces are not implemented yet; the aspace routines return
// ERR_NOT_SUPPORTED until the VM is fully wired up.
bool arch_mmu_supports_nx_mappings(void) {
    return false;
}
bool arch_mmu_supports_ns_mappings(void) {
    return false;
}
bool arch_mmu_supports_user_aspaces(void) {
    return false;
}
// TODO: implement address space creation.
status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
    return ERR_NOT_SUPPORTED;
}
// TODO: implement address space teardown.
status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
    return ERR_NOT_SUPPORTED;
}
// TODO: implement mapping via map_range once aspaces exist.
int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags) {
    return ERR_NOT_SUPPORTED;
}
// TODO: implement unmapping.
int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
    return ERR_NOT_SUPPORTED;
}
// Query the current translation for vaddr using the 68040 PTEST instruction,
// which walks the page tables in hardware and latches the result in MMUSR.
// On success fills in *paddr and/or *flags (either may be NULL); returns
// ERR_NOT_FOUND if the address has no resident translation.
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) {
    // Disable interrupts around the ptest instruction in case we get preempted
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    // Use the PTEST instruction to probe the translation
    uint32_t mmusr;
    asm volatile(
        "ptestr (%1)\n"
        "movec %%mmusr, %0"
        : "=r"(mmusr) : "a"(vaddr) : "memory");
    arch_interrupt_restore(state, 0);
    LTRACEF("vaddr %#x, mmusr %#x\n", (uint32_t)vaddr, mmusr);
    // MMUSR bit 0 (R) set means the translation is resident
    if ((mmusr & 0x1) == 0) {
        return ERR_NOT_FOUND;
    }
    // extract the physical address from the mmusr
    if (paddr) {
        *paddr = (mmusr & 0xfffff000) | (vaddr & 0xfff);
    }
    if (flags) {
        *flags = 0;
        *flags |= (mmusr & (1 << 2)) ? ARCH_MMU_FLAG_PERM_RO : 0;   // W: write protected
        *flags |= (mmusr & (1 << 7)) ? 0 : ARCH_MMU_FLAG_PERM_USER; // S: supervisor only
        // BUG FIX: the cache mode field lives in MMUSR bits [6:5] and must be
        // shifted down before the switch. Masking alone ('mmusr & (3 << 5)')
        // yields 0/0x20/0x40/0x60, so cases 1-3 were unreachable and every
        // uncached/device mapping was misreported as cached.
        uint32_t cm = (mmusr >> 5) & 3;
        switch (cm) {
            case 0: // write-through
            case 1: // copyback
                *flags |= ARCH_MMU_FLAG_CACHED;
                break;
            case 2: // noncachable, serialized
                *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
                break;
            case 3: // noncachable
                *flags |= ARCH_MMU_FLAG_UNCACHED;
                break;
        }
    }
    return NO_ERROR;
}
// Stub: no arch-specific preference for placing regions; returning NULL
// lets the generic VM code pick the spot.
vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace,
                           vaddr_t base, uint prev_region_arch_mmu_flags,
                           vaddr_t end, uint next_region_arch_mmu_flags,
                           vaddr_t align, size_t size, uint arch_mmu_flags) {
    return (vaddr_t)NULL;
}
// Stub: user address spaces are not supported yet, so there is no per-aspace
// root pointer to switch.
void arch_mmu_context_switch(arch_aspace_t *aspace) {
    // no-op
}
#endif // M68K_MMU

View File

@@ -6,6 +6,7 @@ MODULE_SRCS += $(LOCAL_DIR)/arch.c
MODULE_SRCS += $(LOCAL_DIR)/asm.S
MODULE_SRCS += $(LOCAL_DIR)/exceptions.c
MODULE_SRCS += $(LOCAL_DIR)/exceptions_asm.S
MODULE_SRCS += $(LOCAL_DIR)/mmu.c
MODULE_SRCS += $(LOCAL_DIR)/start.S
MODULE_SRCS += $(LOCAL_DIR)/thread.c
@@ -27,8 +28,10 @@ else ifeq ($(M68K_CPU),68020)
ARCH_COMPILEFLAGS := -mcpu=68020
else ifeq ($(M68K_CPU),68030)
ARCH_COMPILEFLAGS := -mcpu=68030
M68K_MMU := 68030
else ifeq ($(M68K_CPU),68040)
ARCH_COMPILEFLAGS := -mcpu=68040
M68K_MMU := 68040
else
$(error add support for selected cpu $(M68K_CPU))
endif
@@ -39,13 +42,38 @@ $(info LIBGCC = $(LIBGCC))
cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc /dev/null 2>&1`"; \
then echo "$(2)"; else echo "$(3)"; fi ;)
ARCH_OPTFLAGS := -O2
# default to no mmu
WITH_MMU ?= 0
ifeq (true, $(call TOBOOL, $(WITH_MMU)))
ifeq ($(M68K_MMU),)
$(error WITH_MMU is set but no M68K_MMU is set)
endif
# we have a mmu and want the vmm/pmm
WITH_KERNEL_VM := 1
GLOBAL_DEFINES += ARCH_HAS_MMU=1
GLOBAL_DEFINES += M68K_MMU=$(M68K_MMU)
# Have the kernel occupy the top 2GB of the address space.
# This puts the kernel at 0x8000.0000
GLOBAL_DEFINES += \
KERNEL_ASPACE_BASE=0x80000000 \
KERNEL_ASPACE_SIZE=0x80000000
KERNEL_BASE ?= 0x80000000
KERNEL_LOAD_OFFSET ?= 0
else
KERNEL_BASE ?= $(MEMBASE)
KERNEL_LOAD_OFFSET ?= 0
endif
ARCH_OPTFLAGS := -O2
GLOBAL_DEFINES += MEMBASE=$(MEMBASE)
GLOBAL_DEFINES += MEMSIZE=$(MEMSIZE)
GLOBAL_DEFINES += KERNEL_BASE=$(KERNEL_BASE)
GLOBAL_DEFINES += KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET)
GLOBAL_DEFINES += M68K_CPU=$(M68K_CPU)
GLOBAL_DEFINES += M68K_CPU_$(M68K_CPU)=1

View File

@@ -11,10 +11,10 @@
FUNCTION(_start)
// load the first 4 args that were pushed on whatever stack we have
// NOTE: assumes stack is pointing at at least readable memory
movl %sp@(4),%d0
movl %sp@(8),%d1
movl %sp@(12),%d2
movl %sp@(16),%d3
movl %sp@(4),%d4
movl %sp@(8),%d5
movl %sp@(12),%d6
movl %sp@(16),%d7
#if ARCH_DO_RELOCATION
lea %pc@(_start),%a0 // load the current address using PC relative addressing mode
@@ -39,8 +39,18 @@ FUNCTION(_start)
// clear bss
bss_clear:
lea __bss_start,%a0
lea __bss_end,%a1
#if M68K_CPU >= 68020
// 020 and above have a full 32bit PC relative addressing mode.
// Since we may be using a mmu in this case, we may be operating in physical address space,
// so we need to use PC relative addressing to get the right addresses.
lea %pc@(__bss_start),%a0
lea %pc@(__bss_end),%a1
#else
// We won't be using an MMU on 68000 and 68010, so we can use absolute addresses.
movl __bss_start,%a0
movl __bss_end,%a1
#endif
cmpl %a0,%a1
beqs 1f
// zero 4 bytes at a time
@@ -50,14 +60,81 @@ bss_clear:
bne 0b
1:
#if M68K_MMU == 68040
init_mmu_68040:
// Set up DTTR0 and ITTR0 to map 0x00000000 - 0x3FFFFFFF (1GB) to 0x00000000
// Logical address base: 0x00000000, mask 0x3f000000, enable, supervisor, cacheable, copyback
movl #0x003fa020,%d0
movec %d0,%dtt0
movec %d0,%itt0
// Set up a mapping of [0, MEMSIZE) to [KERNEL_ASPACE_BASE, KERNEL_ASPACE_BASE + MEMSIZE)
// Set up L0 entries
lea %pc@(_root_page_table),%a0
addl #(KERNEL_ASPACE_BASE / L0_ENTRY_RANGE * 4),%a0 // offset into the middle of the L0 table for KERNEL_ASPACE_BASE
movl #L0_ENTRIES,%d0
lea %pc@(_l1_tables),%a1 // get pointer to L1 tables
addl #0x00000003,%a1 // mark it valid
.Ll0_loop:
movl %a1,%a0@ // store it in the L0 table
addl #4,%a0 // advance to next L0 entry
addl #L1_PGTABLE_ENTRIES * 4,%a1 // advance to next L1 table
subl #1,%d0
bne .Ll0_loop
// Set up L1 entries
lea %pc@(_l1_tables),%a0
movl #L1_ENTRIES,%d0
lea %pc@(_l2_tables),%a1 // get pointer to L2 table
addl #0x00000003,%a1 // mark it valid
.Ll1_loop:
movl %a1,%a0@
addl #4,%a0
addl #L2_PGTABLE_ENTRIES * 4,%a1 // advance to next L2 table
subl #1,%d0
bne .Ll1_loop
// Set up L2 entries
lea %pc@(_l2_tables),%a0
movl #L2_ENTRIES,%d0
movl #0x000000083,%d1 // address 0, supervisor, writable, present
.L2_loop:
movl %d1,%a0@ // store the current entry
addl #4,%a0 // advance to next L2 entry
addl #PAGE_SIZE,%d1 // advance to next page
subl #1,%d0
bne .L2_loop
// set the supervisor root pointer
lea %pc@(_root_page_table),%a0
movec %a0,%srp
movec %a0,%urp
// enable the mmu
movl #(1<<15),%d0
movec %d0,%tc
// Branch to the high memory area
movl #.Lhigh_target,%a0
jmp %a0@
.Lhigh_target:
// Turn off DTTR0 and ITTR0
clrl %d0
movec %d0,%dtt0
movec %d0,%itt0
#endif
// load the initial stack pointer
lea _default_stack_top,%sp
// branch into C land with 4 args off the previous stack
movl %d3,%sp@-
movl %d2,%sp@-
movl %d1,%sp@-
movl %d0,%sp@-
movl %d7,%sp@-
movl %d6,%sp@-
movl %d5,%sp@-
movl %d4,%sp@-
jsr lk_main
// if we return from main just loop forever
@@ -65,8 +142,35 @@ bss_clear:
END_FUNCTION(_start)
.bss
.align 4
.balign 4
_default_stack_base:
.skip 4096
_default_stack_top:
#if M68K_MMU == 68040
// Define space for page tables to set up a mapping of MEMSIZE bytes of memory at KERNEL_ASPACE_BASE
.equ PAGE_SIZE, 4096
.equ L0_PGTABLE_ENTRIES, 128 // 7 bits
.equ L0_ENTRY_RANGE, (1<<25) // each L0 entry covers 32MB
.equ L1_PGTABLE_ENTRIES, 128 // 7 bits
.equ L1_ENTRY_RANGE, (1<<18) // each L1 entry covers 256KB
.equ L2_PGTABLE_ENTRIES, 64 // 6 bits
// Number of entries at each level to fill in order to cover MEMSIZE,
// rounded up to the next L0 entry range so all of the L1 and L2 page tables are fully used.
.equ MEMSIZE_ROUNDED, (MEMSIZE + L0_ENTRY_RANGE - 1) & ~(L0_ENTRY_RANGE - 1)
.equ L0_ENTRIES, MEMSIZE_ROUNDED / L0_ENTRY_RANGE
.equ L1_ENTRIES, MEMSIZE_ROUNDED / L1_ENTRY_RANGE
.equ L2_ENTRIES, MEMSIZE_ROUNDED / PAGE_SIZE
.balign 4096
_root_page_table:
.skip L0_PGTABLE_ENTRIES * 4 // 128 entries, 4 bytes each
.balign 4096
_l1_tables:
.skip L1_ENTRIES * 4 // 4 bytes each, one per 256KB section of memory
.balign 4096
_l2_tables:
.skip L2_ENTRIES * 4 // 4 bytes each, one per page of memory
#endif // M68K_MMU == 68040

View File

@@ -8,8 +8,8 @@
#include "bootinfo.h"
#include <lk/compiler.h>
#include <lk/trace.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <stdio.h>
#define LOCAL_TRACE 0
@@ -18,22 +18,38 @@ extern uint8_t __bss_end;
static const char *bootinfo_tag_to_string(enum BOOTINFO_TAGS tag) {
switch (tag) {
case BOOTINFO_TAG_END: return "END";
case BOOTINFO_TAG_MACHTYPE: return "MACHTYPE";
case BOOTINFO_TAG_CPUTYPE: return "CPUTYPE";
case BOOTINFO_TAG_FPUTYPE: return "FPUTYPE";
case BOOTINFO_TAG_MMUTYPE: return "MMUTYPE";
case BOOTINFO_TAG_MEMCHUNK: return "MEMCHUNK";
case BOOTINFO_TAG_RAMDISK: return "RAMDISK";
case BOOTINFO_TAG_COMMAND_LINE: return "COMMAND_LINE";
case BOOTINFO_TAG_RNG_SEED: return "RNG_SEED";
case BOOTINFO_TAG_VIRT_QEMU_VERSION: return "VIRT_QEMU_VERSION";
case BOOTINFO_TAG_VIRT_GF_PIC_BASE: return "VIRT_GF_PIC_BASE";
case BOOTINFO_TAG_VIRT_GF_RTC_BASE: return "VIRT_GF_RTC_BASE";
case BOOTINFO_TAG_VIRT_GF_TTY_BASE: return "VIRT_GF_TTY_BASE";
case BOOTINFO_TAG_VIRT_VIRTIO_BASE: return "VIRT_VIRTIO_BASE";
case BOOTINFO_TAG_VIRT_CTRL_BASE: return "VIRT_CTRL_BASE";
default: return "UNKNOWN";
case BOOTINFO_TAG_END:
return "END";
case BOOTINFO_TAG_MACHTYPE:
return "MACHTYPE";
case BOOTINFO_TAG_CPUTYPE:
return "CPUTYPE";
case BOOTINFO_TAG_FPUTYPE:
return "FPUTYPE";
case BOOTINFO_TAG_MMUTYPE:
return "MMUTYPE";
case BOOTINFO_TAG_MEMCHUNK:
return "MEMCHUNK";
case BOOTINFO_TAG_RAMDISK:
return "RAMDISK";
case BOOTINFO_TAG_COMMAND_LINE:
return "COMMAND_LINE";
case BOOTINFO_TAG_RNG_SEED:
return "RNG_SEED";
case BOOTINFO_TAG_VIRT_QEMU_VERSION:
return "VIRT_QEMU_VERSION";
case BOOTINFO_TAG_VIRT_GF_PIC_BASE:
return "VIRT_GF_PIC_BASE";
case BOOTINFO_TAG_VIRT_GF_RTC_BASE:
return "VIRT_GF_RTC_BASE";
case BOOTINFO_TAG_VIRT_GF_TTY_BASE:
return "VIRT_GF_TTY_BASE";
case BOOTINFO_TAG_VIRT_VIRTIO_BASE:
return "VIRT_VIRTIO_BASE";
case BOOTINFO_TAG_VIRT_CTRL_BASE:
return "VIRT_CTRL_BASE";
default:
return "UNKNOWN";
}
}
@@ -88,4 +104,3 @@ const void *bootinfo_find_record(uint16_t id, uint16_t *size_out) {
ptr += item->size;
}
}

View File

@@ -7,34 +7,32 @@
*/
#include "platform_p.h"
#include <assert.h>
#include <inttypes.h>
#include <lk/err.h>
#include <lk/debug.h>
#include <lk/reg.h>
#include <lk/trace.h>
#include <kernel/debug.h>
#include <kernel/thread.h>
#include <platform/interrupts.h>
#include <platform/virt.h>
#include <platform/timer.h>
#include <lk/debug.h>
#include <lk/err.h>
#include <lk/reg.h>
#include <lk/trace.h>
#include <platform.h>
#include <platform/interrupts.h>
#include <platform/timer.h>
#include <platform/virt.h>
#define LOCAL_TRACE 0
// implementation of RTC at
// https://github.com/qemu/qemu/blob/master/hw/rtc/goldfish_rtc.c
volatile uint32_t * const goldfish_rtc_base = (void *)VIRT_GF_RTC_MMIO_BASE;
volatile uint32_t *const goldfish_rtc_base = (void *)VIRT_GF_RTC_MMIO_BASE;
// registers
enum {
RTC_TIME_LOW = 0x00,
RTC_TIME_HIGH = 0x04,
RTC_ALARM_LOW = 0x08,
RTC_ALARM_HIGH = 0x0c,
RTC_IRQ_ENABLED = 0x10,
RTC_CLEAR_ALARM = 0x14,
RTC_ALARM_STATUS = 0x18,
RTC_TIME_LOW = 0x00,
RTC_TIME_HIGH = 0x04,
RTC_ALARM_LOW = 0x08,
RTC_ALARM_HIGH = 0x0c,
RTC_IRQ_ENABLED = 0x10,
RTC_CLEAR_ALARM = 0x14,
RTC_ALARM_STATUS = 0x18,
RTC_CLEAR_INTERRUPT = 0x1c,
};
@@ -105,7 +103,7 @@ lk_time_t current_time(void) {
return (lk_time_t)(t / 1000000ULL); // ns -> ms
}
status_t platform_set_oneshot_timer (platform_timer_callback callback, void *arg, lk_time_t interval) {
status_t platform_set_oneshot_timer(platform_timer_callback callback, void *arg, lk_time_t interval) {
LTRACEF("callback %p, arg %p, interval %u\n", callback, arg, interval);
t_callback = callback;
@@ -126,5 +124,3 @@ void platform_stop_timer(void) {
write_reg(RTC_CLEAR_ALARM, 1);
write_reg(RTC_CLEAR_INTERRUPT, 1);
}

View File

@@ -5,40 +5,44 @@
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <kernel/thread.h>
#include <lib/cbuf.h>
#include <lk/reg.h>
#include <lk/trace.h>
#include <lib/cbuf.h>
#include <kernel/thread.h>
#include <platform.h>
#include <platform/interrupts.h>
#include <platform/debug.h>
#include <platform/interrupts.h>
#include <platform/virt.h>
#include <sys/types.h>
#if WITH_KERNEL_VM
#include <kernel/vm.h>
#endif
#include "platform_p.h"
// goldfish tty
// from https://github.com/qemu/qemu/blob/master/hw/char/goldfish_tty.c
volatile uint32_t * const goldfish_tty_base = (void *)VIRT_GF_TTY_MMIO_BASE;
volatile uint32_t *const goldfish_tty_base = (void *)VIRT_GF_TTY_MMIO_BASE;
// registers
enum {
REG_PUT_CHAR = 0x00,
REG_BYTES_READY = 0x04,
REG_CMD = 0x08,
REG_DATA_PTR = 0x10,
REG_DATA_LEN = 0x14,
REG_PUT_CHAR = 0x00,
REG_BYTES_READY = 0x04,
REG_CMD = 0x08,
REG_DATA_PTR = 0x10,
REG_DATA_LEN = 0x14,
REG_DATA_PTR_HIGH = 0x18,
REG_VERSION = 0x20,
REG_VERSION = 0x20,
};
// commands
enum {
CMD_INT_DISABLE = 0x00,
CMD_INT_ENABLE = 0x01,
CMD_WRITE_BUFFER = 0x02,
CMD_READ_BUFFER = 0x03,
CMD_INT_DISABLE = 0x00,
CMD_INT_ENABLE = 0x01,
CMD_WRITE_BUFFER = 0x02,
CMD_READ_BUFFER = 0x03,
};
#define RXBUF_SIZE 128
@@ -59,7 +63,8 @@ static enum handler_return uart_irq_handler(void *arg) {
bool resched = false;
// use a DMA read of one byte if a byte is ready
if (read_reg(REG_BYTES_READY) > 0) {
uint32_t ready = read_reg(REG_BYTES_READY);
if (ready > 0) {
write_reg(REG_CMD, CMD_READ_BUFFER);
char c = transfer_buf[0];
cbuf_write_char(&uart_rx_buf, c, false);
@@ -74,7 +79,13 @@ void goldfish_tty_early_init(void) {
write_reg(REG_CMD, CMD_INT_DISABLE);
// set up the transfer buffer for receives
write_reg(REG_DATA_PTR, (uint32_t)transfer_buf);
uint32_t buf_addr;
#if WITH_KERNEL_VM
buf_addr = (uint32_t)vaddr_to_paddr(transfer_buf);
#else
buf_addr = (uint32_t)transfer_buf;
#endif
write_reg(REG_DATA_PTR, buf_addr);
write_reg(REG_DATA_PTR_HIGH, 0);
write_reg(REG_DATA_LEN, sizeof(transfer_buf));
}
@@ -103,8 +114,9 @@ int uart_getc(char *c, bool wait) {
}
void platform_dputc(char c) {
if (c == '\n')
if (c == '\n') {
platform_dputc('\r');
}
uart_putc(c);
}

View File

@@ -30,18 +30,18 @@
* CPU IRQ #7 -> NMI
*/
#define VIRT_GF_PIC_MMIO_BASE 0xff000000 /* MMIO: 0xff000000 - 0xff005fff */
#define VIRT_GF_PIC_IRQ_BASE 1 /* IRQ: #1 -> #6 */
#define VIRT_GF_PIC_MMIO_BASE 0xff000000 /* MMIO: 0xff000000 - 0xff005fff */
#define VIRT_GF_PIC_IRQ_BASE 1 /* IRQ: #1 -> #6 */
#define VIRT_GF_PIC_NB 6
#define NUM_IRQS (VIRT_GF_PIC_NB * 32) // PIC 1 - 6
#define NUM_IRQS (VIRT_GF_PIC_NB * 32) // PIC 1 - 6
/* maps (pic + irq) base one to a linear number zero based */
#define PIC_IRQ(pic, irq) (((pic) - 1) * 32 + ((irq) - 1))
#define PIC_IRQ(pic, irq) (((pic) - 1) * 32 + ((irq) - 1))
/* 2 goldfish-rtc (and timer) */
#define VIRT_GF_RTC_MMIO_BASE 0xff006000 /* MMIO: 0xff006000 - 0xff007fff */
#define VIRT_GF_RTC_IRQ_BASE PIC_IRQ(6, 1) /* PIC: #6, IRQ: #1 */
#define VIRT_GF_RTC_MMIO_BASE 0xff006000 /* MMIO: 0xff006000 - 0xff007fff */
#define VIRT_GF_RTC_IRQ_BASE PIC_IRQ(6, 1) /* PIC: #6, IRQ: #1 */
#define VIRT_GF_RTC_NB 2
/* 1 goldfish-tty */
@@ -50,7 +50,7 @@
/* 1 virt-ctrl */
#define VIRT_CTRL_MMIO_BASE 0xff009000 /* MMIO: 0xff009000 - 0xff009fff */
#define VIRT_CTRL_IRQ_BASE PIC_IRQ(1, 1) /* PIC: #1, IRQ: #1 */
#define VIRT_CTRL_IRQ_BASE PIC_IRQ(1, 1) /* PIC: #1, IRQ: #1 */
/*
* virtio-mmio size is 0x200 bytes
@@ -58,7 +58,7 @@
* we can attach 32 virtio devices / goldfish-pic
* -> we can manage 32 * 4 = 128 virtio devices
*/
#define VIRT_VIRTIO_MMIO_BASE 0xff010000 /* MMIO: 0xff010000 - 0xff01ffff */
#define VIRT_VIRTIO_IRQ_BASE PIC_IRQ(2, 1) /* PIC: 2, 3, 4, 5, IRQ: ALL */
#define VIRT_VIRTIO_MMIO_BASE 0xff010000 /* MMIO: 0xff010000 - 0xff01ffff */
#define VIRT_VIRTIO_IRQ_BASE PIC_IRQ(2, 1) /* PIC: 2, 3, 4, 5, IRQ: ALL */
#define NUM_VIRT_VIRTIO 128

View File

@@ -8,13 +8,13 @@
#include "platform_p.h"
#include <assert.h>
#include <lk/bits.h>
#include <lk/err.h>
#include <lk/debug.h>
#include <lk/reg.h>
#include <lk/trace.h>
#include <kernel/debug.h>
#include <kernel/thread.h>
#include <lk/bits.h>
#include <lk/debug.h>
#include <lk/err.h>
#include <lk/reg.h>
#include <lk/trace.h>
#include <platform/interrupts.h>
#include <platform/virt.h>
@@ -24,14 +24,14 @@
// https://github.com/qemu/qemu/blob/master/hw/intc/goldfish_pic.c
enum {
REG_STATUS = 0x00,
REG_IRQ_PENDING = 0x04,
REG_STATUS = 0x00,
REG_IRQ_PENDING = 0x04,
REG_IRQ_DISABLE_ALL = 0x08,
REG_DISABLE = 0x0c,
REG_ENABLE = 0x10,
REG_DISABLE = 0x0c,
REG_ENABLE = 0x10,
};
volatile uint32_t * const goldfish_pic_base = (void *)VIRT_GF_PIC_MMIO_BASE;
volatile uint32_t *const goldfish_pic_base = (void *)VIRT_GF_PIC_MMIO_BASE;
static struct int_handlers {
int_handler handler;
@@ -128,4 +128,3 @@ enum handler_return m68k_platform_irq(uint8_t m68k_irq) {
return ret;
}

View File

@@ -5,17 +5,18 @@
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <dev/virtio.h>
#include <dev/virtio/net.h>
#include <kernel/thread.h>
#include <lk/err.h>
#include <lk/reg.h>
#include <lk/trace.h>
#include <kernel/thread.h>
#include <platform.h>
#include <platform/interrupts.h>
#include <platform/debug.h>
#include <platform/interrupts.h>
#include <platform/timer.h>
#include <platform/virt.h>
#include <sys/types.h>
#include <dev/virtio.h>
#include <dev/virtio/net.h>
#if WITH_LIB_MINIP
#include <lib/minip.h>
#endif
@@ -30,7 +31,38 @@
#define LOCAL_TRACE 0
// Add the one memory region we have detected from the bootinfo.
// Registers [base, base + size) with the PMM on VM builds, or with novm
// otherwise. Returns the pmm status (NO_ERROR on novm builds).
static status_t add_memory_region(paddr_t base, size_t size, uint flags) {
#if WITH_KERNEL_VM
    // static: pmm_add_arena keeps a reference to the arena structure, so it
    // must outlive this call (also why only one region can be added this way).
    static pmm_arena_t arena;
    arena.name = "mem";
    arena.base = base;
    arena.size = size;
    arena.priority = 1;
    arena.flags = PMM_ARENA_FLAG_KMAP | flags;
    status_t err = pmm_add_arena(&arena);
    if (err < 0) {
        panic("pmm_add_arena failed\n");
    }
    return err;
#else
    novm_add_arena("mem", base, size);
    return NO_ERROR;
#endif
}
void platform_early_init(void) {
#if M68K_MMU == 68040
// use DTTR1 to map in all of peripheral space
// map 0xff000000 - 0xffffffff (16MB) to 0xff000000
// Logical address base: 0xff000000, mask 0x00000000, enable, supervisor, noncachable, serialized
uint32_t ttbr1 = 0xff00a040;
asm volatile("movec %0, %%dtt1" ::"r"(ttbr1) : "memory");
#endif
goldfish_tty_early_init();
pic_early_init();
goldfish_rtc_early_init();
@@ -55,7 +87,7 @@ void platform_early_init(void) {
uint32_t memsize = *(const uint32_t *)((uintptr_t)ptr + 4);
dprintf(INFO, "VIRT: memory base %#x size %#x\n", membase, memsize);
novm_add_arena("mem", membase, memsize);
add_memory_region((paddr_t)membase, (size_t)memsize, 0);
// TODO: read the rest of the device bootinfo records and dynamically locate devices
}
@@ -65,6 +97,11 @@ void platform_init(void) {
goldfish_tty_init();
goldfish_rtc_init();
#if M68K_MMU == 68040
// create a VM reservation for peripheral space that's using DTTR1
vmm_reserve_space(vmm_get_kernel_aspace(), "periph", 0x1000000, 0xff000000);
#endif
/* detect any virtio devices */
uint virtio_irqs[NUM_VIRT_VIRTIO];
for (int i = 0; i < NUM_VIRT_VIRTIO; i++) {
@@ -90,7 +127,7 @@ void platform_init(void) {
virtio_net_start();
//minip_start_static(ip_addr, ip_mask, ip_gateway);
// minip_start_static(ip_addr, ip_mask, ip_gateway);
minip_start_dhcp();
}
#endif

View File

@@ -7,8 +7,6 @@
*/
#pragma once
#include <stdbool.h>
void uart_init(void);
void pic_early_init(void);

View File

@@ -4,6 +4,7 @@ MODULE := $(LOCAL_DIR)
ARCH := m68k
M68K_CPU := 68040
WITH_MMU ?= 1
LK_HEAP_IMPLEMENTATION ?= dlmalloc
MODULE_DEPS += lib/cbuf

View File

@@ -4,6 +4,7 @@ MODULE := $(LOCAL_DIR)
ARCH := m68k
M68K_CPU := 68010
WITH_MMU ?= 0
LK_HEAP_IMPLEMENTATION ?= dlmalloc
WITH_LINKER_GC ?= true

View File

@@ -8,4 +8,4 @@ set -x
PROJECT=qemu-virt-m68k-test
$DIR/make-parallel $PROJECT
qemu-system-m68k -machine virt -cpu m68040 -kernel build-${PROJECT}/lk.elf -nographic $@
qemu-system-m68k -machine virt -m 64 -cpu m68040 -kernel build-${PROJECT}/lk.elf -nographic $@