[platform][device-tree] fix up 3 platforms to use the new fdtwalk routines

Three platforms had essentially duplicated logic that has now been pulled into
the fdtwalk library. Fix them up to call into those routines instead.

- qemu-virt-arm
- qemu-virt-riscv
- jh7110 (VisionFive 2 SoC)
Author: Travis Geiselbrecht
Date:   2024-04-16 23:15:00 -07:00
Commit: d39f2db58d (parent 4a97f932fd)
4 changed files with 81 additions and 468 deletions
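
The duplicated per-platform scanning now lives behind a handful of lib/fdtwalk helpers. The declarations below are a sketch reconstructed from the call sites in this diff, not copied from lib/fdtwalk.h; the argument names, the second fdtwalk_setup_memory parameter, and the return types are assumptions.

/*
 * Sketch only: prototypes inferred from the call sites in this commit.
 * The real header presumably uses LK's status_t/paddr_t; plain stdint
 * types are used here for illustration.
 */
#include <stdint.h>

typedef int status_t;   /* LK convention: NO_ERROR (0) on success, < 0 on error */

/* Find /memory and /reserved-memory nodes and register the RAM arenas
 * (pmm when the VM is enabled, novm otherwise), falling back to the given
 * defaults if the FDT is missing or malformed. The second argument appears
 * to be the FDT's physical address so the library can reserve it. */
status_t fdtwalk_setup_memory(const void *fdt, uint64_t fdt_phys,
                              uint64_t default_base, uint64_t default_size);

/* Walk /cpus and hand the secondary cores to the arch layer: hart ids on
 * riscv, PSCI-started cpus on arm, judging by the code removed below. */
status_t fdtwalk_setup_cpus_riscv(const void *fdt);
status_t fdtwalk_setup_cpus_arm(const void *fdt);

/* Locate a PCIe ECAM node and its IO/MMIO apertures and initialize the
 * ecam driver; callers treat >= NO_ERROR as success and then start the
 * PCI bus manager. */
status_t fdtwalk_setup_pci(const void *fdt);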

View File

@@ -18,6 +18,7 @@
#include <platform/jh7110.h>
#include <sys/types.h>
#include <lib/fdtwalk.h>
#include <dev/bus/pci.h>
#include <dev/interrupt/riscv_plic.h>
#if WITH_LIB_MINIP
#include <lib/minip.h>
@@ -29,92 +30,11 @@
#include "platform_p.h"
#define LOCAL_TRACE 1
#if WITH_KERNEL_VM
#define DEFAULT_MEMORY_SIZE (MEMSIZE) /* try to fetch from the emulator via the fdt */
static pmm_arena_t arena = {
.name = "ram",
.base = MEMORY_BASE_PHYS,
.size = DEFAULT_MEMORY_SIZE,
.flags = PMM_ARENA_FLAG_KMAP,
};
#endif
#define LOCAL_TRACE 0
static const void *fdt;
static volatile uint32_t *power_reset_reg;
// callbacks to the fdt_walk routine
static void memcallback(uint64_t base, uint64_t len, void *cookie) {
bool *found_mem = (bool *)cookie;
LTRACEF("base %#llx len %#llx cookie %p\n", base, len, cookie);
/* add another vm arena */
if (!*found_mem) {
printf("FDT: found memory arena, base %#llx size %#llx\n", base, len);
#if WITH_KERNEL_VM
arena.base = base;
arena.size = len;
pmm_add_arena(&arena);
#else
novm_add_arena("fdt", base, len);
#endif
*found_mem = true; // stop searching after the first one
}
}
struct reserved_memory_regions {
size_t count;
struct {
uint64_t base;
uint64_t len;
} regions[16];
};
static void reserved_memory_callback(uint64_t base, uint64_t len, void *cookie) {
struct reserved_memory_regions *mem = cookie;
LTRACEF("base %#llx len %#llx\n", base, len);
if (mem->count < countof(mem->regions)) {
mem->regions[mem->count].base = base;
mem->regions[mem->count].len = len;
mem->count++;
}
}
struct detected_cpus {
size_t count;
struct {
uint hart;
} cpu[SMP_MAX_CPUS];
};
static void cpucallback(uint64_t id, void *cookie) {
struct detected_cpus *cpus = cookie;
LTRACEF("hart %#llx\n", id);
if (cpus->count < SMP_MAX_CPUS) {
cpus->cpu[cpus->count].hart = id;
cpus->count++;
}
}
struct pcie_detect_state {
struct fdt_walk_pcie_info info;
} pcie_state;
static void pciecallback(const struct fdt_walk_pcie_info *info, void *cookie) {
struct pcie_detect_state *state = cookie;
LTRACEF("ecam base %#llx, len %#llx, bus_start %hhu, bus_end %hhu\n", info->ecam_base, info->ecam_len, info->bus_start, info->bus_end);
state->info = *info;
}
void platform_early_init(void) {
TRACE;
plic_early_init(PLIC_BASE_VIRT, NUM_IRQS, true);
@@ -122,98 +42,44 @@ void platform_early_init(void) {
LTRACEF("starting FDT scan\n");
/* look for a flattened device tree in the second arg passed to us */
bool found_mem = false;
struct reserved_memory_regions reserved = {0};
struct detected_cpus cpus = {0};
fdt = (void *)lk_boot_args[1];
fdt = (const void *)((uintptr_t)fdt + PERIPHERAL_BASE_VIRT);
const void *fdt = (void *)lk_boot_args[1];
#if WITH_KERNEL_VM
fdt = (const void *)((uintptr_t)fdt + KERNEL_ASPACE_BASE);
#endif
struct fdt_walk_callbacks cb = {
.mem = memcallback,
.memcookie = &found_mem,
.reserved_memory = reserved_memory_callback,
.reserved_memory_cookie = &reserved,
.cpu = cpucallback,
.cpucookie = &cpus,
.pcie = pciecallback,
.pciecookie = &pcie_state,
};
status_t err = fdt_walk(fdt, &cb);
LTRACEF("fdt_walk returns %d\n", err);
if (err != 0) {
printf("FDT: error finding FDT at %p, using default memory & cpu count\n", fdt);
if (LOCAL_TRACE) {
LTRACEF("dumping FDT at %p\n", fdt);
fdt_walk_dump(fdt);
}
// Always reserve all of physical memory below the kernel, this is where SBI lives
// TODO: figure out why uboot doesn't always put this here
reserved.regions[reserved.count].base = MEMBASE;
reserved.regions[reserved.count].len = KERNEL_LOAD_OFFSET;
reserved.count++;
// detect physical memory layout from the device tree
fdtwalk_setup_memory(fdt, lk_boot_args[1], MEMORY_BASE_PHYS, MEMSIZE);
/* add a default memory region if we didn't find it in the FDT */
if (!found_mem) {
#if WITH_KERNEL_VM
pmm_add_arena(&arena);
#else
novm_add_arena("default", MEMBASE, MEMSIZE);
#endif
}
#if WITH_KERNEL_VM
/* reserve memory described by the FDT */
for (size_t i = 0; i < reserved.count; i++) {
printf("FDT: reserving memory range [%#llx ... %#llx]\n",
reserved.regions[i].base, reserved.regions[i].base + reserved.regions[i].len - 1);
struct list_node list = LIST_INITIAL_VALUE(list);
pmm_alloc_range(reserved.regions[i].base, reserved.regions[i].len / PAGE_SIZE, &list);
}
#endif
#if WITH_SMP
// TODO: refactor this code into libfdt
if (cpus.count > 0) {
printf("FDT: found %zu cpu%c\n", cpus.count, cpus.count == 1 ? ' ' : 's');
uint harts[SMP_MAX_CPUS - 1];
// copy from the detected cpu list to an array of harts, excluding the boot hart
size_t hart_index = 0;
for (size_t i = 0; i < cpus.count; i++) {
if (cpus.cpu[i].hart != riscv_current_hart()) {
harts[hart_index++] = cpus.cpu[i].hart;
}
// we can start MAX CPUS - 1 secondaries
if (hart_index >= SMP_MAX_CPUS - 1) {
break;
}
}
// tell the riscv layer how many cores we have to start
if (hart_index > 0) {
riscv_set_secondary_harts_to_start(harts, hart_index);
}
}
#endif
// detect secondary cores to start
fdtwalk_setup_cpus_riscv(fdt);
LTRACEF("done scanning FDT\n");
/* save a copy of the pointer to the poweroff/reset register */
/* TODO: read it from the FDT */
#if WITH_KERNEL_VM
power_reset_reg = paddr_to_kvaddr(0x100000);
#else
power_reset_reg = (void *)0x100000;
#endif
}
void platform_init(void) {
plic_init();
uart_init();
// TODO: fix this, seems to read all zeros from the ecam
#if 0
/* configure and start pci from device tree */
status_t err = fdtwalk_setup_pci(fdt);
if (err >= NO_ERROR) {
// start the bus manager
pci_bus_mgr_init();
// assign resources to all devices in case they need it
pci_bus_mgr_assign_resources();
}
#endif
}
void platform_halt(platform_halt_action suggested_action,
@@ -221,17 +87,13 @@ void platform_halt(platform_halt_action suggested_action,
switch (suggested_action) {
case HALT_ACTION_SHUTDOWN:
dprintf(ALWAYS, "Shutting down... (reason = %d)\n", reason);
#if RISCV_S_MODE
// try to use SBI as a cleaner way to stop
sbi_system_reset(SBI_RESET_TYPE_SHUTDOWN, SBI_RESET_REASON_NONE);
#endif
*power_reset_reg = 0x5555;
break;
case HALT_ACTION_REBOOT:
dprintf(ALWAYS, "Rebooting... (reason = %d)\n", reason);
#if RISCV_S_MODE
sbi_system_reset(SBI_RESET_TYPE_COLD_REBOOT, SBI_RESET_REASON_NONE);
#endif
sbi_system_reset(SBI_RESET_TYPE_WARM_REBOOT, SBI_RESET_REASON_NONE);
*power_reset_reg = 0x7777;
break;
case HALT_ACTION_HALT:
@@ -250,3 +112,18 @@ void platform_halt(platform_halt_action suggested_action,
for (;;)
arch_idle();
}
status_t platform_pci_int_to_vector(unsigned int pci_int, unsigned int *vector) {
// at the moment there's no translation between PCI IRQs and native irqs
*vector = pci_int;
return NO_ERROR;
}
status_t platform_allocate_interrupts(size_t count, uint align_log2, bool msi, unsigned int *vector) {
return ERR_NOT_SUPPORTED;
}
status_t platform_compute_msi_values(unsigned int vector, unsigned int cpu, bool edge,
uint64_t *msi_address_out, uint16_t *msi_data_out) {
return ERR_NOT_SUPPORTED;
}
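
Put together, the jh7110 early-init path after this change reduces to roughly the following. This is a condensed sketch assembled from the added lines in the hunks above, with the platform headers, error handling, and the poweroff-register setup omitted.

#include <lib/fdtwalk.h>    /* plus the platform headers omitted here */

void platform_early_init(void) {
    plic_early_init(PLIC_BASE_VIRT, NUM_IRQS, true);

    /* the bootloader passes the FDT's physical address in boot arg 1 */
    const void *fdt = (const void *)lk_boot_args[1];
#if WITH_KERNEL_VM
    /* convert the physical pointer to a kernel virtual address */
    fdt = (const void *)((uintptr_t)fdt + KERNEL_ASPACE_BASE);
#endif

    /* hand the memory and cpu discovery to lib/fdtwalk */
    fdtwalk_setup_memory(fdt, lk_boot_args[1], MEMORY_BASE_PHYS, MEMSIZE);
    fdtwalk_setup_cpus_riscv(fdt);
}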

View File

@@ -3,21 +3,21 @@ LOCAL_DIR := $(GET_LOCAL_DIR)
MODULE := $(LOCAL_DIR)
ARCH := riscv
SUBARCH ?= 64
RISCV_MODE ?= supervisor
SUBARCH := 64
RISCV_MODE := supervisor
WITH_SMP ?= true
SMP_MAX_CPUS ?= 4
LK_HEAP_IMPLEMENTATION ?= dlmalloc
RISCV_FPU ?= true
RISCV_MMU ?= sv39
RISCV_FPU := true
RISCV_MMU := sv39
RISCV_EXTENSION_LIST ?= zba zbb
MODULE_DEPS += lib/cbuf
MODULE_DEPS += lib/fdt
MODULE_DEPS += lib/fdtwalk
MODULE_DEPS += dev/interrupt/riscv_plic
#MODULE_DEPS += dev/bus/pci
#MODULE_DEPS += dev/bus/pci/drivers
MODULE_DEPS += dev/bus/pci
MODULE_DEPS += dev/bus/pci/drivers
MODULE_SRCS += $(LOCAL_DIR)/platform.c
MODULE_SRCS += $(LOCAL_DIR)/uart.c

View File

@@ -59,56 +59,7 @@ struct mmu_initial_mapping mmu_initial_mappings[] = {
{ 0 }
};
static pmm_arena_t arena = {
.name = "ram",
.base = MEMORY_BASE_PHYS,
.size = DEFAULT_MEMORY_SIZE,
.flags = PMM_ARENA_FLAG_KMAP,
};
// callbacks to the fdt_walk routine
static void memcallback(uint64_t base, uint64_t len, void *cookie) {
bool *found_mem = (bool *)cookie;
LTRACEF("base %#llx len %#llx cookie %p\n", base, len, cookie);
/* add another novm arena */
if (!*found_mem) {
printf("FDT: found memory arena, base %#llx size %#llx\n", base, len);
/* trim size on certain platforms */
#if ARCH_ARM
if (len > 1024*1024*1024U) {
len = 1024*1024*1024; /* only use the first 1GB on ARM32 */
printf("trimming memory to 1GB\n");
}
#endif
/* set the size in the pmm arena */
arena.size = len;
*found_mem = true; // stop searching after the first one
}
}
static void cpucallback(uint64_t id, void *cookie) {
int *cpu_count = (int *)cookie;
LTRACEF("id %#llx cookie %p\n", id, cookie);
(*cpu_count)++;
}
struct pcie_detect_state {
struct fdt_walk_pcie_info info;
} pcie_state;
static void pciecallback(const struct fdt_walk_pcie_info *info, void *cookie) {
struct pcie_detect_state *state = cookie;
LTRACEF("ecam base %#llx, len %#llx, bus_start %hhu, bus_end %hhu\n", info->ecam_base, info->ecam_len, info->bus_start, info->bus_end);
state->info = *info;
}
const void *fdt = (void *)KERNEL_BASE;
void platform_early_init(void) {
/* initialize the interrupt controller */
@@ -118,91 +69,31 @@ void platform_early_init(void) {
uart_init_early();
int cpu_count = 0;
bool found_mem = false;
struct fdt_walk_callbacks cb = {
.mem = memcallback,
.memcookie = &found_mem,
.cpu = cpucallback,
.cpucookie = &cpu_count,
.pcie = pciecallback,
.pciecookie = &pcie_state,
};
const void *fdt = (void *)KERNEL_BASE;
status_t err = fdt_walk(fdt, &cb);
LTRACEF("fdt_walk returns %d\n", err);
if (err != 0) {
printf("FDT: error finding FDT at %p, using default memory & cpu count\n", fdt);
if (LOCAL_TRACE) {
LTRACEF("dumping FDT at %p\n", fdt);
fdt_walk_dump(fdt);
}
/* add the main memory arena */
pmm_add_arena(&arena);
/* reserve the first 64k of ram, which should be holding the fdt */
struct list_node list = LIST_INITIAL_VALUE(list);
pmm_alloc_range(MEMBASE, 0x10000 / PAGE_SIZE, &list);
/* count the number of secondary cpus */
if (cpu_count == 0) {
/* if we didn't find any in the FDT, assume max number */
cpu_count = SMP_MAX_CPUS;
} else if (cpu_count > 0) {
printf("FDT: found %d cpus\n", cpu_count);
cpu_count = MIN(cpu_count, SMP_MAX_CPUS);
}
LTRACEF("booting %d cpus\n", cpu_count);
/* boot the secondary cpus using the Power State Coordination Interface */
for (int cpuid = 1; cpuid < cpu_count; cpuid++) {
/* note: assumes cpuids are numbered like MPIDR 0:0:0:N */
int ret = psci_cpu_on(cpuid, MEMBASE + KERNEL_LOAD_OFFSET);
if (ret != 0) {
printf("ERROR: psci CPU_ON returns %d\n", ret);
}
}
// detect physical memory layout from the device tree
fdtwalk_setup_memory(fdt, MEMORY_BASE_PHYS, MEMORY_BASE_PHYS, DEFAULT_MEMORY_SIZE);
}
void platform_init(void) {
status_t err;
uart_init();
/* detect pci */
#if ARCH_ARM
if (pcie_state.info.ecam_base > (1ULL << 32)) {
// dont try to configure this since we dont have LPAE support
printf("PCIE: skipping pci initialization due to high memory ECAM\n");
pcie_state.info.ecam_len = 0;
// start secondary cores
fdtwalk_setup_cpus_arm(fdt);
/* configure and start pci from device tree */
status_t err = fdtwalk_setup_pci(fdt);
if (err >= NO_ERROR) {
// start the bus manager
pci_bus_mgr_init();
// assign resources to all devices in case they need it
pci_bus_mgr_assign_resources();
}
#endif
if (pcie_state.info.ecam_len > 0) {
printf("PCIE: initializing pcie with ecam at %#" PRIx64 " found in FDT\n", pcie_state.info.ecam_base);
err = pci_init_ecam(pcie_state.info.ecam_base, pcie_state.info.ecam_len, pcie_state.info.bus_start, pcie_state.info.bus_end);
if (err == NO_ERROR) {
// add some additional resources to the pci bus manager in case it needs to configure
if (pcie_state.info.io_len > 0) {
// we can only deal with a mapping of io base 0 to the mmio base
DEBUG_ASSERT(pcie_state.info.io_base == 0);
pci_bus_mgr_add_resource(PCI_RESOURCE_IO_RANGE, pcie_state.info.io_base, pcie_state.info.io_len);
// TODO: set the mmio base somehow so pci knows what to do with it
}
if (pcie_state.info.mmio_len > 0) {
pci_bus_mgr_add_resource(PCI_RESOURCE_MMIO_RANGE, pcie_state.info.mmio_base, pcie_state.info.mmio_len);
}
if (pcie_state.info.mmio64_len > 0) {
pci_bus_mgr_add_resource(PCI_RESOURCE_MMIO64_RANGE, pcie_state.info.mmio64_base, pcie_state.info.mmio64_len);
}
// start the bus manager
pci_bus_mgr_init();
pci_bus_mgr_assign_resources();
}
}
/* detect any virtio devices */
uint virtio_irqs[NUM_VIRTIO_TRANSPORTS];
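
The arm port ends up with the same shape: condensed from the added lines above, platform_init now defers the ECAM and aperture bookkeeping to the library. Sketch only, assuming fdt is the file-scope pointer initialized to KERNEL_BASE earlier in the file.

void platform_init(void) {
    uart_init();

    /* start secondary cores described in the FDT (PSCI under the hood) */
    fdtwalk_setup_cpus_arm(fdt);

    /* find the ECAM plus IO/MMIO apertures and init the ecam PCI driver */
    status_t err = fdtwalk_setup_pci(fdt);
    if (err >= NO_ERROR) {
        pci_bus_mgr_init();             /* start the bus manager */
        pci_bus_mgr_assign_resources(); /* hand out resources to devices */
    }

    /* ...virtio detection continues as before */
}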

View File

@@ -39,168 +39,30 @@
#define LOCAL_TRACE 0
#if WITH_KERNEL_VM
#define DEFAULT_MEMORY_SIZE (MEMSIZE) /* try to fetch from the emulator via the fdt */
static pmm_arena_t arena = {
.name = "ram",
.base = MEMORY_BASE_PHYS,
.size = DEFAULT_MEMORY_SIZE,
.flags = PMM_ARENA_FLAG_KMAP,
};
#endif
static const void *fdt;
static volatile uint32_t *power_reset_reg;
// callbacks to the fdt_walk routine
static void memcallback(uint64_t base, uint64_t len, void *cookie) {
bool *found_mem = (bool *)cookie;
LTRACEF("base %#llx len %#llx cookie %p\n", base, len, cookie);
/* add another vm arena */
if (!*found_mem) {
printf("FDT: found memory arena, base %#llx size %#llx\n", base, len);
#if WITH_KERNEL_VM
arena.base = base;
arena.size = len;
pmm_add_arena(&arena);
#else
novm_add_arena("fdt", base, len);
#endif
*found_mem = true; // stop searching after the first one
}
}
struct reserved_memory_regions {
size_t count;
struct {
uint64_t base;
uint64_t len;
} regions[16];
};
static void reserved_memory_callback(uint64_t base, uint64_t len, void *cookie) {
struct reserved_memory_regions *mem = cookie;
LTRACEF("base %#llx len %#llx\n", base, len);
if (mem->count < countof(mem->regions)) {
mem->regions[mem->count].base = base;
mem->regions[mem->count].len = len;
mem->count++;
}
}
struct detected_cpus {
size_t count;
struct {
uint hart;
} cpu[SMP_MAX_CPUS];
};
static void cpucallback(uint64_t id, void *cookie) {
struct detected_cpus *cpus = cookie;
LTRACEF("hart %#llx\n", id);
if (cpus->count < SMP_MAX_CPUS) {
cpus->cpu[cpus->count].hart = id;
cpus->count++;
}
}
struct pcie_detect_state {
struct fdt_walk_pcie_info info;
} pcie_state;
static void pciecallback(const struct fdt_walk_pcie_info *info, void *cookie) {
struct pcie_detect_state *state = cookie;
LTRACEF("ecam base %#llx, len %#llx, bus_start %hhu, bus_end %hhu\n", info->ecam_base, info->ecam_len, info->bus_start, info->bus_end);
state->info = *info;
}
void platform_early_init(void) {
plic_early_init(PLIC_BASE_VIRT, NUM_IRQS, false);
LTRACEF("starting FDT scan\n");
/* look for a flattened device tree in the second arg passed to us */
bool found_mem = false;
struct reserved_memory_regions reserved = {0};
struct detected_cpus cpus = {0};
const void *fdt = (void *)lk_boot_args[1];
fdt = (void *)lk_boot_args[1];
#if WITH_KERNEL_VM
fdt = (const void *)((uintptr_t)fdt + PERIPHERAL_BASE_VIRT);
#endif
struct fdt_walk_callbacks cb = {
.mem = memcallback,
.memcookie = &found_mem,
.reserved_memory = reserved_memory_callback,
.reserved_memory_cookie = &reserved,
.cpu = cpucallback,
.cpucookie = &cpus,
.pcie = pciecallback,
.pciecookie = &pcie_state,
};
status_t err = fdt_walk(fdt, &cb);
LTRACEF("fdt_walk returns %d\n", err);
if (err != 0) {
printf("FDT: error finding FDT at %p, using default memory & cpu count\n", fdt);
if (LOCAL_TRACE) {
LTRACEF("dumping FDT at %p\n", fdt);
fdt_walk_dump(fdt);
}
/* add a default memory region if we didn't find it in the FDT */
if (!found_mem) {
#if WITH_KERNEL_VM
pmm_add_arena(&arena);
#else
novm_add_arena("default", MEMBASE, MEMSIZE);
#endif
}
// detect physical memory layout from the device tree
fdtwalk_setup_memory(fdt, lk_boot_args[1], MEMORY_BASE_PHYS, MEMSIZE);
#if WITH_KERNEL_VM
/* reserve memory described by the FDT */
for (size_t i = 0; i < reserved.count; i++) {
printf("FDT: reserving memory range [%#llx ... %#llx]\n",
reserved.regions[i].base, reserved.regions[i].base + reserved.regions[i].len - 1);
struct list_node list = LIST_INITIAL_VALUE(list);
pmm_alloc_range(reserved.regions[i].base, reserved.regions[i].len / PAGE_SIZE, &list);
}
#endif
#if WITH_SMP
// TODO: refactor this code into libfdt
if (cpus.count > 0) {
printf("FDT: found %zu cpu%c\n", cpus.count, cpus.count == 1 ? ' ' : 's');
uint harts[SMP_MAX_CPUS - 1];
// copy from the detected cpu list to an array of harts, excluding the boot hart
size_t hart_index = 0;
for (size_t i = 0; i < cpus.count; i++) {
if (cpus.cpu[i].hart != riscv_current_hart()) {
harts[hart_index++] = cpus.cpu[i].hart;
}
// we can start MAX CPUS - 1 secondaries
if (hart_index >= SMP_MAX_CPUS - 1) {
break;
}
}
// tell the riscv layer how many cores we have to start
if (hart_index > 0) {
riscv_set_secondary_harts_to_start(harts, hart_index);
}
}
#endif
// detect secondary cores to start
fdtwalk_setup_cpus_riscv(fdt);
LTRACEF("done scanning FDT\n");
@@ -217,31 +79,14 @@ void platform_init(void) {
plic_init();
uart_init();
/* detect pci */
if (pcie_state.info.ecam_len > 0) {
printf("PCIE: initializing pcie with ecam at %#" PRIx64 " found in FDT\n", pcie_state.info.ecam_base);
status_t err = pci_init_ecam(pcie_state.info.ecam_base, pcie_state.info.ecam_len, pcie_state.info.bus_start, pcie_state.info.bus_end);
if (err == NO_ERROR) {
// add some additional resources to the pci bus manager in case it needs to configure
if (pcie_state.info.io_len > 0) {
// we can only deal with a mapping of io base 0 to the mmio base
DEBUG_ASSERT(pcie_state.info.io_base == 0);
pci_bus_mgr_add_resource(PCI_RESOURCE_IO_RANGE, pcie_state.info.io_base, pcie_state.info.io_len);
/* configure and start pci from device tree */
status_t err = fdtwalk_setup_pci(fdt);
if (err >= NO_ERROR) {
// start the bus manager
pci_bus_mgr_init();
// TODO: set the mmio base somehow so pci knows what to do with it
}
if (pcie_state.info.mmio_len > 0) {
pci_bus_mgr_add_resource(PCI_RESOURCE_MMIO_RANGE, pcie_state.info.mmio_base, pcie_state.info.mmio_len);
}
if (pcie_state.info.mmio64_len > 0) {
pci_bus_mgr_add_resource(PCI_RESOURCE_MMIO64_RANGE, pcie_state.info.mmio64_base, pcie_state.info.mmio64_len);
}
// start the bus manager
pci_bus_mgr_init();
pci_bus_mgr_assign_resources(); // add some additional resources to the pci bus manager in case it needs to configure
};
// assign resources to all devices in case they need it
pci_bus_mgr_assign_resources();
}
/* detect any virtio devices */
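
For contrast, this is the boilerplate each of these ports used to carry, condensed from the removed lines above: a callback table handed to fdt_walk(), then hand-rolled arena, reserved-range, secondary-cpu, and ECAM handling that now lives behind the fdtwalk_setup_* helpers.

/* Condensed sketch of the pre-change pattern this commit deletes; the
 * memcallback/cpucallback/pciecallback helpers are the ones shown in the
 * removed hunks above. */
static void old_style_fdt_scan(const void *fdt) {
    bool found_mem = false;
    struct detected_cpus cpus = {0};

    struct fdt_walk_callbacks cb = {
        .mem = memcallback,      /* register the first /memory range as an arena */
        .memcookie = &found_mem,
        .cpu = cpucallback,      /* collect hart ids / cpu count */
        .cpucookie = &cpus,
        .pcie = pciecallback,    /* stash the ECAM and aperture info */
        .pciecookie = &pcie_state,
    };

    status_t err = fdt_walk(fdt, &cb);
    if (err != 0) {
        /* fall back to compile-time defaults for memory and cpu count */
    }

    /* ...followed by per-platform copies of the pmm/novm arena setup, the
     * reserved-range pmm_alloc_range() loop, the secondary-cpu start, and
     * the pci_init_ecam() / pci_bus_mgr_add_resource() calls. */
}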