WIP set up per cpu structures for x86-64
only on the boot cpu for now
@@ -125,7 +125,8 @@ _tss_gde:
     .byte 0 /* base 31:24 */
 #if ARCH_X86_64
     /* 64-bit TSSs are 16 bytes long */
-    .quad 0x0000000000000000
+    .int 0 /* base 63:32 */
+    .int 0 /* type(0) + reserved */
 #endif
 .set i, i+1
 .endr

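For reference, the two added .int directives fill in the upper half of a 16-byte long-mode TSS descriptor. A sketch of the full layout as a C struct (illustrative only, not part of the commit; field names are made up, layout per the x86-64 manuals):

    #include <stdint.h>

    // the 16-byte system descriptor the assembly above emits, one per TSS
    typedef struct {
        uint16_t limit_15_0;
        uint16_t base_15_0;
        uint8_t  base_23_16;
        uint8_t  access;              // type (0x9 = available 64-bit TSS), DPL, present
        uint8_t  limit_19_16_flags;
        uint8_t  base_31_24;          // the ".byte 0 /* base 31:24 */" context line
        uint32_t base_63_32;          // first added .int
        uint32_t reserved_type_zero;  // second added .int; must be zero in long mode
    } tss_descriptor_64;
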
@@ -50,6 +50,20 @@ static inline ulong arch_cycle_count(void) {
 #endif
 }
 
+#if WITH_SMP
+#include <arch/x86/mp.h>
+static inline struct thread *arch_get_current_thread(void) {
+    return x86_get_current_thread();
+}
+
+static inline void arch_set_current_thread(struct thread *t) {
+    x86_set_current_thread(t);
+}
+
+static inline uint arch_curr_cpu_num(void) {
+    return x86_get_cpu_num();
+}
+#else
 /* use a global pointer to store the current_thread */
 extern struct thread *_current_thread;
 
@@ -64,6 +78,7 @@ static inline void arch_set_current_thread(struct thread *t) {
 static inline uint arch_curr_cpu_num(void) {
     return 0;
 }
+#endif
 
 #if ARCH_X86_64
 // relies on SSE2

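Both configurations expose the same arch_* entry points, so generic kernel code is unchanged either way: on SMP builds they compile down to gs-relative accesses, on uniprocessor builds to the _current_thread global and a constant cpu number 0. A hedged usage sketch (the function and include below are illustrative, not from the commit):

    #include <sys/types.h>
    #include <arch/ops.h>   // assumed home of the arch_* accessors in this tree
    #include <stdio.h>

    void show_current(void) {
        struct thread *t = arch_get_current_thread(); // single gs: load on SMP x86-64
        uint cpu = arch_curr_cpu_num();               // always 0 without WITH_SMP
        printf("cpu %u: current thread %p\n", cpu, (void *)t);
    }
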
@@ -471,6 +471,42 @@ static inline void write_msr (uint32_t msr_id, uint64_t msr_write_val) {
         : : "c" (msr_id), "a" (low_val), "d"(high_val));
 }
 
+static inline uint64_t x86_read_gs_offset64(uintptr_t offset) {
+    uint64_t ret;
+    __asm__("movq %%gs:%1, %0" : "=r"(ret) : "m"(*(uint64_t*)(offset)));
+    return ret;
+}
+
+static inline void x86_write_gs_offset64(uintptr_t offset, uint64_t val) {
+    __asm__("movq %0, %%gs:%1" : : "ir"(val), "m"(*(uint64_t*)(offset)) : "memory");
+}
+
+static inline uint32_t x86_read_gs_offset32(uintptr_t offset) {
+    uint32_t ret;
+    __asm__("movl %%gs:%1, %0" : "=r"(ret) : "m"(*(uint32_t*)(offset)));
+    return ret;
+}
+
+static inline void x86_write_gs_offset32(uintptr_t offset, uint32_t val) {
+    __asm__("movl %0, %%gs:%1" : : "ir"(val), "m"(*(uint32_t*)(offset)) : "memory");
+}
+
+#if __SIZEOF_POINTER__ == 8
+static inline void *x86_read_gs_offset_ptr(uintptr_t offset) {
+    return (void *)x86_read_gs_offset64(offset);
+}
+static inline void x86_write_gs_offset_ptr(uintptr_t offset, void *val) {
+    x86_write_gs_offset64(offset, (uint64_t)(val));
+}
+#else
+static inline void *x86_read_gs_offset_ptr(uintptr_t offset) {
+    return (void *)x86_read_gs_offset32(offset);
+}
+static inline void x86_write_gs_offset_ptr(uintptr_t offset, void *val) {
+    x86_write_gs_offset32(offset, (uint32_t)(val));
+}
+#endif
+
 typedef ulong x86_flags_t;
 
 static inline x86_flags_t x86_save_flags(void) {

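The "m"(*(uint64_t*)(offset)) operand lets the compiler form an ordinary addressing mode for offset, which the %%gs: prefix then rebases against GS.base, so each accessor is a single load or store. Written out with an explicit base pointer, the read is equivalent to this sketch (gs_base stands in for the value programmed into IA32_GS_BASE; it is not a real variable anywhere in this code):

    #include <stdint.h>

    // conceptual slow path of x86_read_gs_offset64(offset)
    static uint64_t read_gs_offset64_slow(uintptr_t gs_base, uintptr_t offset) {
        // same one-load semantics, but requires gs_base materialized in a register
        return *(volatile uint64_t *)(gs_base + offset);
    }
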
@@ -8,8 +8,9 @@
 #pragma once
 
 #include <sys/types.h>
+#include <arch/x86.h>
 
-// per cpu pointer pointed to by a segment register on x86
+// per cpu pointer pointed to by gs:
 typedef struct x86_percpu {
     // pointer back to ourselves so we can get a raw pointer via segment:0
     struct x86_percpu *self;

@@ -24,5 +25,52 @@ typedef struct x86_percpu {
     // per cpu doublefault/nmi stacks
 } x86_percpu_t;
 
+#define X86_PERCPU_FIELD_OFFSET(field) offsetof(x86_percpu_t, field)
+
 // called extremely early on the boot cpu and each secondary cpu
 void x86_percpu_init_early(uint cpu_num, uint apic_id);
+
+// get the percpu struct for the current cpu
+static inline x86_percpu_t *x86_get_percpu(void) {
+    x86_percpu_t *percpu;
+    __asm__ volatile("mov %%gs:0, %0" : "=r" (percpu));
+    return percpu;
+}
+
+// get the percpu struct for a specific cpu
+x86_percpu_t *x86_get_percpu_for_cpu(uint cpu_num);
+
+#if 0
+#define X86_PERCPU_GET(field) (_Generic(((x86_get_percpu())->field), \
+    uint32_t: x86_read_gs_offset32, \
+    uint64_t: x86_read_gs_offset64, \
+    struct thread*: x86_read_gs_offset_ptr) \
+    (X86_PERCPU_FIELD_OFFSET(field)))
+
+#define X86_PERCPU_SET(field, value) (_Generic(((x86_get_percpu())->field), \
+    uint32_t: x86_write_gs_offset32, \
+    uint64_t: x86_write_gs_offset64, \
+    struct thread*: x86_write_gs_offset_ptr) \
+    (X86_PERCPU_FIELD_OFFSET(field), value))
+#endif
+
+// get the current cpu number
+static inline uint x86_get_cpu_num(void) {
+    return x86_read_gs_offset32(X86_PERCPU_FIELD_OFFSET(cpu_num));
+}
+
+// get the current apic id
+static inline uint x86_get_apic_id(void) {
+    return x86_read_gs_offset32(X86_PERCPU_FIELD_OFFSET(apic_id));
+}
+
+// get/set the current thread
+struct thread;
+
+static inline struct thread *x86_get_current_thread(void) {
+    return (struct thread *)x86_read_gs_offset_ptr(X86_PERCPU_FIELD_OFFSET(current_thread));
+}
+
+static inline void x86_set_current_thread(struct thread *t) {
+    x86_write_gs_offset_ptr(X86_PERCPU_FIELD_OFFSET(current_thread), t);
+}

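The #if 0'd X86_PERCPU_GET/X86_PERCPU_SET macros sketch a type-dispatched accessor: _Generic inspects the named field's type and selects the width-matched gs-offset helper, so one macro serves uint32_t, uint64_t, and pointer fields alike. Were the block enabled, call sites would look roughly like this (hypothetical, since the macros are disabled in this commit):

    // assumes cpu_num is a uint32_t field and current_thread a struct thread *
    uint32_t cpu = X86_PERCPU_GET(cpu_num);   // dispatches to x86_read_gs_offset32(...)
    X86_PERCPU_SET(current_thread, t);        // dispatches to x86_write_gs_offset_ptr(...)
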
@@ -8,7 +8,10 @@
 #include <arch/x86/mp.h>
 
 #include <assert.h>
+#include <lk/err.h>
+#include <arch/mp.h>
+#include <arch/x86.h>
 #include <arch/arch_ops.h>
 #include <sys/types.h>
 
 // the boot cpu's percpu struct

@@ -16,7 +19,7 @@ static x86_percpu_t x86_boot_percpu;
 // pointer to an array of percpu structs for each of the secondary cpus
 static x86_percpu_t **x86_ap_percpus;
 
-static x86_percpu_t *percpu_for_cpu(uint cpu_num) {
+x86_percpu_t *x86_get_percpu_for_cpu(uint cpu_num) {
     DEBUG_ASSERT(cpu_num < SMP_MAX_CPUS);
     if (cpu_num == 0) {
         return &x86_boot_percpu;

@@ -26,8 +29,9 @@ static x86_percpu_t *percpu_for_cpu(uint cpu_num) {
 }
 
 void x86_percpu_init_early(uint cpu_num, uint apic_id) {
-    x86_percpu_t *percpu = percpu_for_cpu(cpu_num);
+    x86_percpu_t *percpu = x86_get_percpu_for_cpu(cpu_num);
+
     // initialize the percpu structure for this cpu
     percpu->self = percpu;
     percpu->cpu_num = cpu_num;
     percpu->apic_id = apic_id;

@@ -37,6 +41,13 @@ void x86_percpu_init_early(uint cpu_num, uint apic_id) {
     write_msr(X86_MSR_IA32_KERNEL_GS_BASE, 0);
     write_msr(X86_MSR_IA32_GS_BASE, (uint64_t)percpu);
 #else
-//#error implement
+#error implement
 #endif
 }
+
+status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
+    PANIC_UNIMPLEMENTED;
+}
+
+void arch_mp_init_percpu(void) {
+}

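Per the commit message, only the boot cpu is set up for now. The implied ordering: x86_percpu_init_early() must run before anything touches the gs-relative accessors, since until IA32_GS_BASE is programmed, gs:0 points nowhere useful. A hedged sketch of the boot-cpu call (the call site and apic id source are assumptions, not shown in this diff):

    // somewhere in early arch init, before threads or the scheduler exist
    void example_boot_cpu_init(void) {
        x86_percpu_init_early(0 /* cpu_num: boot cpu */, 0 /* apic_id, assumed */);
        // from here on, x86_get_cpu_num() and x86_get_current_thread() are usable
    }
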
@@ -6,6 +6,11 @@ MODULE_OPTIONS := extra_warnings
 
 # x86 code always runs with the mmu enabled
 WITH_KERNEL_VM := 1
+ifneq ($(CPU),legacy)
+WITH_SMP ?= 1
+else
+WITH_SMP ?= 0
+endif
 
 ifeq ($(SUBARCH),x86-32)
 MEMBASE ?= 0x00000000

@@ -41,9 +46,15 @@ GLOBAL_DEFINES += \
 	KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET) \
 	KERNEL_ASPACE_BASE=$(KERNEL_ASPACE_BASE) \
 	KERNEL_ASPACE_SIZE=$(KERNEL_ASPACE_SIZE) \
-	SMP_MAX_CPUS=1 \
 	ARCH_HAS_MMU=1
 
+ifeq ($(WITH_SMP),1)
+SMP_MAX_CPUS ?= 8
+GLOBAL_DEFINES += \
+	WITH_SMP=1 \
+	SMP_MAX_CPUS=$(SMP_MAX_CPUS)
+endif
+
 MODULE_SRCS += \
 	$(SUBARCH_DIR)/start.S \

@@ -121,10 +132,12 @@ else ifeq ($(SUBARCH),x86-32)
 ARCH_COMPILEFLAGS += -march=i686
 ARCH_OPTFLAGS := -O2
 GLOBAL_DEFINES += X86_LEGACY=0
+GLOBAL_DEFINES += WITH_SMP=1
 else ifeq ($(SUBARCH),x86-64)
 ARCH_COMPILEFLAGS += -march=x86-64
 ARCH_OPTFLAGS := -O2
 GLOBAL_DEFINES += X86_LEGACY=0
+GLOBAL_DEFINES += WITH_SMP=1
 endif
 
 LIBGCC := $(shell $(TOOLCHAIN_PREFIX)gcc $(GLOBAL_COMPILEFLAGS) $(ARCH_COMPILEFLAGS) -print-libgcc-file-name)

@@ -17,18 +17,19 @@
 #include <arch/x86/descriptor.h>
 #include <arch/fpu.h>
 
+#if !WITH_SMP
 /* we're uniprocessor at this point for x86, so store a global pointer to the current thread */
 struct thread *_current_thread;
+#endif
 
 static void initial_thread_func(void) __NO_RETURN;
 static void initial_thread_func(void) {
-    int ret;
-
     /* release the thread lock that was implicitly held across the reschedule */
     spin_unlock(&thread_lock);
     arch_enable_ints();
 
-    ret = _current_thread->entry(_current_thread->arg);
+    thread_t *ct = arch_get_current_thread();
+    int ret = ct->entry(ct->arg);
 
     thread_exit(ret);
 }

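The payoff of routing through arch_get_current_thread(): a single global can only name one thread, while the gs-based path reads the executing cpu's own slot. On an SMP x86-64 build the call chain collapses to one instruction (conceptual trace assembled from the definitions in this diff, not compiler output):

    // arch_get_current_thread()                          arch_ops header
    //   -> x86_get_current_thread()                      arch/x86/mp.h
    //     -> x86_read_gs_offset_ptr(
    //            offsetof(x86_percpu_t, current_thread))
    //       -> movq %gs:<offset>, %reg                   // no cpu-id lookup, no array index
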
@@ -38,6 +38,9 @@ void mp_reschedule(mp_cpu_mask_t target, uint flags) {
|
||||
target &= ~mp.realtime_cpus;
|
||||
}
|
||||
target &= ~(1U << local_cpu);
|
||||
if (target == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
LTRACEF("local %d, post mask target now 0x%x\n", local_cpu, target);
|
||||
|
||||
|
||||