12 Commits

Author SHA1 Message Date
David Benjamin
13d4161b45 Fill in missing PRI* macros for inttypes.h.
inttypes.h is supposed to define print macros for the various integers
defined in stdint.h. This change fills in the sized macros for all the
various suffixes. The 'o'-suffixed variants are omitted because, at a
glance, lib/libc/printf.c does not support them.

Note to reviewers: these should be checked against the typedefs in
stdint.h and the prefixes supported by this libc's printf implementation.

Change-Id: I71a9425854f05a2c189389dea53a5748d3c1dd2d
2017-05-14 19:32:58 -07:00
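
A quick usage note on the macros added above (a sketch, not part of the
change): they are used the standard C99 way with this libc's printf, e.g.

    #include <inttypes.h>
    #include <stdio.h>

    void print_example(void)
    {
        uint32_t addr = 0xdeadbeef;
        uint64_t big  = 1ULL << 40;

        /* per the new header, PRIx32 expands to "x" and PRIu64 to "llu" */
        printf("addr 0x%" PRIx32 " big %" PRIu64 "\n", addr, big);
    }
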
Arve Hjønnevåg
54786a2d09 [arch][arm] Add USER_ASPACE_BASE and USER_ASPACE_SIZE to GLOBAL_DEFINES
Also adds default values.

Change-Id: Iaab601681d3129f21a0d5457af2442dbda08613d
2017-05-14 19:24:25 -07:00
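
For reference, a project that wants a different user address space layout
than the defaults added here can assign the variables before the arch rules
are parsed; a hypothetical project fragment (the values are illustrative
only):

    # hypothetical project .mk fragment
    USER_ASPACE_BASE := 0x00008000
    USER_ASPACE_SIZE := 0x3fff8000

Because arch/arm/rules.mk uses ?=, these assignments take precedence and
also flow into GLOBAL_DEFINES.
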
Arve Hjønnevåg
cb9960b3d7 [kernel][vm] Add support for mapping a page array
Change-Id: I12fed431a6281eb33f68335cb3f97c27d02c9237
2017-05-14 19:11:37 -07:00
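
The call this adds, vmm_alloc_physical_etc() (see the include/kernel/vm.h
hunk further below), maps an array of page-aligned physical addresses into
one contiguous virtual region. A minimal calling sketch, assuming two
discontiguous pages mapped cached into the kernel aspace (the flags chosen
here are illustrative):

    #include <err.h>
    #include <sys/types.h>
    #include <kernel/vm.h>

    static void *map_two_pages(paddr_t page0, paddr_t page1)
    {
        paddr_t pages[2] = { page0, page1 };
        void *va = NULL;

        /* size / paddr_count is the per-entry size, so each entry covers one page */
        status_t ret = vmm_alloc_physical_etc(vmm_get_kernel_aspace(), "two_pages",
                                              2 * PAGE_SIZE, &va, 0 /* align_log2 */,
                                              pages, 2 /* paddr_count */,
                                              0 /* vmm_flags */, ARCH_MMU_FLAG_CACHED);
        return (ret == NO_ERROR) ? va : NULL;
    }
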
Arve Hjønnevåg
bf5587d41a [lib][libc] Allow overriding __stdio_FILEs
Change-Id: Id5e62858c4fae72e5c1e1044885720e6ad3d3ab5
2017-05-14 19:11:22 -07:00
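
With the definition weak, another libc can supply its own array and win at
link time. A heavily hedged sketch, assuming FILE wraps an io_handle_t
pointer the way DEFINE_STDIO_DESC in the lib/libc/stdio.c hunk below does
(the io handle and header path are hypothetical):

    #include <stdio.h>
    #include <lib/io.h>

    extern io_handle_t my_uart_io;   /* hypothetical io handle */

    /* strong definition overrides the __WEAK one in lib/libc */
    FILE __stdio_FILEs[3] = {
        { .io = &my_uart_io },  /* stdin */
        { .io = &my_uart_io },  /* stdout */
        { .io = &my_uart_io },  /* stderr */
    };
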
Arve Hjønnevåg
e3939c9a3f [arch][arm/arm64][mmu] Add missing barriers/tlbinv/pagetableinit.
Change-Id: Ia9f6bd89f981213aa2086a1a96cb95167df55ff4
2017-05-14 19:05:54 -07:00
Arve Hjønnevåg
060236b7ed [arch][arm] Don't use the arm64 compiler to compile arm code
When compiling a component in arm mode within an arm64 kernel,
TOOLCHAIN_PREFIX points to the arm64 compiler and should not
be used here.

Change-Id: I78ac1112826b5fdda325b370779226cfe699f70e
2017-05-14 19:00:42 -07:00
Arve Hjønnevåg
692b75138c [arch][arm][mmu] Fix to compile if WITH_ARCH_MMU_PICK_SPOT is set
Change-Id: Ibd35fe6f8636849eb7931667254991df5594a74e
2017-05-14 19:00:28 -07:00
Arve Hjønnevåg
2160c38c29 [arch][arm64] Fix build when WITH_LIB_SYSCALL is set.
Removes conflicting arm64_syscall definition.

Change-Id: I6c7ec20c3da5b5051da18cbc938c83d89a14909f
2017-03-30 15:53:06 -07:00
Arve Hjønnevåg
4ea26e28fc [lib][libc] Restore WITH_CUSTOM_MALLOC.
Needed to compile libc-trusty.

Change-Id: I7ae3328d1c440f5ed164caa13354195303e69f05
2017-03-30 15:53:06 -07:00
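
For context, a module that brings its own allocator would define the flag
(any value, since lib/libc/rules.mk only checks ifndef) before lib/libc is
parsed and pull in its own heap; a hypothetical fragment:

    # hypothetical rules.mk fragment for a libc with its own malloc
    WITH_CUSTOM_MALLOC := true

    MODULE_DEPS += \
        lib/my_malloc   # hypothetical module providing malloc/free/memalign
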
Arve Hjønnevåg
b32a0fd092 make: Add MODULE_STATIC_LIB
If a module sets MODULE_STATIC_LIB to true, that module will generate
a .a file instead of a partially linked .o file.

When WITH_LINKER_GC is enabled, unreferenced code and data in C files
are dropped from the final ELF file. This does not work for most
assembly files, since they put code and data directly into the .text
and .data sections. If a .a file is used instead, however, files that
have no external references are dropped regardless of which sections
they have.

If MODULE_STATIC_LIB is true, this change also disables LK_INIT_HOOK*,
since files that use an init hook as their only entry point would also
be dropped.

Change-Id: I846ac1bf63018d1d713ca841e03191b1fac35bf4
2017-03-30 15:53:06 -07:00
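
An assembly-heavy module that wants linker GC to drop its unused objects
would opt in like this (a sketch; the module contents are hypothetical):

    # hypothetical rules.mk for an assembly-heavy crypto module
    LOCAL_DIR := $(GET_LOCAL_DIR)
    MODULE := $(LOCAL_DIR)

    MODULE_STATIC_LIB := true   # emit a .a so unreferenced objects can be dropped

    MODULE_SRCS += \
        $(LOCAL_DIR)/sha256-armv8.S

    include make/module.mk

Note that, per the change, such a module cannot rely on LK_INIT_HOOK* for
its entry points.
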
Arve Hjønnevåg
8b55dedf4c [arch][arm/arm64] Define make variables to enable use of optional simd instructions.
Support for these instructions can be detected at runtime, but then the unused
code cannot be eliminated at build time.

Change-Id: I51fae84fe4dfb5c9741d732f1f04ac765339175a
2017-03-30 15:49:00 -07:00
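
The intent is that a library keys its optional SIMD sources and defines off
these variables at build time instead of probing at runtime; a hedged
sketch of a consumer (module and file names are hypothetical):

    # hypothetical fragment in a crypto module's rules.mk
    ifeq (true,$(call TOBOOL,$(USE_ARM_V8_AES)))
    MODULE_SRCS += $(LOCAL_DIR)/aes-armv8.S
    MODULE_DEFINES += USE_ARM_V8_AES=1
    endif
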
Arve Hjønnevåg
f12796e834 [arch][arm] Add armv8-a
Allow armv8 systems to set ARM_CPU to armv8-a to use new armv8 instructions
in 32-bit code.

Change-Id: Idad8d5fd81c71bab2f306923df7d342bac742c28
2017-03-30 15:49:00 -07:00
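
A target with a 64-bit-capable core built in 32-bit mode would select this
in its target or project makefile; a minimal sketch (the surrounding
settings are hypothetical):

    # hypothetical target rules.mk
    ARCH := arm
    ARM_CPU := armv8-a
    # ARM_WITHOUT_VFP_NEON := true   # uncomment to drop the VFP/NEON flags
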
17 changed files with 183 additions and 24 deletions

View File

@@ -34,6 +34,7 @@
#include <arch/arm.h>
#include <arch/arm/mmu.h>
#include <kernel/vm.h>
#include <lk/init.h>
#define LOCAL_TRACE 0
#define TRACE_CONTEXT_SWITCH 0
@@ -224,6 +225,13 @@ void arm_mmu_init(void)
uint32_t n = __builtin_clz(KERNEL_ASPACE_BASE) + 1;
DEBUG_ASSERT(n <= 7);
/*
* TODO: support other sizes, arch_mmu_init_aspace below allocates a
* page table for a 1GB user address space, so ttbcr should match.
*/
DEBUG_ASSERT(n <= 2);
n = 2;
uint32_t ttbcr = (1<<4) | n; /* disable TTBCR0 and set the split between TTBR0 and TTBR1 */
arm_write_ttbr1(arm_read_ttbr0());
@@ -235,6 +243,28 @@ void arm_mmu_init(void)
#endif
}
static void arm_secondary_mmu_init(uint level)
{
uint32_t cur_ttbr0;
cur_ttbr0 = arm_read_ttbr0();
/* push out kernel mappings to ttbr1 */
arm_write_ttbr1(cur_ttbr0);
/*
* TODO: support other sizes, arch_mmu_init_aspace below allocates a
* page table for a 1GB user address space, so ttbcr should match.
*/
/* setup a user-kernel split */
arm_write_ttbcr(2);
arm_invalidate_tlb_global();
}
LK_INIT_HOOK_FLAGS(archarmmmu, arm_secondary_mmu_init,
LK_INIT_LEVEL_ARCH_EARLY, LK_INIT_FLAG_SECONDARY_CPUS);
void arch_disable_mmu(void)
{
arm_write_sctlr(arm_read_sctlr() & ~(1<<0)); // mmu disabled
@@ -259,6 +289,7 @@ void arch_mmu_context_switch(arch_aspace_t *aspace)
LTRACEF("ttbr 0x%x, ttbcr 0x%x\n", ttbr, ttbcr);
arm_write_ttbr0(ttbr);
arm_write_ttbcr(ttbcr);
arm_invalidate_tlb_global();
}
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags)
@@ -481,7 +512,8 @@ static inline bool are_regions_compatible(uint new_region_flags,
}
vaddr_t arch_mmu_pick_spot(vaddr_t base, uint prev_region_flags,
vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace,
vaddr_t base, uint prev_region_flags,
vaddr_t end, uint next_region_flags,
vaddr_t align, size_t size, uint flags)
{
@@ -737,6 +769,10 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
aspace->tt_virt = va;
aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
/* zero the top level translation table */
/* XXX remove when PMM starts returning pre-zeroed pages */
memset(aspace->tt_virt, 0, PAGE_SIZE);
}
LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);

View File

@@ -34,6 +34,12 @@
/* echo | gcc -E -dM - to dump builtin defines */
#if defined(__ARM_ARCH_8A__)
#ifndef ARM_ARCH_LEVEL
#define ARM_ARCH_LEVEL 8
#endif
#endif
#if defined(__ARM_ARCH_7EM__)
#define ARM_ARCH_7EM 1
#endif

View File

@@ -48,6 +48,8 @@
#define CACHE_LINE 32
#elif ARM_CPU_CORTEX_A15
#define CACHE_LINE 64
#elif ARM_CPU_CORTEX_ARMV8_A
#define CACHE_LINE 64
#else
#error unknown cpu
#endif

View File

@@ -193,6 +193,31 @@ GLOBAL_DEFINES += \
HANDLED_CORE := true
ENABLE_THUMB := false # armemu doesn't currently support thumb properly
endif
ifeq ($(ARM_CPU),armv8-a)
GLOBAL_DEFINES += \
ARM_CPU_CORTEX_ARMV8_A=1 \
ARM_WITH_CP15=1 \
ARM_WITH_MMU=1 \
ARM_ISA_ARMv7=1 \
ARM_ISA_ARMv7A=1 \
ARM_WITH_THUMB=1 \
ARM_WITH_THUMB2=1 \
ARM_WITH_CACHE=1 \
ARM_WITH_L2=1
ifneq ($(ARM_WITHOUT_VFP_NEON),true)
GLOBAL_DEFINES += \
ARM_WITH_VFP=1 \
ARM_WITH_NEON=1
# Enable optional instructions unless platform already disabled them
USE_ARM_V7_NEON ?= true
USE_ARM_V8_AES ?= true
USE_ARM_V8_PMULL ?= true
USE_ARM_V8_SHA1 ?= true
USE_ARM_V8_SHA2 ?= true
endif
HANDLED_CORE := true
endif
ifneq ($(HANDLED_CORE),true)
$(error $(LOCAL_DIR)/rules.mk doesnt have logic for arm core $(ARM_CPU))
@@ -234,12 +259,17 @@ WITH_LINKER_GC ?= 1
# we have a mmu and want the vmm/pmm
WITH_KERNEL_VM ?= 1
USER_ASPACE_BASE ?= 0x00001000
USER_ASPACE_SIZE ?= 0x3fffe000
# for arm, have the kernel occupy the entire top 3GB of virtual space,
# but put the kernel itself at 0x80000000.
# this leaves 0x40000000 - 0x80000000 open for kernel space to use.
GLOBAL_DEFINES += \
KERNEL_ASPACE_BASE=0x40000000 \
KERNEL_ASPACE_SIZE=0xc0000000
KERNEL_ASPACE_SIZE=0xc0000000 \
USER_ASPACE_BASE=$(USER_ASPACE_BASE) \
USER_ASPACE_SIZE=$(USER_ASPACE_SIZE)
KERNEL_BASE ?= 0x80000000
KERNEL_LOAD_OFFSET ?= 0

View File

@@ -4,12 +4,17 @@ ARCH_arm_TOOLCHAIN_INCLUDED := 1
# try to find the toolchain
ifndef ARCH_arm_TOOLCHAIN_PREFIX
FOUNDTOOL=
$(info $(TOOLCHAIN_PREFIX))
# if TOOLCHAIN_PREFIX is not empty, try to use it first
# if TOOLCHAIN_PREFIX is not empty and ARCH_arm64_TOOLCHAIN_PREFIX is empty,
# try to use TOOLCHAIN_PREFIX first
ifneq ($(TOOLCHAIN_PREFIX),)
ifeq ($(ARCH_arm64_TOOLCHAIN_PREFIX),)
ARCH_arm_TOOLCHAIN_PREFIX := $(TOOLCHAIN_PREFIX)
FOUNDTOOL=$(shell which $(ARCH_arm_TOOLCHAIN_PREFIX)gcc)
endif
endif
# try a series of common arm toolchain prefixes in the path
ifeq ($(FOUNDTOOL),)
@@ -108,5 +113,11 @@ endif
ifeq ($(ARM_CPU),armemu)
ARCH_arm_COMPILEFLAGS += -march=armv7-a
endif
ifeq ($(ARM_CPU),armv8-a)
ARCH_arm_COMPILEFLAGS += -march=$(ARM_CPU)
ifneq ($(ARM_WITHOUT_VFP_NEON),true)
ARCH_arm_COMPILEFLAGS += -mfpu=vfpv3 -mfloat-abi=softfp
endif
endif
endif

View File

@@ -71,9 +71,8 @@ void arm64_sync_exception(struct arm64_iframe_long *iframe)
case 0b010001: /* syscall from arm32 */
case 0b010101: /* syscall from arm64 */
#ifdef WITH_LIB_SYSCALL
void arm64_syscall(struct arm64_iframe_long *iframe);
arch_enable_fiqs();
arm64_syscall(iframe);
arm64_syscall(iframe, (ec == 0x15) ? true : false);
arch_disable_fiqs();
return;
#else

View File

@@ -531,7 +531,7 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count
aspace->tt_virt, MMU_ARM64_GLOBAL_ASID);
} else {
ret = arm64_mmu_map(vaddr, paddr, count * PAGE_SIZE,
mmu_flags_to_pte_attr(flags),
mmu_flags_to_pte_attr(flags) | MMU_PTE_ATTR_NON_GLOBAL,
0, MMU_USER_SIZE_SHIFT,
MMU_USER_TOP_SHIFT, MMU_USER_PAGE_SIZE_SHIFT,
aspace->tt_virt, MMU_ARM64_USER_ASID);
@@ -595,13 +595,14 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
aspace->tt_virt = arm64_kernel_translation_table;
aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
} else {
size_t page_table_size = MMU_USER_PAGE_TABLE_ENTRIES_TOP * sizeof(pte_t);
//DEBUG_ASSERT(base >= 0);
DEBUG_ASSERT(base + size <= 1UL << MMU_USER_SIZE_SHIFT);
aspace->base = base;
aspace->size = size;
pte_t *va = pmm_alloc_kpages(1, NULL);
pte_t *va = memalign(page_table_size, page_table_size);
if (!va)
return ERR_NO_MEMORY;
@@ -609,8 +610,7 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
/* zero the top level translation table */
/* XXX remove when PMM starts returning pre-zeroed pages */
memset(aspace->tt_virt, 0, PAGE_SIZE);
memset(aspace->tt_virt, 0, page_table_size);
}
LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);
@@ -627,9 +627,7 @@ status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace)
// XXX make sure it's not mapped
vm_page_t *page = paddr_to_vm_page(aspace->tt_phys);
DEBUG_ASSERT(page);
pmm_free_page(page);
free(aspace->tt_virt);
return NO_ERROR;
}
@@ -651,6 +649,7 @@ void arch_mmu_context_switch(arch_aspace_t *aspace)
if (TRACE_CONTEXT_SWITCH)
TRACEF("ttbr 0x%llx, tcr 0x%llx\n", ttbr, tcr);
ARM64_TLBI(aside1, (uint64_t)MMU_ARM64_USER_ASID << 48);
DSB;
} else {
tcr = MMU_TCR_FLAGS_KERNEL;

View File

@@ -46,6 +46,15 @@ GLOBAL_DEFINES += \
SMP_MAX_CPUS=1
endif
ifeq (false,$(call TOBOOL,$(ARM_WITHOUT_VFP_NEON)))
# Enable optional instructions unless platform already disabled them
USE_ARM_V7_NEON ?= true
USE_ARM_V8_AES ?= true
USE_ARM_V8_PMULL ?= true
USE_ARM_V8_SHA1 ?= true
USE_ARM_V8_SHA2 ?= true
endif
ARCH_OPTFLAGS := -O2
# we have a mmu and want the vmm/pmm

View File

@@ -193,6 +193,7 @@ endif
CCACHE ?=
CC := $(CCACHE) $(TOOLCHAIN_PREFIX)gcc
LD := $(TOOLCHAIN_PREFIX)ld
AR := $(TOOLCHAIN_PREFIX)ar
OBJDUMP := $(TOOLCHAIN_PREFIX)objdump
OBJCOPY := $(TOOLCHAIN_PREFIX)objcopy
CPPFILT := $(TOOLCHAIN_PREFIX)c++filt

View File

@@ -240,9 +240,17 @@ __NONNULL((1));
/* allocate a region of virtual space that maps a physical piece of address space.
the physical pages that back this are not allocated from the pmm. */
status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags)
status_t vmm_alloc_physical_etc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, paddr_t *paddr, uint paddr_count, uint vmm_flags, uint arch_mmu_flags)
__NONNULL((1));
/* allocate a region of virtual space that maps a physical piece of address space.
the physical pages that back this are not allocated from the pmm. */
static inline status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags)
{
return vmm_alloc_physical_etc(aspace, name, size, ptr, align_log2,
&paddr, 1, vmm_flags, arch_mmu_flags);
} __NONNULL((1))
/* allocate a region of memory backed by newly allocated contiguous physical memory */
status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, uint vmm_flags, uint arch_mmu_flags)
__NONNULL((1));

View File

@@ -76,6 +76,9 @@ struct lk_init_struct {
const char *name;
};
#if MODULE_STATIC_LIB
#define LK_INIT_HOOK_FLAGS(a,b,c,d) _Pragma("GCC error \"init hooks are not fully compatible with static libraries\"")
#else
#define LK_INIT_HOOK_FLAGS(_name, _hook, _level, _flags) \
const struct lk_init_struct _init_struct_##_name __ALIGNED(sizeof(void *)) __SECTION(".lk_init") = { \
.level = _level, \
@@ -83,6 +86,7 @@ struct lk_init_struct {
.hook = _hook, \
.name = #_name, \
};
#endif
#define LK_INIT_HOOK(_name, _hook, _level) \
LK_INIT_HOOK_FLAGS(_name, _hook, _level, LK_INIT_FLAG_PRIMARY_CPU)

View File

@@ -349,16 +349,21 @@ status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size,
return r ? NO_ERROR : ERR_NO_MEMORY;
}
status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,
void **ptr, uint8_t align_log2, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags)
status_t vmm_alloc_physical_etc(vmm_aspace_t *aspace, const char *name, size_t size,
void **ptr, uint8_t align_log2, paddr_t *paddr, uint paddr_count,
uint vmm_flags, uint arch_mmu_flags)
{
status_t ret;
uint i;
size_t page_size;
LTRACEF("aspace %p name '%s' size 0x%zx ptr %p paddr 0x%lx vmm_flags 0x%x arch_mmu_flags 0x%x\n",
aspace, name, size, ptr ? *ptr : 0, paddr, vmm_flags, arch_mmu_flags);
LTRACEF("aspace %p name '%s' size 0x%zx ptr %p paddr 0x%lx... vmm_flags 0x%x arch_mmu_flags 0x%x\n",
aspace, name, size, ptr ? *ptr : 0, paddr[0], vmm_flags, arch_mmu_flags);
DEBUG_ASSERT(aspace);
DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
for (i = 0; i < paddr_count; i++) {
DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr[i]));
}
DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
if (!name)
@@ -368,7 +373,10 @@ status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,
return ERR_INVALID_ARGS;
if (size == 0)
return NO_ERROR;
if (!IS_PAGE_ALIGNED(paddr) || !IS_PAGE_ALIGNED(size))
if (!paddr_count)
return ERR_INVALID_ARGS;
page_size = size / paddr_count;
if (!IS_PAGE_ALIGNED(paddr[0]) || !IS_PAGE_ALIGNED(page_size))
return ERR_INVALID_ARGS;
vaddr_t vaddr = 0;
@@ -397,8 +405,11 @@ status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,
*ptr = (void *)r->base;
/* map all of the pages */
int err = arch_mmu_map(&aspace->arch_aspace, r->base, paddr, size / PAGE_SIZE, arch_mmu_flags);
LTRACEF("arch_mmu_map returns %d\n", err);
for (i = 0; i < paddr_count; i++) {
int err = arch_mmu_map(&aspace->arch_aspace, r->base + i * page_size,
paddr[i], page_size / PAGE_SIZE, arch_mmu_flags);
LTRACEF("arch_mmu_map returns %d\n", err);
}
ret = NO_ERROR;

View File

@@ -23,8 +23,29 @@
#ifndef __INTTYPES_H
#define __INTTYPES_H
#define PRId8 "hhd"
#define PRIi8 "hhi"
#define PRIu8 "hhu"
#define PRIx8 "hhx"
#define PRIX8 "hhX"
#define PRId16 "hd"
#define PRIi16 "hi"
#define PRIu16 "hu"
#define PRIx16 "hx"
#define PRIX16 "hX"
#define PRId32 "d"
#define PRIi32 "i"
#define PRIu32 "u"
#define PRIx32 "x"
#define PRIX32 "X"
#define PRId64 "lld"
#define PRIi64 "lli"
#define PRIu64 "llu"
#define PRIx64 "llx"
#define PRIX64 "llX"
#include <stdint.h>

View File

@@ -3,9 +3,12 @@ LOCAL_DIR := $(GET_LOCAL_DIR)
MODULE := $(LOCAL_DIR)
MODULE_DEPS := \
lib/heap \
lib/io
ifndef WITH_CUSTOM_MALLOC
MODULE_DEPS += lib/heap
endif
MODULE_SRCS += \
$(LOCAL_DIR)/atoi.c \
$(LOCAL_DIR)/bsearch.c \

View File

@@ -32,7 +32,7 @@
.io = &console_io, \
}
FILE __stdio_FILEs[3] = {
__WEAK FILE __stdio_FILEs[3] = {
DEFINE_STDIO_DESC(0), /* stdin */
DEFINE_STDIO_DESC(1), /* stdout */
DEFINE_STDIO_DESC(2), /* stderr */

View File

@@ -23,7 +23,7 @@ $(OUTELF): $(ALLMODULE_OBJS) $(EXTRA_OBJS) $(LINKER_SCRIPT) $(EXTRA_LINKER_SCRIP
@echo linking $@
$(NOECHO)$(SIZE) -t --common $(sort $(ALLMODULE_OBJS)) $(EXTRA_OBJS)
$(NOECHO)$(LD) $(GLOBAL_LDFLAGS) -dT $(LINKER_SCRIPT) $(addprefix -T,$(EXTRA_LINKER_SCRIPTS)) \
$(ALLMODULE_OBJS) $(EXTRA_OBJS) $(LIBGCC) -Map=$(OUTELF).map -o $@
--start-group $(ALLMODULE_OBJS) $(EXTRA_OBJS) --end-group $(LIBGCC) -Map=$(OUTELF).map -o $@
$(OUTELF).sym: $(OUTELF)
@echo generating symbols: $@

View File

@@ -4,6 +4,7 @@
# args:
# MODULE : module name (required)
# MODULE_SRCS : list of source files, local path (required)
# MODULE_STATIC_LIB : if true generate .a instead of .o
# MODULE_DEPS : other modules that this one depends on
# MODULE_DEFINES : #defines local to this module
# MODULE_OPTFLAGS : OPTFLAGS local to this module
@@ -62,6 +63,10 @@ MODULE_DEFINES += MODULE_SRCDEPS=\"$(subst $(SPACE),_,$(MODULE_SRCDEPS))\"
MODULE_DEFINES += MODULE_DEPS=\"$(subst $(SPACE),_,$(MODULE_DEPS))\"
MODULE_DEFINES += MODULE_SRCS=\"$(subst $(SPACE),_,$(MODULE_SRCS))\"
ifeq (true,$(call TOBOOL,$(MODULE_STATIC_LIB)))
MODULE_DEFINES += MODULE_STATIC_LIB=1
endif
# generate a per-module config.h file
MODULE_CONFIG := $(MODULE_BUILDDIR)/module_config.h
@@ -84,12 +89,25 @@ include make/compile.mk
#$(info MODULE_OBJS = $(MODULE_OBJS))
# build a ld -r style combined object
ifeq (true,$(call TOBOOL,$(MODULE_STATIC_LIB)))
MODULE_OBJECT := $(call TOBUILDDIR,$(MODULE_SRCDIR).mod.a)
$(MODULE_OBJECT): $(MODULE_OBJS) $(MODULE_EXTRA_OBJS)
@$(MKDIR)
@echo creating $@
$(NOECHO)rm -f $@
$(NOECHO)$(AR) rcs $@ $^
else
MODULE_OBJECT := $(call TOBUILDDIR,$(MODULE_SRCDIR).mod.o)
$(MODULE_OBJECT): $(MODULE_OBJS) $(MODULE_EXTRA_OBJS)
@$(MKDIR)
@echo linking $@
$(NOECHO)$(LD) $(GLOBAL_MODULE_LDFLAGS) -r $^ -o $@
endif
# track all of the source files compiled
ALLSRCS += $(MODULE_SRCS)
@@ -108,6 +126,7 @@ MODULE_SRCDIR :=
MODULE_BUILDDIR :=
MODULE_DEPS :=
MODULE_SRCS :=
MODULE_STATIC_LIB :=
MODULE_OBJS :=
MODULE_DEFINES :=
MODULE_OPTFLAGS :=