diff --git a/app/tests/mem_tests.c b/app/tests/mem_tests.c index fdd14fe1..0b76e646 100644 --- a/app/tests/mem_tests.c +++ b/app/tests/mem_tests.c @@ -225,7 +225,7 @@ usage: free(ptr); #endif } else if (argc == 3) { - void *ptr = (void *)argv[1].u; + void *ptr = argv[1].p; size_t len = argv[2].u; /* run the tests */ diff --git a/arch/arm/rules.mk b/arch/arm/rules.mk index a3fbb838..0a388f7c 100644 --- a/arch/arm/rules.mk +++ b/arch/arm/rules.mk @@ -285,6 +285,10 @@ ifeq ($(MEMVARS_SET),0) $(error missing MEMBASE or MEMSIZE variable, please set in target rules.mk) endif +GLOBAL_DEFINES += \ + MEMBASE=$(MEMBASE) \ + MEMSIZE=$(MEMSIZE) + LIBGCC := $(shell $(TOOLCHAIN_PREFIX)gcc $(GLOBAL_COMPILEFLAGS) $(ARCH_COMPILEFLAGS) $(THUMBCFLAGS) -print-libgcc-file-name) $(info LIBGCC = $(LIBGCC)) diff --git a/arch/arm64/mmu.c b/arch/arm64/mmu.c index 055364f0..c723bf83 100644 --- a/arch/arm64/mmu.c +++ b/arch/arm64/mmu.c @@ -193,12 +193,12 @@ static int alloc_page_table(paddr_t *paddrp, uint page_size_shift) if (ret != count) return ERR_NO_MEMORY; } else { - vaddr = heap_alloc(size, size); + vaddr = memalign(size, size); if (!vaddr) return ERR_NO_MEMORY; ret = arch_mmu_query((vaddr_t)vaddr, paddrp, NULL); if (ret) { - heap_free(vaddr); + free(vaddr); return ret; } } @@ -218,7 +218,7 @@ static void free_page_table(void *vaddr, paddr_t paddr, uint page_size_shift) panic("bad page table paddr 0x%lx\n", paddr); pmm_free_page(page); } else { - heap_free(vaddr); + free(vaddr); } } diff --git a/arch/arm64/rules.mk b/arch/arm64/rules.mk index 41a222d4..ee0e3330 100644 --- a/arch/arm64/rules.mk +++ b/arch/arm64/rules.mk @@ -81,6 +81,9 @@ KERNEL_LOAD_OFFSET ?= 0 endif +GLOBAL_DEFINES += \ + MEMBASE=$(MEMBASE) \ + MEMSIZE=$(MEMSIZE) # try to find the toolchain include $(LOCAL_DIR)/toolchain.mk diff --git a/arch/microblaze/rules.mk b/arch/microblaze/rules.mk index 54ca30d4..106b3696 100644 --- a/arch/microblaze/rules.mk +++ b/arch/microblaze/rules.mk @@ -48,6 +48,10 @@ KERNEL_BASE ?= 
$(MEMBASE) KERNEL_LOAD_OFFSET ?= 0 VECTOR_BASE_PHYS ?= 0 +GLOBAL_DEFINES += \ + MEMBASE=$(MEMBASE) \ + MEMSIZE=$(MEMSIZE) + # potentially generated files that should be cleaned out with clean make rule GENERATED += \ $(BUILDDIR)/linker.ld diff --git a/arch/or1k/rules.mk b/arch/or1k/rules.mk index 8c25a8a6..0e01f17c 100644 --- a/arch/or1k/rules.mk +++ b/arch/or1k/rules.mk @@ -37,6 +37,10 @@ GLOBAL_DEFINES += \ KERNEL_BASE=$(KERNEL_BASE) \ KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET) +GLOBAL_DEFINES += \ + MEMBASE=$(MEMBASE) \ + MEMSIZE=$(MEMSIZE) + # potentially generated files that should be cleaned out with clean make rule GENERATED += \ $(BUILDDIR)/linker.ld diff --git a/include/kernel/novm.h b/include/kernel/novm.h new file mode 100644 index 00000000..41ae73f6 --- /dev/null +++ b/include/kernel/novm.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2015 Google, Inc. All rights reserved + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __KERNEL_NOVM_H +#define __KERNEL_NOVM_H + +#include +#include +#include + +#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE) +#define IS_PAGE_ALIGNED(x) IS_ALIGNED(x, PAGE_SIZE) + +#define NOVM_ARENA_ANY (-1) +#define NOVM_ARENA_MAIN (0) +#define NOVM_ARENA_SECONDARY (1) + +void *novm_alloc_pages(size_t pages, int arena_index); +void novm_free_pages(void* address, size_t pages); +status_t novm_alloc_specific_pages(void *address, size_t pages); + +// You can call this once and it will give you some possibly unaligned memory +// that would otherwise go to waste. The memory can't be freed. +void *novm_alloc_unaligned(size_t *size_return); + +void novm_add_arena(const char *name, uintptr_t arena_start, uintptr_t arena_size); + +#endif diff --git a/include/kernel/vm.h b/include/kernel/vm.h index 433e7649..ec34da3a 100644 --- a/include/kernel/vm.h +++ b/include/kernel/vm.h @@ -175,6 +175,8 @@ void *pmm_alloc_kpages(uint count, struct list_node *list); /* Helper routine for pmm_alloc_kpages. */ static inline void *pmm_alloc_kpage(void) { return pmm_alloc_kpages(1, NULL); } +size_t pmm_free_kpages(void *ptr, uint count); + /* physical to virtual */ void *paddr_to_kvaddr(paddr_t pa); diff --git a/include/lib/console.h b/include/lib/console.h index a81aefc4..f270b3fe 100644 --- a/include/lib/console.h +++ b/include/lib/console.h @@ -32,6 +32,7 @@ typedef struct { const char *str; unsigned long u; + void* p; long i; bool b; } cmd_args; diff --git a/include/lib/page_alloc.h b/include/lib/page_alloc.h new file mode 100644 index 00000000..858d83fe --- /dev/null +++ b/include/lib/page_alloc.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2015 Google, Inc. 
All rights reserved + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __LIB_PAGE_ALLOC_H +#define __LIB_PAGE_ALLOC_H + +#include +#include +#include + +// to pick up PAGE_SIZE, PAGE_ALIGN, etc +#if WITH_KERNEL_VM +#include +#else +#include +#endif + +/* A simple page-aligned wrapper around the pmm or novm implementation of + * the underlying physical page allocator. Used by system heaps or any + * other user that wants pages of memory but doesn't want to use LK + * specific apis. + */ + +__BEGIN_CDECLS; + +void *page_alloc(size_t pages); +void page_free(void *ptr, size_t pages); + +// You can call this once at the start, and it will either return a page or it +// will return some non-page-aligned memory that would otherwise go to waste. 
+void *page_first_alloc(size_t *size_return); + +__END_CDECLS; + +#endif diff --git a/include/malloc.h b/include/malloc.h index a93b0562..abafbe95 100644 --- a/include/malloc.h +++ b/include/malloc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Travis Geiselbrecht + * Copyright (c) 2008-2015 Travis Geiselbrecht * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files @@ -27,15 +27,8 @@ #include #include -__BEGIN_CDECLS - -void *malloc(size_t size) __MALLOC; -void *memalign(size_t boundary, size_t size) __MALLOC; -void *calloc(size_t count, size_t size) __MALLOC; -void *realloc(void *ptr, size_t size) __MALLOC; -void free(void *ptr); - -__END_CDECLS +/* lib/heap provides malloc/free definitions */ +#include #endif diff --git a/kernel/novm/novm.c b/kernel/novm/novm.c new file mode 100644 index 00000000..ff9b478d --- /dev/null +++ b/kernel/novm/novm.c @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2015 Google, Inc. All rights reserved + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/novm.h" + +#include +#include +#include +#include +#include +#include +#include + +#define LOCAL_TRACE 0 + +struct novm_arena { + mutex_t lock; + const char *name; + size_t pages; + char *map; + char *base; + size_t size; + + // We divide the memory up into pages. If there is memory we can use before + // the first aligned page address, then we record it here and the heap will use + // it. +#define MINIMUM_USEFUL_UNALIGNED_SIZE 64 + void* unaligned_area; + size_t unaligned_size; +}; + + +/* not a static vm, not using the kernel vm */ +extern int _end; +extern int _end_of_ram; + +#define MEM_START ((uintptr_t)&_end) +#define MEM_SIZE ((MEMBASE + MEMSIZE) - MEM_START) +#define DEFAULT_MAP_SIZE (MEMSIZE >> PAGE_SIZE_SHIFT) + +/* a static list of arenas */ +#ifndef NOVM_MAX_ARENAS +#define NOVM_MAX_ARENAS 1 +#endif +struct novm_arena arena[NOVM_MAX_ARENAS]; + +void *novm_alloc_unaligned(size_t *size_return) +{ + /* only do the unaligned thing in the first arena */ + if (arena[0].unaligned_area != NULL) { + *size_return = arena[0].unaligned_size; + void *result = arena[0].unaligned_area; + arena[0].unaligned_area = NULL; + arena[0].unaligned_size = 0; + return result; + } + *size_return = PAGE_SIZE; + return novm_alloc_pages(1, NOVM_ARENA_ANY); +} + +static bool in_arena(struct novm_arena *n, void* p) +{ + if (n->size == 0) + return false; + + char *ptr = (char *)p; + char *base = n->base; + return ptr >= base && ptr < base + n->size; +} + +static void novm_init_helper(struct novm_arena* n, const char *name, + uintptr_t arena_start, uintptr_t arena_size, + char* default_map, size_t default_map_size) +{ + uintptr_t start = ROUNDUP(arena_start, PAGE_SIZE); + uintptr_t size = 
ROUNDDOWN(arena_start + arena_size, PAGE_SIZE) - start; + + mutex_init(&n->lock); + + size_t map_size = size >> PAGE_SIZE_SHIFT; + char* map = default_map; + if (map == NULL || default_map_size < map_size) { + // allocate the map out of the arena itself + map = (char *)arena_start; + + // Grab enough map for 16Mbyte of arena each time around the loop. + while (start - arena_start < map_size) { + start += PAGE_SIZE; + size -= PAGE_SIZE; + map_size--; + } + + if ((char *)start - (map + ROUNDUP(map_size, 4)) >= MINIMUM_USEFUL_UNALIGNED_SIZE) { + n->unaligned_area = map + ROUNDUP(map_size, 4); + n->unaligned_size = (char *)start - (map + ROUNDUP(map_size, 4)); + } + } else if (start - arena_start >= MINIMUM_USEFUL_UNALIGNED_SIZE) { + n->unaligned_area = (char *)arena_start; + n->unaligned_size = start - arena_start; + } + n->name = name; + n->map = map; + memset(n->map, 0, map_size); + n->pages = map_size; + n->base = (char *)start; + n->size = size; +} + +void novm_add_arena(const char *name, uintptr_t arena_start, uintptr_t arena_size) +{ + for (uint i = 0; i < NOVM_MAX_ARENAS; i++) { + if (arena[i].pages == 0) { + novm_init_helper(&arena[i], name, arena_start, arena_size, NULL, 0); + return; + } + } + panic("novm_add_arena: too many arenas added, bump NOVM_MAX_ARENAS!\n"); +} + +static void novm_init(uint level) +{ + static char mem_allocation_map[DEFAULT_MAP_SIZE]; + novm_init_helper(&arena[0], "main", MEM_START, MEM_SIZE, mem_allocation_map, DEFAULT_MAP_SIZE); +} + +LK_INIT_HOOK(novm, &novm_init, LK_INIT_LEVEL_PLATFORM_EARLY - 1); + +void *novm_alloc_helper(struct novm_arena *n, size_t pages) +{ + if (pages == 0 || pages > n->pages) + return NULL; + + mutex_acquire(&n->lock); + for (size_t i = 0; i <= n->pages - pages; i++) { + bool found = true; + for (size_t j = 0; j < pages; j++) { + if (n->map[i + j] != 0) { + i += j; + found = false; + break; + } + } + if (found) { + memset(n->map + i, 1, pages); + mutex_release(&n->lock); + return n->base + (i << 
PAGE_SIZE_SHIFT); + } + } + mutex_release(&n->lock); + + return NULL; +} + +void* novm_alloc_pages(size_t pages, int arena_index) +{ + LTRACEF("pages %zu\n", pages); + + if (arena_index < 0) { + /* allocate from any arena */ + for (uint i = 0; i < NOVM_MAX_ARENAS; i++) { + void *result = novm_alloc_helper(&arena[i], pages); + if (result) + return result; + } + } else if (arena_index < NOVM_MAX_ARENAS) { + /* allocate from a specific index */ + return novm_alloc_helper(&arena[arena_index], pages); + } + + return NULL; +} + +void novm_free_pages(void* address, size_t pages) +{ + LTRACEF("address %p, pages %zu\n", address, pages); + + struct novm_arena *n = NULL; + for (uint i = 0; i < NOVM_MAX_ARENAS; i++) { + if (in_arena(&arena[i], address)) { + n = &arena[i]; + break; + } + } + if (!n) + return; + + DEBUG_ASSERT(in_arena(n, address)); + + size_t index = ((char *)address - (char*)(n->base)) >> PAGE_SIZE_SHIFT; + char *map = n->map; + + mutex_acquire(&n->lock); + for (size_t i = 0; i < pages; i++) map[index + i] = 0; + mutex_release(&n->lock); +} + +status_t novm_alloc_specific_pages(void *address, size_t pages) +{ + LTRACEF("address %p, pages %zu\n", address, pages); + + struct novm_arena *n = NULL; + for (uint i = 0; i < NOVM_MAX_ARENAS; i++) { + if (in_arena(&arena[i], address)) { + n = &arena[i]; + break; + } + } + if (!n) + return ERR_NOT_FOUND; + + size_t index = ((char *)address - (char*)(n->base)) >> PAGE_SIZE_SHIFT; + char *map = n->map; + + status_t err = NO_ERROR; + + mutex_acquire(&n->lock); + for (size_t i = 0; i < pages; i++) { + if (map[index + i] != 0) { + err = ERR_NO_MEMORY; + break; + } + map[index + i] = 1; + } + mutex_release(&n->lock); + + return err; +} + + +#if LK_DEBUGLEVEL > 1 +#if WITH_LIB_CONSOLE + +#include + +static int cmd_novm(int argc, const cmd_args *argv); +static void novm_dump(void); + +STATIC_COMMAND_START +STATIC_COMMAND("novm", "page allocator (for devices without VM support) debug commands", &cmd_novm) 
+STATIC_COMMAND_END(novm); + +static int cmd_novm(int argc, const cmd_args *argv) +{ + if (argc < 2) { +notenoughargs: + printf("not enough arguments\n"); +usage: + printf("usage:\n"); + printf("\t%s info\n", argv[0].str); + printf("\t%s alloc [arena #]\n", argv[0].str); + printf("\t%s free
[numberofpages]\n", argv[0].str); + return -1; + } + + if (strcmp(argv[1].str, "info") == 0) { + novm_dump(); + } else if (strcmp(argv[1].str, "alloc") == 0) { + if (argc < 3) goto notenoughargs; + + int arena_index = (argc >= 4) ? argv[3].i : NOVM_ARENA_ANY; + void *ptr = novm_alloc_pages(argv[2].u, arena_index); + printf("novm_alloc_pages returns %p\n", ptr); + } else if (strcmp(argv[1].str, "free") == 0) { + if (argc < 3) goto notenoughargs; + size_t pages = (argc >= 4) ? argv[3].u : 1; + novm_free_pages(argv[2].p, pages); + printf("novm_free_pages: %zd pages at %p\n", pages, argv[2].p); + } else { + printf("unrecognized command\n"); + goto usage; + } + + return 0; +} + +static void novm_dump_arena(struct novm_arena *n) +{ + if (n->pages == 0) { + return; + } + + mutex_acquire(&n->lock); + printf("name '%s', %d pages, each %zdk (%zdk in all)\n", n->name, n->pages, PAGE_SIZE >> 10, (PAGE_SIZE * n->pages) >> 10); + printf(" range: %p-%p\n", (void *)n->base, (char *)n->base + n->size); + printf(" unaligned range: %p-%p\n", n->unaligned_area, n->unaligned_area + n->unaligned_size); + unsigned i; + size_t in_use = 0; + for (i = 0; i < n->pages; i++) if (n->map[i] != 0) in_use++; + printf(" %zd/%zd in use\n", in_use, n->pages); +#define MAX_PRINT 1024u + for (i = 0; i < MAX_PRINT && i < n->pages; i++) { + if ((i & 63) == 0) printf(" "); + printf("%c", n->map[i] ? 
'*' : '.'); + if ((i & 63) == 63) printf("\n"); + } + if (i == MAX_PRINT && n->pages > MAX_PRINT) { + printf(" etc., %zd more pages.", n->pages - MAX_PRINT); + } + printf("\n"); + mutex_release(&n->lock); +} + +static void novm_dump(void) +{ + for (uint i = 0; i < NOVM_MAX_ARENAS; i++) { + novm_dump_arena(&arena[i]); + } +} + +#endif +#endif + diff --git a/kernel/novm/rules.mk b/kernel/novm/rules.mk new file mode 100644 index 00000000..73e4db8f --- /dev/null +++ b/kernel/novm/rules.mk @@ -0,0 +1,8 @@ +LOCAL_DIR := $(GET_LOCAL_DIR) + +MODULE := $(LOCAL_DIR) + +MODULE_SRCS += \ + $(LOCAL_DIR)/novm.c + +include make/module.mk diff --git a/kernel/rules.mk b/kernel/rules.mk index aab917b3..95f5f369 100644 --- a/kernel/rules.mk +++ b/kernel/rules.mk @@ -19,6 +19,8 @@ MODULE_SRCS := \ ifeq ($(WITH_KERNEL_VM),1) MODULE_DEPS += kernel/vm +else +MODULE_DEPS += kernel/novm endif include make/module.mk diff --git a/kernel/vm/pmm.c b/kernel/vm/pmm.c index fea77c86..0b9afc17 100644 --- a/kernel/vm/pmm.c +++ b/kernel/vm/pmm.c @@ -262,6 +262,28 @@ void *pmm_alloc_kpages(uint count, struct list_node *list) return paddr_to_kvaddr(pa); } +size_t pmm_free_kpages(void *_ptr, uint count) +{ + LTRACEF("ptr %p, count %u\n", _ptr, count); + + uint8_t *ptr = (uint8_t *)_ptr; + + struct list_node list; + list_initialize(&list); + + while (count > 0) { + vm_page_t *p = address_to_page(kvaddr_to_paddr(ptr)); + if (p) { + list_add_tail(&list, &p->node); + } + + ptr += PAGE_SIZE; + count--; + } + + return pmm_free(&list); +} + size_t pmm_alloc_contiguous(uint count, uint8_t alignment_log2, paddr_t *pa, struct list_node *list) { LTRACEF("count %u, align %u\n", count, alignment_log2); diff --git a/lib/buildsig/buildsig.c b/lib/buildsig/buildsig.c index 3436a1a3..54a676f7 100644 --- a/lib/buildsig/buildsig.c +++ b/lib/buildsig/buildsig.c @@ -130,7 +130,7 @@ usage: if (!strcmp(argv[1].str, "dump")) { const void *offset = &__rom_start; if (argc >= 3) { - offset = (void *)argv[2].u; + offset = 
argv[2].p; } const lk_version_t *v; diff --git a/lib/cksum/debug.c b/lib/cksum/debug.c index 14606d6f..132cc00c 100644 --- a/lib/cksum/debug.c +++ b/lib/cksum/debug.c @@ -56,7 +56,7 @@ static int cmd_crc16(int argc, const cmd_args *argv) return -1; } - uint16_t crc = crc16((void *)argv[1].u, argv[2].u); + uint16_t crc = crc16(argv[1].p, argv[2].u); printf("0x%hx\n", crc); @@ -71,7 +71,7 @@ static int cmd_crc32(int argc, const cmd_args *argv) return -1; } - uint32_t crc = crc32(0, (void *)argv[1].u, argv[2].u); + uint32_t crc = crc32(0, argv[1].p, argv[2].u); printf("0x%x\n", crc); @@ -86,7 +86,7 @@ static int cmd_adler32(int argc, const cmd_args *argv) return -1; } - uint32_t crc = adler32(0, (void *)argv[1].u, argv[2].u); + uint32_t crc = adler32(0, argv[1].p, argv[2].u); printf("0x%x\n", crc); @@ -101,7 +101,7 @@ static int cmd_cksum_bench(int argc, const cmd_args *argv) bool freebuf; if (argc > 1) { - buf = (void *)argv[1].u; + buf = argv[1].p; freebuf = false; } else { buf = malloc(BUFSIZE); diff --git a/lib/console/console.c b/lib/console/console.c index 59514863..bdcab58f 100644 --- a/lib/console/console.c +++ b/lib/console/console.c @@ -538,7 +538,9 @@ static void convert_args(int argc, cmd_args *argv) int i; for (i = 0; i < argc; i++) { - argv[i].u = atoul(argv[i].str); + unsigned long u = atoul(argv[i].str); + argv[i].u = u; + argv[i].p = (void*)u; argv[i].i = atol(argv[i].str); if (!strcmp(argv[i].str, "true") || !strcmp(argv[i].str, "on")) { diff --git a/lib/debugcommands/debugcommands.c b/lib/debugcommands/debugcommands.c index 85df564e..c73ab4e8 100644 --- a/lib/debugcommands/debugcommands.c +++ b/lib/debugcommands/debugcommands.c @@ -306,7 +306,7 @@ static int cmd_chain(int argc, const cmd_args *argv) return -1; } - arch_chain_load((void *)argv[1].u, 0, 0, 0, 0); + arch_chain_load(argv[1].p, 0, 0, 0, 0); return 0; } diff --git a/lib/dlmalloc/dlmalloc.c b/lib/heap/dlmalloc/dlmalloc.c similarity index 99% rename from lib/dlmalloc/dlmalloc.c rename to 
lib/heap/dlmalloc/dlmalloc.c index 8b8daae3..f13d8b9d 100644 --- a/lib/dlmalloc/dlmalloc.c +++ b/lib/heap/dlmalloc/dlmalloc.c @@ -539,12 +539,23 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP #define LACKS_UNISTD_H #define LACKS_SYS_PARAM_H #define LACKS_SCHED_H -#define HAVE_MMAP 0 -#define HAVE_MORECORE 1 -#define USE_LOCKS 2 +#define HAVE_MMAP 1 +#include +#include +#include #include +#define MMAP(s) mmap(s) +#define DIRECT_MMAP(s) mmap(s) +#define MUNMAP(b, s) munmap(b, s) +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T /* disable direct mapping of chunks */ +#define DEFAULT_GRANULARITY (64*1024) +#define MMAP_CLEARS 0 +#define HAVE_MORECORE 0 +#define USE_LOCKS 2 #define ABORT panic("dlmalloc abort\n") -#define MALLOC_FAILURE_ACTION //dprintf(INFO, "dlmalloc failure\n"); +#define MALLOC_FAILURE_ACTION // dprintf(INFO, "dlmalloc failure\n"); +#define MALLOC_INSPECT_ALL 1 +#define REALLOC_ZERO_BYTES_FREES 1 #endif /* LK */ #ifndef WIN32 @@ -1652,7 +1663,28 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); #if HAVE_MMAP -#ifndef WIN32 +#if defined(LK) + +/* LK specific stuff here */ +static inline void *mmap(size_t len) { + DEBUG_ASSERT(IS_PAGE_ALIGNED(len)); + + void *ptr = page_alloc(len / PAGE_SIZE); + if (!ptr) + return MFAIL; + return ptr; +} + +static inline int munmap(void *base, size_t len) { + DEBUG_ASSERT(IS_PAGE_ALIGNED((uintptr_t)base)); + DEBUG_ASSERT(IS_PAGE_ALIGNED(len)); + + page_free(base, len / PAGE_SIZE); + return 0; +} + +#elif !defined(WIN32) + #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) #define MMAP_PROT (PROT_READ|PROT_WRITE) #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) diff --git a/lib/dlmalloc/include/lib/dlmalloc.h b/lib/heap/dlmalloc/include/lib/dlmalloc.h similarity index 100% rename from lib/dlmalloc/include/lib/dlmalloc.h rename to lib/heap/dlmalloc/include/lib/dlmalloc.h diff --git a/lib/dlmalloc/rules.mk b/lib/heap/dlmalloc/rules.mk similarity index 100% rename from lib/dlmalloc/rules.mk 
rename to lib/heap/dlmalloc/rules.mk diff --git a/lib/heap/heap.c b/lib/heap/heap.c deleted file mode 100644 index 7e99bf6d..00000000 --- a/lib/heap/heap.c +++ /dev/null @@ -1,628 +0,0 @@ -/* - * Copyright (c) 2008-2009,2012-2014 Travis Geiselbrecht - * Copyright (c) 2009 Corey Tabaka - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define LOCAL_TRACE 0 - -#define DEBUG_HEAP 0 -#define ALLOC_FILL 0x99 -#define FREE_FILL 0x77 -#define PADDING_FILL 0x55 -#define PADDING_SIZE 64 - -#define HEAP_MAGIC 'HEAP' - -#if WITH_KERNEL_VM - -#include -/* we will use kalloc routines to back our heap */ -#if !defined(HEAP_GROW_SIZE) -#define HEAP_GROW_SIZE (4 * 1024 * 1024) /* size the heap grows by when it runs out of memory */ -#endif - -STATIC_ASSERT(IS_PAGE_ALIGNED(HEAP_GROW_SIZE)); - -#elif WITH_STATIC_HEAP - -#if !defined(HEAP_START) || !defined(HEAP_LEN) -#error WITH_STATIC_HEAP set but no HEAP_START or HEAP_LEN defined -#endif - -#else -/* not a static vm, not using the kernel vm */ -extern int _end; -extern int _end_of_ram; - -/* default to using up the rest of memory after the kernel ends */ -/* may be modified by other parts of the system */ -uintptr_t _heap_start = (uintptr_t)&_end; -uintptr_t _heap_end = (uintptr_t)&_end_of_ram; - -#define HEAP_START ((uintptr_t)_heap_start) -#define HEAP_LEN ((uintptr_t)_heap_end - HEAP_START) -#endif - -struct free_heap_chunk { - struct list_node node; - size_t len; -}; - -struct heap { - void *base; - size_t len; - size_t remaining; - size_t low_watermark; - mutex_t lock; - struct list_node free_list; - struct list_node delayed_free_list; - spin_lock_t delayed_free_lock; -}; - -// heap static vars -static struct heap theheap; - -// structure placed at the beginning every allocation -struct alloc_struct_begin { -#if LK_DEBUGLEVEL > 1 - unsigned int magic; -#endif - void *ptr; - size_t size; -#if DEBUG_HEAP - void *padding_start; - size_t padding_size; -#endif -}; - -static ssize_t heap_grow(size_t len); - -static void dump_free_chunk(struct free_heap_chunk *chunk) -{ - dprintf(INFO, "\t\tbase %p, end 0x%lx, len 0x%zx\n", chunk, (vaddr_t)chunk + chunk->len, chunk->len); -} - -static void heap_dump(void) -{ - dprintf(INFO, 
"Heap dump:\n"); - dprintf(INFO, "\tbase %p, len 0x%zx\n", theheap.base, theheap.len); - dprintf(INFO, "\tfree list:\n"); - - mutex_acquire(&theheap.lock); - - struct free_heap_chunk *chunk; - list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) { - dump_free_chunk(chunk); - } - mutex_release(&theheap.lock); - - dprintf(INFO, "\tdelayed free list:\n"); - spin_lock_saved_state_t state; - spin_lock_irqsave(&theheap.delayed_free_lock, state); - list_for_every_entry(&theheap.delayed_free_list, chunk, struct free_heap_chunk, node) { - dump_free_chunk(chunk); - } - spin_unlock_irqrestore(&theheap.delayed_free_lock, state); -} - -static void heap_test(void) -{ - void *ptr[16]; - - ptr[0] = heap_alloc(8, 0); - ptr[1] = heap_alloc(32, 0); - ptr[2] = heap_alloc(7, 0); - ptr[3] = heap_alloc(0, 0); - ptr[4] = heap_alloc(98713, 0); - ptr[5] = heap_alloc(16, 0); - - heap_free(ptr[5]); - heap_free(ptr[1]); - heap_free(ptr[3]); - heap_free(ptr[0]); - heap_free(ptr[4]); - heap_free(ptr[2]); - - heap_dump(); - - int i; - for (i=0; i < 16; i++) - ptr[i] = 0; - - for (i=0; i < 32768; i++) { - unsigned int index = (unsigned int)rand() % 16; - - if ((i % (16*1024)) == 0) - printf("pass %d\n", i); - -// printf("index 0x%x\n", index); - if (ptr[index]) { -// printf("freeing ptr[0x%x] = %p\n", index, ptr[index]); - heap_free(ptr[index]); - ptr[index] = 0; - } - unsigned int align = 1 << ((unsigned int)rand() % 8); - ptr[index] = heap_alloc((unsigned int)rand() % 32768, align); -// printf("ptr[0x%x] = %p, align 0x%x\n", index, ptr[index], align); - - DEBUG_ASSERT(((addr_t)ptr[index] % align) == 0); -// heap_dump(); - } - - for (i=0; i < 16; i++) { - if (ptr[i]) - heap_free(ptr[i]); - } - - heap_dump(); -} - -// try to insert this free chunk into the free list, consuming the chunk by merging it with -// nearby ones if possible. Returns base of whatever chunk it became in the list. 
-static struct free_heap_chunk *heap_insert_free_chunk(struct free_heap_chunk *chunk) -{ -#if LK_DEBUGLEVEL > INFO - vaddr_t chunk_end = (vaddr_t)chunk + chunk->len; -#endif - - LTRACEF("chunk ptr %p, size 0x%zx\n", chunk, chunk->len); - - struct free_heap_chunk *next_chunk; - struct free_heap_chunk *last_chunk; - - mutex_acquire(&theheap.lock); - - theheap.remaining += chunk->len; - - // walk through the list, finding the node to insert before - list_for_every_entry(&theheap.free_list, next_chunk, struct free_heap_chunk, node) { - if (chunk < next_chunk) { - DEBUG_ASSERT(chunk_end <= (vaddr_t)next_chunk); - - list_add_before(&next_chunk->node, &chunk->node); - - goto try_merge; - } - } - - // walked off the end of the list, add it at the tail - list_add_tail(&theheap.free_list, &chunk->node); - - // try to merge with the previous chunk -try_merge: - last_chunk = list_prev_type(&theheap.free_list, &chunk->node, struct free_heap_chunk, node); - if (last_chunk) { - if ((vaddr_t)last_chunk + last_chunk->len == (vaddr_t)chunk) { - // easy, just extend the previous chunk - last_chunk->len += chunk->len; - - // remove ourself from the list - list_delete(&chunk->node); - - // set the chunk pointer to the newly extended chunk, in case - // it needs to merge with the next chunk below - chunk = last_chunk; - } - } - - // try to merge with the next chunk - if (next_chunk) { - if ((vaddr_t)chunk + chunk->len == (vaddr_t)next_chunk) { - // extend our chunk - chunk->len += next_chunk->len; - - // remove them from the list - list_delete(&next_chunk->node); - } - } - - mutex_release(&theheap.lock); - - return chunk; -} - -static struct free_heap_chunk *heap_create_free_chunk(void *ptr, size_t len, bool allow_debug) -{ - DEBUG_ASSERT((len % sizeof(void *)) == 0); // size must be aligned on pointer boundary - -#if DEBUG_HEAP - if (allow_debug) - memset(ptr, FREE_FILL, len); -#endif - - struct free_heap_chunk *chunk = (struct free_heap_chunk *)ptr; - chunk->len = len; - - return 
chunk; -} - -static void heap_free_delayed_list(void) -{ - struct list_node list; - - list_initialize(&list); - - spin_lock_saved_state_t state; - spin_lock_irqsave(&theheap.delayed_free_lock, state); - - struct free_heap_chunk *chunk; - while ((chunk = list_remove_head_type(&theheap.delayed_free_list, struct free_heap_chunk, node))) { - list_add_head(&list, &chunk->node); - } - spin_unlock_irqrestore(&theheap.delayed_free_lock, state); - - while ((chunk = list_remove_head_type(&list, struct free_heap_chunk, node))) { - LTRACEF("freeing chunk %p\n", chunk); - heap_insert_free_chunk(chunk); - } -} - -void *heap_alloc(size_t size, unsigned int alignment) -{ - void *ptr; -#if DEBUG_HEAP - size_t original_size = size; -#endif - - LTRACEF("size %zd, align %d\n", size, alignment); - - // deal with the pending free list - if (unlikely(!list_is_empty(&theheap.delayed_free_list))) { - heap_free_delayed_list(); - } - - // alignment must be power of 2 - if (alignment & (alignment - 1)) - return NULL; - - // we always put a size field + base pointer + magic in front of the allocation - size += sizeof(struct alloc_struct_begin); -#if DEBUG_HEAP - size += PADDING_SIZE; -#endif - - // make sure we allocate at least the size of a struct free_heap_chunk so that - // when we free it, we can create a struct free_heap_chunk struct and stick it - // in the spot - if (size < sizeof(struct free_heap_chunk)) - size = sizeof(struct free_heap_chunk); - - // round up size to a multiple of native pointer size - size = ROUNDUP(size, sizeof(void *)); - - // deal with nonzero alignments - if (alignment > 0) { - if (alignment < 16) - alignment = 16; - - // add alignment for worst case fit - size += alignment; - } - -#if WITH_KERNEL_VM - int retry_count = 0; -retry: -#endif - mutex_acquire(&theheap.lock); - - // walk through the list - ptr = NULL; - struct free_heap_chunk *chunk; - list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) { - DEBUG_ASSERT((chunk->len % 
sizeof(void *)) == 0); // len should always be a multiple of pointer size - - // is it big enough to service our allocation? - if (chunk->len >= size) { - ptr = chunk; - - // remove it from the list - struct list_node *next_node = list_next(&theheap.free_list, &chunk->node); - list_delete(&chunk->node); - - if (chunk->len > size + sizeof(struct free_heap_chunk)) { - // there's enough space in this chunk to create a new one after the allocation - struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size, true); - - // truncate this chunk - chunk->len -= chunk->len - size; - - // add the new one where chunk used to be - if (next_node) - list_add_before(next_node, &newchunk->node); - else - list_add_tail(&theheap.free_list, &newchunk->node); - } - - // the allocated size is actually the length of this chunk, not the size requested - DEBUG_ASSERT(chunk->len >= size); - size = chunk->len; - -#if DEBUG_HEAP - memset(ptr, ALLOC_FILL, size); -#endif - - ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin)); - - // align the output if requested - if (alignment > 0) { - ptr = (void *)ROUNDUP((addr_t)ptr, (addr_t)alignment); - } - - struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr; - as--; -#if LK_DEBUGLEVEL > 1 - as->magic = HEAP_MAGIC; -#endif - as->ptr = (void *)chunk; - as->size = size; - theheap.remaining -= size; - - if (theheap.remaining < theheap.low_watermark) { - theheap.low_watermark = theheap.remaining; - } -#if DEBUG_HEAP - as->padding_start = ((uint8_t *)ptr + original_size); - as->padding_size = (((addr_t)chunk + size) - ((addr_t)ptr + original_size)); -// printf("padding start %p, size %u, chunk %p, size %u\n", as->padding_start, as->padding_size, chunk, size); - - memset(as->padding_start, PADDING_FILL, as->padding_size); -#endif - - break; - } - } - - mutex_release(&theheap.lock); - -#if WITH_KERNEL_VM - /* try to grow the heap if we can */ - if (ptr == NULL && retry_count == 0) { - size_t 
growby = MAX(HEAP_GROW_SIZE, ROUNDUP(size, PAGE_SIZE)); - - ssize_t err = heap_grow(growby); - if (err >= 0) { - retry_count++; - goto retry; - } - } -#endif - - LTRACEF("returning ptr %p\n", ptr); - - return ptr; -} - -void heap_free(void *ptr) -{ - if (ptr == 0) - return; - - LTRACEF("ptr %p\n", ptr); - - // check for the old allocation structure - struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr; - as--; - - DEBUG_ASSERT(as->magic == HEAP_MAGIC); - -#if DEBUG_HEAP - { - uint i; - uint8_t *pad = (uint8_t *)as->padding_start; - - for (i = 0; i < as->padding_size; i++) { - if (pad[i] != PADDING_FILL) { - printf("free at %p scribbled outside the lines:\n", ptr); - hexdump(pad, as->padding_size); - panic("die\n"); - } - } - } -#endif - - LTRACEF("allocation was %zd bytes long at ptr %p\n", as->size, as->ptr); - - // looks good, create a free chunk and add it to the pool - heap_insert_free_chunk(heap_create_free_chunk(as->ptr, as->size, true)); -} - -void heap_delayed_free(void *ptr) -{ - LTRACEF("ptr %p\n", ptr); - - // check for the old allocation structure - struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr; - as--; - - DEBUG_ASSERT(as->magic == HEAP_MAGIC); - - struct free_heap_chunk *chunk = heap_create_free_chunk(as->ptr, as->size, false); - - spin_lock_saved_state_t state; - spin_lock_irqsave(&theheap.delayed_free_lock, state); - list_add_head(&theheap.delayed_free_list, &chunk->node); - spin_unlock_irqrestore(&theheap.delayed_free_lock, state); -} - -void heap_get_stats(struct heap_stats *ptr) -{ - struct free_heap_chunk *chunk; - - if ((struct heap_stats*)NULL==ptr) { - return; - } - //flush the delayed free list - if (unlikely(!list_is_empty(&theheap.delayed_free_list))) { - heap_free_delayed_list(); - } - - ptr->heap_start = theheap.base; - ptr->heap_len = theheap.len; - ptr->heap_free=0; - ptr->heap_max_chunk = 0; - - mutex_acquire(&theheap.lock); - - list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, 
node) { - ptr->heap_free += chunk->len; - - if (chunk->len > ptr->heap_max_chunk) { - ptr->heap_max_chunk = chunk->len; - } - } - - ptr->heap_low_watermark = theheap.low_watermark; - - mutex_release(&theheap.lock); -} - -static ssize_t heap_grow(size_t size) -{ -#if WITH_KERNEL_VM - size = ROUNDUP(size, PAGE_SIZE); - - void *ptr = pmm_alloc_kpages(size / PAGE_SIZE, NULL); - if (!ptr) { - TRACEF("failed to grow kernel heap by 0x%zx bytes\n", size); - return ERR_NO_MEMORY; - } - - LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, ptr); - - heap_insert_free_chunk(heap_create_free_chunk(ptr, size, true)); - - /* change the heap start and end variables */ - if ((uintptr_t)ptr < (uintptr_t)theheap.base) - theheap.base = ptr; - - uintptr_t endptr = (uintptr_t)ptr + size; - if (endptr > (uintptr_t)theheap.base + theheap.len) { - theheap.len = (uintptr_t)endptr - (uintptr_t)theheap.base; - } - - return size; -#else - return ERR_NO_MEMORY; -#endif -} - -void heap_init(void) -{ - LTRACE_ENTRY; - - // create a mutex - mutex_init(&theheap.lock); - - // initialize the free list - list_initialize(&theheap.free_list); - - // initialize the delayed free list - list_initialize(&theheap.delayed_free_list); - spin_lock_init(&theheap.delayed_free_lock); - - // set the heap range -#if WITH_KERNEL_VM - theheap.base = pmm_alloc_kpages(HEAP_GROW_SIZE / PAGE_SIZE, NULL); - theheap.len = HEAP_GROW_SIZE; - - if (theheap.base == 0) { - panic("HEAP: error allocating initial heap size\n"); - } -#else - theheap.base = (void *)HEAP_START; - theheap.len = HEAP_LEN; -#endif - theheap.remaining = 0; // will get set by heap_insert_free_chunk() - theheap.low_watermark = theheap.len; - LTRACEF("base %p size %zd bytes\n", theheap.base, theheap.len); - - // create an initial free chunk - heap_insert_free_chunk(heap_create_free_chunk(theheap.base, theheap.len, false)); -} - -/* add a new block of memory to the heap */ -void heap_add_block(void *ptr, size_t len) -{ - 
heap_insert_free_chunk(heap_create_free_chunk(ptr, len, false)); -} - -#if LK_DEBUGLEVEL > 1 -#if WITH_LIB_CONSOLE - -#include - -static int cmd_heap(int argc, const cmd_args *argv); - -STATIC_COMMAND_START -STATIC_COMMAND("heap", "heap debug commands", &cmd_heap) -STATIC_COMMAND_END(heap); - -static int cmd_heap(int argc, const cmd_args *argv) -{ - if (argc < 2) { -notenoughargs: - printf("not enough arguments\n"); -usage: - printf("usage:\n"); - printf("\t%s info\n", argv[0].str); - printf("\t%s alloc [alignment]\n", argv[0].str); - printf("\t%s free
\n", argv[0].str); - return -1; - } - - if (strcmp(argv[1].str, "info") == 0) { - heap_dump(); - } else if (strcmp(argv[1].str, "alloc") == 0) { - if (argc < 3) goto notenoughargs; - - void *ptr = heap_alloc(argv[2].u, (argc >= 3) ? argv[3].u : 0); - printf("heap_alloc returns %p\n", ptr); - } else if (strcmp(argv[1].str, "free") == 0) { - if (argc < 2) goto notenoughargs; - - heap_free((void *)argv[2].u); - } else { - printf("unrecognized command\n"); - goto usage; - } - - return 0; -} - -#endif -#endif - -/* vim: set ts=4 sw=4 noexpandtab: */ - diff --git a/lib/heap/heap_wrapper.c b/lib/heap/heap_wrapper.c new file mode 100644 index 00000000..1856b8cd --- /dev/null +++ b/lib/heap/heap_wrapper.c @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2008-2015 Travis Geiselbrecht + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LOCAL_TRACE 0 + +/* delayed free list */ +struct list_node delayed_free_list = LIST_INITIAL_VALUE(delayed_free_list); +spin_lock_t delayed_free_lock = SPIN_LOCK_INITIAL_VALUE; + +#if WITH_LIB_HEAP_MINIHEAP +/* miniheap implementation */ +#include + +static inline void *HEAP_MALLOC(size_t s) { return miniheap_alloc(s, 0); } +static inline void *HEAP_REALLOC(void *ptr, size_t s) { return miniheap_realloc(ptr, s); } +static inline void *HEAP_MEMALIGN(size_t boundary, size_t s) { return miniheap_alloc(s, boundary); } +#define HEAP_FREE miniheap_free +static inline void *HEAP_CALLOC(size_t n, size_t s) { + size_t realsize = n * s; + + void *ptr = miniheap_alloc(n * s, 0); + if (likely(ptr)) + memset(ptr, 0, realsize); + return ptr; +} +static inline void HEAP_INIT(void) { + /* start the heap off with some spare memory in the page allocator */ + size_t len; + void *ptr = page_first_alloc(&len); + miniheap_init(ptr, len); +} +#define HEAP_DUMP miniheap_dump +#define HEAP_TRIM miniheap_trim + +/* end miniheap implementation */ +#elif WITH_LIB_HEAP_DLMALLOC +/* dlmalloc implementation */ +#include + +#define HEAP_MALLOC(s) dlmalloc(s) +#define HEAP_CALLOC(n, s) dlcalloc(n, s) +#define HEAP_MEMALIGN(b, s) dlmemalign(b, s) +#define HEAP_REALLOC(p, s) dlrealloc(p, s) +#define HEAP_FREE(p) dlfree(p) +static inline void HEAP_INIT(void) {} + +static inline void HEAP_DUMP(void) { + struct mallinfo minfo = dlmallinfo(); + + printf("\tmallinfo:\n"); + printf("\t\tarena space 0x%zx\n", minfo.arena); + printf("\t\tfree chunks 0x%zx\n", minfo.ordblks); + printf("\t\tspace in mapped regions 0x%zx\n", minfo.hblkhd); + printf("\t\tmax total allocated 0x%zx\n", minfo.usmblks); + printf("\t\ttotal allocated 0x%zx\n", minfo.uordblks); + printf("\t\tfree 0x%zx\n", minfo.fordblks); + printf("\t\treleasable space 0x%zx\n", minfo.keepcost); + + printf("\theap block 
list:\n"); + void dump_callback(void *start, void *end, size_t used_bytes, void *arg) { + printf("\t\tstart %p end %p used_bytes %zu\n", start, end, used_bytes); + } + + dlmalloc_inspect_all(&dump_callback, NULL); +} + +static inline void HEAP_TRIM(void) { dlmalloc_trim(0); } + +/* end dlmalloc implementation */ +#else +#error need to select valid heap implementation or provide wrapper +#endif + +static void heap_free_delayed_list(void) +{ + struct list_node list; + + list_initialize(&list); + + spin_lock_saved_state_t state; + spin_lock_irqsave(&delayed_free_lock, state); + + struct list_node *node; + while ((node = list_remove_head(&delayed_free_list))) { + list_add_head(&list, node); + } + spin_unlock_irqrestore(&delayed_free_lock, state); + + while ((node = list_remove_head(&list))) { + LTRACEF("freeing node %p\n", node); + HEAP_FREE(node); + } +} + +void heap_init(void) +{ + HEAP_INIT(); +} + +void heap_trim(void) +{ + // deal with the pending free list + if (unlikely(!list_is_empty(&delayed_free_list))) { + heap_free_delayed_list(); + } + + HEAP_TRIM(); +} + +void *malloc(size_t size) +{ + LTRACEF("size %zd\n", size); + + // deal with the pending free list + if (unlikely(!list_is_empty(&delayed_free_list))) { + heap_free_delayed_list(); + } + + return HEAP_MALLOC(size); +} + +void *memalign(size_t boundary, size_t size) +{ + LTRACEF("boundary %zu, size %zd\n", boundary, size); + + // deal with the pending free list + if (unlikely(!list_is_empty(&delayed_free_list))) { + heap_free_delayed_list(); + } + + return HEAP_MEMALIGN(boundary, size); +} + +void *calloc(size_t count, size_t size) +{ + LTRACEF("count %zu, size %zd\n", count, size); + + // deal with the pending free list + if (unlikely(!list_is_empty(&delayed_free_list))) { + heap_free_delayed_list(); + } + + return HEAP_CALLOC(count, size); +} + +void *realloc(void *ptr, size_t size) +{ + LTRACEF("ptr %p, size %zd\n", ptr, size); + + // deal with the pending free list + if 
(unlikely(!list_is_empty(&delayed_free_list))) { + heap_free_delayed_list(); + } + + return HEAP_REALLOC(ptr, size); +} + +void free(void *ptr) +{ + LTRACEF("ptr %p\n", ptr); + + HEAP_FREE(ptr); +} + +/* critical section time delayed free */ +void heap_delayed_free(void *ptr) +{ + LTRACEF("ptr %p\n", ptr); + + /* throw down a structure on the free block */ + /* XXX assumes the free block is large enough to hold a list node */ + struct list_node *node = (struct list_node *)ptr; + + spin_lock_saved_state_t state; + spin_lock_irqsave(&delayed_free_lock, state); + list_add_head(&delayed_free_list, node); + spin_unlock_irqrestore(&delayed_free_lock, state); +} + +static void heap_dump(void) +{ + HEAP_DUMP(); + + printf("\tdelayed free list:\n"); + spin_lock_saved_state_t state; + spin_lock_irqsave(&delayed_free_lock, state); + struct list_node *node; + list_for_every(&delayed_free_list, node) { + printf("\t\tnode %p\n", node); + } + spin_unlock_irqrestore(&delayed_free_lock, state); +} + +#if 0 +static void heap_test(void) +{ + void *ptr[16]; + + ptr[0] = heap_alloc(8, 0); + ptr[1] = heap_alloc(32, 0); + ptr[2] = heap_alloc(7, 0); + ptr[3] = heap_alloc(0, 0); + ptr[4] = heap_alloc(98713, 0); + ptr[5] = heap_alloc(16, 0); + + heap_free(ptr[5]); + heap_free(ptr[1]); + heap_free(ptr[3]); + heap_free(ptr[0]); + heap_free(ptr[4]); + heap_free(ptr[2]); + + heap_dump(); + + int i; + for (i=0; i < 16; i++) + ptr[i] = 0; + + for (i=0; i < 32768; i++) { + unsigned int index = (unsigned int)rand() % 16; + + if ((i % (16*1024)) == 0) + printf("pass %d\n", i); + +// printf("index 0x%x\n", index); + if (ptr[index]) { +// printf("freeing ptr[0x%x] = %p\n", index, ptr[index]); + heap_free(ptr[index]); + ptr[index] = 0; + } + unsigned int align = 1 << ((unsigned int)rand() % 8); + ptr[index] = heap_alloc((unsigned int)rand() % 32768, align); +// printf("ptr[0x%x] = %p, align 0x%x\n", index, ptr[index], align); + + DEBUG_ASSERT(((addr_t)ptr[index] % align) == 0); +// heap_dump(); + } + + 
for (i=0; i < 16; i++) { + if (ptr[i]) + heap_free(ptr[i]); + } + + heap_dump(); +} +#endif + + +#if LK_DEBUGLEVEL > 1 +#if WITH_LIB_CONSOLE + +#include + +static int cmd_heap(int argc, const cmd_args *argv); + +STATIC_COMMAND_START +STATIC_COMMAND("heap", "heap debug commands", &cmd_heap) +STATIC_COMMAND_END(heap); + +static int cmd_heap(int argc, const cmd_args *argv) +{ + if (argc < 2) { +notenoughargs: + printf("not enough arguments\n"); +usage: + printf("usage:\n"); + printf("\t%s info\n", argv[0].str); + printf("\t%s trim\n", argv[0].str); + printf("\t%s alloc [alignment]\n", argv[0].str); + printf("\t%s realloc \n", argv[0].str); + printf("\t%s free
\n", argv[0].str); + return -1; + } + + if (strcmp(argv[1].str, "info") == 0) { + heap_dump(); + } else if (strcmp(argv[1].str, "trim") == 0) { + heap_trim(); + } else if (strcmp(argv[1].str, "alloc") == 0) { + if (argc < 3) goto notenoughargs; + + void *ptr = memalign((argc >= 4) ? argv[3].u : 0, argv[2].u); + printf("memalign returns %p\n", ptr); + } else if (strcmp(argv[1].str, "realloc") == 0) { + if (argc < 4) goto notenoughargs; + + void *ptr = realloc(argv[2].p, argv[3].u); + printf("realloc returns %p\n", ptr); + } else if (strcmp(argv[1].str, "free") == 0) { + if (argc < 2) goto notenoughargs; + + free(argv[2].p); + } else { + printf("unrecognized command\n"); + goto usage; + } + + return 0; +} + +#endif +#endif + + diff --git a/include/lib/heap.h b/lib/heap/include/lib/heap.h similarity index 76% rename from include/lib/heap.h rename to lib/heap/include/lib/heap.h index 0b0546f0..0a52485e 100644 --- a/include/lib/heap.h +++ b/lib/heap/include/lib/heap.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2014 Travis Geiselbrecht + * Copyright (c) 2008-2015 Travis Geiselbrecht * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files @@ -20,8 +20,7 @@ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -#ifndef __LIB_HEAP_H -#define __LIB_HEAP_H +#pragma once #include #include @@ -29,25 +28,15 @@ __BEGIN_CDECLS; -struct heap_stats { - void* heap_start; - size_t heap_len; - size_t heap_free; - size_t heap_max_chunk; - size_t heap_low_watermark; -}; - -void *heap_alloc(size_t, unsigned int alignment); -void heap_free(void *); +/* standard heap definitions */ +void *malloc(size_t size) __MALLOC; +void *memalign(size_t boundary, size_t size) __MALLOC; +void *calloc(size_t count, size_t size) __MALLOC; +void *realloc(void *ptr, size_t size) __MALLOC; +void free(void *ptr); void heap_init(void); -void heap_add_block(void *, size_t); - -void heap_get_stats(struct heap_stats *ptr); - /* critical section time delayed free */ void heap_delayed_free(void *); __END_CDECLS; - -#endif diff --git a/lib/libc/malloc.c b/lib/heap/miniheap/include/lib/miniheap.h similarity index 60% rename from lib/libc/malloc.c rename to lib/heap/miniheap/include/lib/miniheap.h index cbad393c..b157d3f1 100644 --- a/lib/libc/malloc.c +++ b/lib/heap/miniheap/include/lib/miniheap.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Travis Geiselbrecht + * Copyright (c) 2015 Travis Geiselbrecht * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files @@ -20,52 +20,28 @@ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include -#include -#include -#include +#pragma once -void *malloc(size_t size) -{ - return heap_alloc(size, 0); -} +#include -void *memalign(size_t boundary, size_t size) -{ - return heap_alloc(size, boundary); -} +__BEGIN_CDECLS; -void *calloc(size_t count, size_t size) -{ - void *ptr; - size_t realsize = count * size; +struct miniheap_stats { + void* heap_start; + size_t heap_len; + size_t heap_free; + size_t heap_max_chunk; + size_t heap_low_watermark; +}; - ptr = heap_alloc(realsize, 0); - if (!ptr) - return NULL; +void miniheap_get_stats(struct miniheap_stats *ptr); - memset(ptr, 0, realsize); - return ptr; -} +void *miniheap_alloc(size_t, unsigned int alignment); +void *miniheap_realloc(void *, size_t); +void miniheap_free(void *); -void *realloc(void *ptr, size_t size) -{ - if (!ptr) - return malloc(size); - - // XXX better implementation - void *p = malloc(size); - if (!p) - return NULL; - - memcpy(p, ptr, size); // XXX wrong - free(ptr); - - return p; -} - -void free(void *ptr) -{ - return heap_free(ptr); -} +void miniheap_init(void *ptr, size_t len); +void miniheap_dump(void); +void miniheap_trim(void); +__END_CDECLS; diff --git a/lib/heap/miniheap/miniheap.c b/lib/heap/miniheap/miniheap.c new file mode 100644 index 00000000..e4a7f090 --- /dev/null +++ b/lib/heap/miniheap/miniheap.c @@ -0,0 +1,433 @@ +/* + * Copyright (c) 2008-2009,2012-2015 Travis Geiselbrecht + * Copyright (c) 2009 Corey Tabaka + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies 
or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LOCAL_TRACE 0 + +#define DEBUG_HEAP 0 +#define ALLOC_FILL 0x99 +#define FREE_FILL 0x77 +#define PADDING_FILL 0x55 +#define PADDING_SIZE 64 + +#define HEAP_MAGIC 'HEAP' + +struct free_heap_chunk { + struct list_node node; + size_t len; +}; + +struct heap { + void *base; + size_t len; + size_t remaining; + size_t low_watermark; + mutex_t lock; + struct list_node free_list; +}; + +// heap static vars +static struct heap theheap; + +// structure placed at the beginning every allocation +struct alloc_struct_begin { +#if LK_DEBUGLEVEL > 1 + unsigned int magic; +#endif + void *ptr; + size_t size; +#if DEBUG_HEAP + void *padding_start; + size_t padding_size; +#endif +}; + +static ssize_t heap_grow(size_t len); + +static void dump_free_chunk(struct free_heap_chunk *chunk) +{ + dprintf(INFO, "\t\tbase %p, end 0x%lx, len 0x%zx\n", chunk, (vaddr_t)chunk + chunk->len, chunk->len); +} + +void miniheap_dump(void) +{ + dprintf(INFO, "Heap dump:\n"); + dprintf(INFO, "\tbase %p, len 0x%zx\n", theheap.base, theheap.len); + dprintf(INFO, "\tfree list:\n"); + + mutex_acquire(&theheap.lock); + + struct free_heap_chunk *chunk; + list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) { + dump_free_chunk(chunk); + } + mutex_release(&theheap.lock); + +} + +// try to insert this free chunk into the 
free list, consuming the chunk by merging it with +// nearby ones if possible. Returns base of whatever chunk it became in the list. +static struct free_heap_chunk *heap_insert_free_chunk(struct free_heap_chunk *chunk) +{ +#if LK_DEBUGLEVEL > INFO + vaddr_t chunk_end = (vaddr_t)chunk + chunk->len; +#endif + + LTRACEF("chunk ptr %p, size 0x%zx\n", chunk, chunk->len); + + struct free_heap_chunk *next_chunk; + struct free_heap_chunk *last_chunk; + + mutex_acquire(&theheap.lock); + + theheap.remaining += chunk->len; + + // walk through the list, finding the node to insert before + list_for_every_entry(&theheap.free_list, next_chunk, struct free_heap_chunk, node) { + if (chunk < next_chunk) { + DEBUG_ASSERT(chunk_end <= (vaddr_t)next_chunk); + + list_add_before(&next_chunk->node, &chunk->node); + + goto try_merge; + } + } + + // walked off the end of the list, add it at the tail + list_add_tail(&theheap.free_list, &chunk->node); + + // try to merge with the previous chunk +try_merge: + last_chunk = list_prev_type(&theheap.free_list, &chunk->node, struct free_heap_chunk, node); + if (last_chunk) { + if ((vaddr_t)last_chunk + last_chunk->len == (vaddr_t)chunk) { + // easy, just extend the previous chunk + last_chunk->len += chunk->len; + + // remove ourself from the list + list_delete(&chunk->node); + + // set the chunk pointer to the newly extended chunk, in case + // it needs to merge with the next chunk below + chunk = last_chunk; + } + } + + // try to merge with the next chunk + if (next_chunk) { + if ((vaddr_t)chunk + chunk->len == (vaddr_t)next_chunk) { + // extend our chunk + chunk->len += next_chunk->len; + + // remove them from the list + list_delete(&next_chunk->node); + } + } + + mutex_release(&theheap.lock); + + return chunk; +} + +static struct free_heap_chunk *heap_create_free_chunk(void *ptr, size_t len, bool allow_debug) +{ + DEBUG_ASSERT((len % sizeof(void *)) == 0); // size must be aligned on pointer boundary + +#if DEBUG_HEAP + if (allow_debug) + 
memset(ptr, FREE_FILL, len); +#endif + + struct free_heap_chunk *chunk = (struct free_heap_chunk *)ptr; + chunk->len = len; + + return chunk; +} + +void *miniheap_alloc(size_t size, unsigned int alignment) +{ + void *ptr; +#if DEBUG_HEAP + size_t original_size = size; +#endif + + LTRACEF("size %zd, align %d\n", size, alignment); + + // alignment must be power of 2 + if (alignment & (alignment - 1)) + return NULL; + + // we always put a size field + base pointer + magic in front of the allocation + size += sizeof(struct alloc_struct_begin); +#if DEBUG_HEAP + size += PADDING_SIZE; +#endif + + // make sure we allocate at least the size of a struct free_heap_chunk so that + // when we free it, we can create a struct free_heap_chunk struct and stick it + // in the spot + if (size < sizeof(struct free_heap_chunk)) + size = sizeof(struct free_heap_chunk); + + // round up size to a multiple of native pointer size + size = ROUNDUP(size, sizeof(void *)); + + // deal with nonzero alignments + if (alignment > 0) { + if (alignment < 16) + alignment = 16; + + // add alignment for worst case fit + size += alignment; + } + + int retry_count = 0; +retry: + mutex_acquire(&theheap.lock); + + // walk through the list + ptr = NULL; + struct free_heap_chunk *chunk; + list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) { + DEBUG_ASSERT((chunk->len % sizeof(void *)) == 0); // len should always be a multiple of pointer size + + // is it big enough to service our allocation? 
+ if (chunk->len >= size) { + ptr = chunk; + + // remove it from the list + struct list_node *next_node = list_next(&theheap.free_list, &chunk->node); + list_delete(&chunk->node); + + if (chunk->len > size + sizeof(struct free_heap_chunk)) { + // there's enough space in this chunk to create a new one after the allocation + struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size, true); + + // truncate this chunk + chunk->len -= chunk->len - size; + + // add the new one where chunk used to be + if (next_node) + list_add_before(next_node, &newchunk->node); + else + list_add_tail(&theheap.free_list, &newchunk->node); + } + + // the allocated size is actually the length of this chunk, not the size requested + DEBUG_ASSERT(chunk->len >= size); + size = chunk->len; + +#if DEBUG_HEAP + memset(ptr, ALLOC_FILL, size); +#endif + + ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin)); + + // align the output if requested + if (alignment > 0) { + ptr = (void *)ROUNDUP((addr_t)ptr, (addr_t)alignment); + } + + struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr; + as--; +#if LK_DEBUGLEVEL > 1 + as->magic = HEAP_MAGIC; +#endif + as->ptr = (void *)chunk; + as->size = size; + theheap.remaining -= size; + + if (theheap.remaining < theheap.low_watermark) { + theheap.low_watermark = theheap.remaining; + } +#if DEBUG_HEAP + as->padding_start = ((uint8_t *)ptr + original_size); + as->padding_size = (((addr_t)chunk + size) - ((addr_t)ptr + original_size)); +// printf("padding start %p, size %u, chunk %p, size %u\n", as->padding_start, as->padding_size, chunk, size); + + memset(as->padding_start, PADDING_FILL, as->padding_size); +#endif + + break; + } + } + + mutex_release(&theheap.lock); + + /* try to grow the heap if we can */ + if (ptr == NULL && retry_count == 0) { + ssize_t err = heap_grow(size); + if (err >= 0) { + retry_count++; + goto retry; + } + } + + LTRACEF("returning ptr %p\n", ptr); + + return ptr; +} + +void 
*miniheap_realloc(void *ptr, size_t size) +{ + /* slow implementation */ + if (!ptr) + return miniheap_alloc(size, 0); + if (size == 0) { + miniheap_free(ptr); + return NULL; + } + + // XXX better implementation + void *p = miniheap_alloc(size, 0); + if (!p) + return NULL; + + memcpy(p, ptr, size); // XXX wrong + miniheap_free(ptr); + + return p; +} + +void miniheap_free(void *ptr) +{ + if (!ptr) + return; + + LTRACEF("ptr %p\n", ptr); + + // check for the old allocation structure + struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr; + as--; + + DEBUG_ASSERT(as->magic == HEAP_MAGIC); + +#if DEBUG_HEAP + { + uint i; + uint8_t *pad = (uint8_t *)as->padding_start; + + for (i = 0; i < as->padding_size; i++) { + if (pad[i] != PADDING_FILL) { + printf("free at %p scribbled outside the lines:\n", ptr); + hexdump(pad, as->padding_size); + panic("die\n"); + } + } + } +#endif + + LTRACEF("allocation was %zd bytes long at ptr %p\n", as->size, as->ptr); + + // looks good, create a free chunk and add it to the pool + heap_insert_free_chunk(heap_create_free_chunk(as->ptr, as->size, true)); +} + +void miniheap_get_stats(struct miniheap_stats *ptr) +{ + struct free_heap_chunk *chunk; + + ptr->heap_start = theheap.base; + ptr->heap_len = theheap.len; + ptr->heap_free=0; + ptr->heap_max_chunk = 0; + + mutex_acquire(&theheap.lock); + + list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) { + ptr->heap_free += chunk->len; + + if (chunk->len > ptr->heap_max_chunk) { + ptr->heap_max_chunk = chunk->len; + } + } + + ptr->heap_low_watermark = theheap.low_watermark; + + mutex_release(&theheap.lock); +} + +void miniheap_trim(void) +{ + /* currently does nothing */ +} + +static ssize_t heap_grow(size_t size) +{ + size = ROUNDUP(size, PAGE_SIZE); + void *ptr = page_alloc(size / PAGE_SIZE); + if (!ptr) { + TRACEF("failed to grow kernel heap by 0x%zx bytes\n", size); + return ERR_NO_MEMORY; + } + + LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, 
ptr); + + heap_insert_free_chunk(heap_create_free_chunk(ptr, size, true)); + + /* change the heap start and end variables */ + if ((uintptr_t)ptr < (uintptr_t)theheap.base || theheap.base == 0) + theheap.base = ptr; + + uintptr_t endptr = (uintptr_t)ptr + size; + if (endptr > (uintptr_t)theheap.base + theheap.len) { + theheap.len = (uintptr_t)endptr - (uintptr_t)theheap.base; + } + + return size; +} + +void miniheap_init(void *ptr, size_t len) +{ + LTRACEF("ptr %p, len %zu\n", ptr, len); + + // create a mutex + mutex_init(&theheap.lock); + + // initialize the free list + list_initialize(&theheap.free_list); + + // set the heap range + theheap.base = ptr; + theheap.len = len; + theheap.remaining = 0; // will get set by heap_insert_free_chunk() + theheap.low_watermark = 0; + + // if passed a default range, use it + if (len > 0) + heap_insert_free_chunk(heap_create_free_chunk(ptr, len, true)); +} + diff --git a/lib/heap/miniheap/rules.mk b/lib/heap/miniheap/rules.mk new file mode 100644 index 00000000..c921d02f --- /dev/null +++ b/lib/heap/miniheap/rules.mk @@ -0,0 +1,10 @@ +LOCAL_DIR := $(GET_LOCAL_DIR) + +GLOBAL_INCLUDES += $(LOCAL_DIR)/include + +MODULE := $(LOCAL_DIR) + +MODULE_SRCS += \ + $(LOCAL_DIR)/miniheap.c + +include make/module.mk diff --git a/lib/libc/new.cpp b/lib/heap/new.cpp similarity index 89% rename from lib/libc/new.cpp rename to lib/heap/new.cpp index e9ebaae1..15c01840 100644 --- a/lib/libc/new.cpp +++ b/lib/heap/new.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Travis Geiselbrecht + * Copyright (c) 2006-2015 Travis Geiselbrecht * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files @@ -26,26 +26,26 @@ void *operator new(size_t s) { - return heap_alloc(s, 0); + return malloc(s); } void *operator new[](size_t s) { - return heap_alloc(s, 0); + return malloc(s); } void *operator new(size_t , void *p) { - return p; + return p; } void operator delete(void *p) { - return 
heap_free(p); + return free(p); } void operator delete[](void *p) { - return heap_free(p); + return free(p); } diff --git a/lib/heap/page_alloc.c b/lib/heap/page_alloc.c new file mode 100644 index 00000000..15924b18 --- /dev/null +++ b/lib/heap/page_alloc.c @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2015 Google, Inc. All rights reserved + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include + +#include +#include +#include +#include +#if WITH_KERNEL_VM +#include +#else +#include +#endif + +/* A simple page-aligned wrapper around the pmm or novm implementation of + * the underlying physical page allocator. Used by system heaps or any + * other user that wants pages of memory but doesn't want to use LK + * specific apis. 
+ */ +#define LOCAL_TRACE 0 + +#if WITH_STATIC_HEAP + +#error "fix static heap post page allocator and novm stuff" + +#if !defined(HEAP_START) || !defined(HEAP_LEN) +#error WITH_STATIC_HEAP set but no HEAP_START or HEAP_LEN defined +#endif + +#endif + +void *page_alloc(size_t pages) { +#if WITH_KERNEL_VM + void *result = pmm_alloc_kpages(pages, NULL); + return result; +#else + void *result = novm_alloc_pages(pages, NOVM_ARENA_ANY); + return result; +#endif +} + +void page_free(void *ptr, size_t pages) { +#if WITH_KERNEL_VM + DEBUG_ASSERT(IS_PAGE_ALIGNED((uintptr_t)ptr)); + + pmm_free_kpages(ptr, pages); +#else + novm_free_pages(ptr, pages); +#endif +} + +void *page_first_alloc(size_t *size_return) { +#if WITH_KERNEL_VM + *size_return = PAGE_SIZE; + return page_alloc(1); +#else + return novm_alloc_unaligned(size_return); +#endif +} + +#if LK_DEBUGLEVEL > 1 +#if WITH_LIB_CONSOLE + +#include + +static int cmd_page_alloc(int argc, const cmd_args *argv); +static void page_alloc_dump(void); + +STATIC_COMMAND_START +STATIC_COMMAND("page_alloc", "page allocator debug commands", &cmd_page_alloc) +STATIC_COMMAND_END(page_alloc); + +static int cmd_page_alloc(int argc, const cmd_args *argv) +{ + if (argc != 2) { +notenoughargs: + printf("not enough arguments\n"); +usage: + printf("usage:\n"); + printf("\t%s info\n", argv[0].str); + return -1; + } + + if (strcmp(argv[1].str, "info") == 0) { + page_alloc_dump(); + } else { + printf("unrecognized command\n"); + goto usage; + } + + return 0; +} + +static void page_alloc_dump(void) +{ +#ifdef WITH_KERNEL_VM + dprintf(INFO, "Page allocator is based on pmm\n"); +#else + dprintf(INFO, "Page allocator is based on novm\n"); +#endif +} + +#endif +#endif diff --git a/lib/heap/rules.mk b/lib/heap/rules.mk index 5c9d2d49..d332d922 100644 --- a/lib/heap/rules.mk +++ b/lib/heap/rules.mk @@ -1,8 +1,29 @@ LOCAL_DIR := $(GET_LOCAL_DIR) +GLOBAL_INCLUDES += $(LOCAL_DIR)/include + MODULE := $(LOCAL_DIR) MODULE_SRCS += \ - $(LOCAL_DIR)/heap.c + 
$(LOCAL_DIR)/heap_wrapper.c \ + $(LOCAL_DIR)/page_alloc.c + +ifeq ($(WITH_CPP_SUPPORT),true) +MODULE_SRCS += \ + $(LOCAL_DIR)/new.cpp +endif + +# pick a heap implementation +ifndef LK_HEAP_IMPLEMENTATION +LK_HEAP_IMPLEMENTATION=miniheap +endif +ifeq ($(LK_HEAP_IMPLEMENTATION),miniheap) +MODULE_DEPS := $(LOCAL_DIR)/miniheap +endif +ifeq ($(LK_HEAP_IMPLEMENTATION),dlmalloc) +MODULE_DEPS := $(LOCAL_DIR)/dlmalloc +endif + +GLOBAL_DEFINES += LK_HEAP_IMPLEMENTATION=$(LK_HEAP_IMPLEMENTATION) include make/module.mk diff --git a/lib/libc/rules.mk b/lib/libc/rules.mk index d2130509..6548dc7c 100644 --- a/lib/libc/rules.mk +++ b/lib/libc/rules.mk @@ -2,6 +2,9 @@ LOCAL_DIR := $(GET_LOCAL_DIR) MODULE := $(LOCAL_DIR) +MODULE_DEPS := \ + lib/heap + MODULE_SRCS += \ $(LOCAL_DIR)/atoi.c \ $(LOCAL_DIR)/bsearch.c \ @@ -15,13 +18,8 @@ MODULE_SRCS += \ $(LOCAL_DIR)/qsort.c \ $(LOCAL_DIR)/eabi.c -ifneq ($(WITH_CUSTOM_MALLOC),true) -MODULE_SRCS += $(LOCAL_DIR)/malloc.c -endif - ifeq ($(WITH_CPP_SUPPORT),true) MODULE_SRCS += \ - $(LOCAL_DIR)/new.cpp \ $(LOCAL_DIR)/atexit.c \ $(LOCAL_DIR)/pure_virtual.cpp endif diff --git a/platform/alterasoc/rules.mk b/platform/alterasoc/rules.mk index c0a41592..3cf03131 100644 --- a/platform/alterasoc/rules.mk +++ b/platform/alterasoc/rules.mk @@ -20,10 +20,6 @@ MODULE_SRCS += \ MEMBASE := 0x0 MEMSIZE ?= 0x10000000 # 256MB -GLOBAL_DEFINES += \ - MEMBASE=$(MEMBASE) \ - MEMSIZE=$(MEMSIZE) - LINKER_SCRIPT += \ $(BUILDDIR)/system-onesegment.ld diff --git a/platform/am335x/rules.mk b/platform/am335x/rules.mk index b3342564..ecbf1625 100644 --- a/platform/am335x/rules.mk +++ b/platform/am335x/rules.mk @@ -34,7 +34,6 @@ LINKER_SCRIPT += \ $(BUILDDIR)/system-onesegment.ld GLOBAL_DEFINES += \ - MEMBASE=$(MEMBASE) \ SDRAM_BASE=$(MEMBASE) \ SDRAM_SIZE=$(MEMSIZE) \ diff --git a/platform/armemu/rules.mk b/platform/armemu/rules.mk index 711a3db5..4af5cb0c 100644 --- a/platform/armemu/rules.mk +++ b/platform/armemu/rules.mk @@ -29,10 +29,6 @@ MODULE_DEPS += \ MEMBASE 
:= 0x0 MEMSIZE := 0x400000 # 4MB -GLOBAL_DEFINES += \ - MEMBASE=$(MEMBASE) \ - MEMSIZE=$(MEMSIZE) - LINKER_SCRIPT += \ $(BUILDDIR)/system-onesegment.ld diff --git a/platform/bcm2835/rules.mk b/platform/bcm2835/rules.mk index 26f2b7e1..62b6da58 100644 --- a/platform/bcm2835/rules.mk +++ b/platform/bcm2835/rules.mk @@ -33,8 +33,6 @@ KERNEL_LOAD_OFFSET := 0x00008000 # loaded 32KB into physical KERNEL_BASE = 0x80000000 GLOBAL_DEFINES += \ - MEMBASE=$(MEMBASE) \ - MEMSIZE=$(MEMSIZE) \ ARM_ARCH_WAIT_FOR_SECONDARIES=1 LINKER_SCRIPT += \ diff --git a/platform/microblaze/rules.mk b/platform/microblaze/rules.mk index 492184ff..3d014e97 100644 --- a/platform/microblaze/rules.mk +++ b/platform/microblaze/rules.mk @@ -18,8 +18,4 @@ MEMSIZE ?= 0x20000 # 128KB MODULE_DEPS += \ -GLOBAL_DEFINES += \ - MEMBASE=$(MEMBASE) \ - MEMSIZE=$(MEMSIZE) - include make/module.mk diff --git a/platform/omap3/rules.mk b/platform/omap3/rules.mk index 68f2379b..a091a927 100644 --- a/platform/omap3/rules.mk +++ b/platform/omap3/rules.mk @@ -23,7 +23,7 @@ MODULE_SRCS += \ MEMBASE := 0x80000000 -GLOBAL_DEFINES += MEMBASE=$(MEMBASE) \ +GLOBAL_DEFINES += \ WITH_CPU_EARLY_INIT=1 LINKER_SCRIPT += \ diff --git a/platform/or1ksim/rules.mk b/platform/or1ksim/rules.mk index fc644991..f8782449 100644 --- a/platform/or1ksim/rules.mk +++ b/platform/or1ksim/rules.mk @@ -21,8 +21,4 @@ WITH_KERNEL_VM=1 KERNEL_BASE = 0xc0000000 -GLOBAL_DEFINES += \ - MEMBASE=$(MEMBASE) \ - MEMSIZE=$(MEMSIZE) - include make/module.mk diff --git a/platform/qemu-virt/rules.mk b/platform/qemu-virt/rules.mk index f0702f74..121108ff 100644 --- a/platform/qemu-virt/rules.mk +++ b/platform/qemu-virt/rules.mk @@ -13,6 +13,8 @@ ARM_CPU ?= cortex-a15 endif WITH_SMP ?= 1 +LK_HEAP_IMPLEMENTATION ?= dlmalloc + MODULE_SRCS += \ $(LOCAL_DIR)/debug.c \ $(LOCAL_DIR)/platform.c \ diff --git a/platform/stm32f4xx/rules.mk b/platform/stm32f4xx/rules.mk index 3dc67e14..54fc5a52 100644 --- a/platform/stm32f4xx/rules.mk +++ b/platform/stm32f4xx/rules.mk @@ 
-26,9 +26,6 @@ ifeq ($(FOUND_CHIP),) $(error unknown STM32F4xx chip $(STM32_CHIP)) endif -GLOBAL_DEFINES += \ - MEMSIZE=$(MEMSIZE) - GLOBAL_INCLUDES += \ $(LOCAL_DIR)/include/dev diff --git a/platform/stm32f7xx/init.c b/platform/stm32f7xx/init.c index 079eabb1..9e05cf6c 100644 --- a/platform/stm32f7xx/init.c +++ b/platform/stm32f7xx/init.c @@ -28,6 +28,7 @@ #include #include #include +#include #include uint32_t SystemCoreClock = HSI_VALUE; @@ -291,6 +292,9 @@ void platform_early_init(void) #if defined(ENABLE_SDRAM) /* initialize SDRAM */ stm32_sdram_init((sdram_config_t *)&target_sdram_config); + + /* add a novm arena for it */ + novm_add_arena("sdram", SDRAM_BASE, SDRAM_SIZE); #endif mpu_init(); diff --git a/platform/stm32f7xx/rules.mk b/platform/stm32f7xx/rules.mk index bc92d92b..b1f862d8 100644 --- a/platform/stm32f7xx/rules.mk +++ b/platform/stm32f7xx/rules.mk @@ -23,9 +23,11 @@ ifeq ($(FOUND_CHIP),) $(error unknown STM32F7xx chip $(STM32_CHIP)) endif +LK_HEAP_IMPLEMENTATION ?= miniheap + GLOBAL_DEFINES += \ - MEMSIZE=$(MEMSIZE) \ - PLATFORM_SUPPORTS_PANIC_SHELL=1 + PLATFORM_SUPPORTS_PANIC_SHELL=1 \ + NOVM_MAX_ARENAS=2 MODULE_SRCS += \ $(LOCAL_DIR)/debug.c \ diff --git a/platform/zynq/rules.mk b/platform/zynq/rules.mk index 230e6dfc..77a8e497 100644 --- a/platform/zynq/rules.mk +++ b/platform/zynq/rules.mk @@ -72,8 +72,6 @@ endif KERNEL_BASE = 0xc0000000 GLOBAL_DEFINES += \ - MEMBASE=$(MEMBASE) \ - MEMSIZE=$(MEMSIZE) \ SDRAM_SIZE=$(ZYNQ_SDRAM_SIZE) LINKER_SCRIPT += \ diff --git a/platform/zynq/spiflash.c b/platform/zynq/spiflash.c index 488cf5fd..98b66d8f 100644 --- a/platform/zynq/spiflash.c +++ b/platform/zynq/spiflash.c @@ -526,7 +526,7 @@ usage: return -1; } - status_t err = qspi_write_page(&flash.qspi, argv[2].u, (void *)argv[4].u); + status_t err = qspi_write_page(&flash.qspi, argv[2].u, argv[4].p); printf("write_page returns %d\n", err); } else if (!strcmp(argv[1].str, "erase")) { if (argc < 3) goto notenoughargs; diff --git 
a/target/stm32746g-eval2/rules.mk b/target/stm32746g-eval2/rules.mk index 0c67521b..2e7360ee 100644 --- a/target/stm32746g-eval2/rules.mk +++ b/target/stm32746g-eval2/rules.mk @@ -8,13 +8,9 @@ PLATFORM := stm32f7xx SDRAM_SIZE := 0x02000000 SDRAM_BASE := 0xc0000000 -LCD_M_SIZE := 0x0012c000 EXT_SRAM_BASE := 0x68000000 EXT_SRAM_SIZE := 0x00200000 -HEAP_START := 0xc012c000 -HEAP_SIZE := 0x01ed4000 - GLOBAL_DEFINES += \ ENABLE_UART1=1 \ ENABLE_SDRAM=1 \ @@ -23,10 +19,6 @@ GLOBAL_DEFINES += \ EXT_SRAM_BASE=$(EXT_SRAM_BASE) \ EXT_SRAM_SIZE=$(EXT_SRAM_SIZE) \ ENABLE_EXT_SRAM=1 \ -\ - WITH_STATIC_HEAP=1 \ - HEAP_START=$(HEAP_START) \ - HEAP_LEN=$(HEAP_SIZE) \ \ PKTBUF_POOL_SIZE=16 diff --git a/target/stm32f746g-disco/init.c b/target/stm32f746g-disco/init.c index 81983142..48c95fb4 100644 --- a/target/stm32f746g-disco/init.c +++ b/target/stm32f746g-disco/init.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -34,13 +35,13 @@ #include #include #include -#include +#include #if WITH_LIB_MINIP #include #endif -extern uint8_t BSP_LCD_Init(uint32_t fb_address); +extern uint8_t BSP_LCD_Init(void); const sdram_config_t target_sdram_config = { .bus_width = SDRAM_BUS_WIDTH_16, @@ -61,8 +62,8 @@ void target_early_init(void) /* now that the uart gpios are configured, enable the debug uart */ stm32_debug_early_init(); - /* The lcd framebuffer starts at the base of SDRAM */ - BSP_LCD_Init(SDRAM_BASE); + /* start the lcd */ + BSP_LCD_Init(); } void target_init(void) diff --git a/target/stm32f746g-disco/lcd.c b/target/stm32f746g-disco/lcd.c index 3eadeefd..25b26b87 100644 --- a/target/stm32f746g-disco/lcd.c +++ b/target/stm32f746g-disco/lcd.c @@ -57,6 +57,7 @@ #include #include #include +#include #include /* @@ -345,7 +346,7 @@ static void BSP_LCD_ClockConfig(LTDC_HandleTypeDef *hltdc, void *Params) * @brief Initializes the LCD. 
* @retval LCD state */ -uint8_t BSP_LCD_Init(uint32_t fb_address) +uint8_t BSP_LCD_Init(void) { /* Timing Configuration */ ltdc_handle.Init.HorizontalSync = (RK043FN48H_HSYNC - 1); @@ -382,7 +383,13 @@ uint8_t BSP_LCD_Init(uint32_t fb_address) HAL_LTDC_Init(&ltdc_handle); - BSP_LCD_LayerDefaultInit(0, fb_address); + /* allocate the framebuffer */ + size_t fb_size_pages = PAGE_ALIGN(RK043FN48H_WIDTH * RK043FN48H_HEIGHT * 4) / PAGE_SIZE; + void *fb_address = novm_alloc_pages(fb_size_pages, NOVM_ARENA_SECONDARY); + if (!fb_address) + panic("failed to allocate framebuffer for LCD\n"); + + BSP_LCD_LayerDefaultInit(0, (uint32_t)fb_address); BSP_LCD_SelectLayer(0); /* clear framebuffer */ diff --git a/top/init.c b/top/init.c index 19caed5a..a443544e 100644 --- a/top/init.c +++ b/top/init.c @@ -37,7 +37,7 @@ #define LOCAL_TRACE 0 #define TRACE_INIT (LK_DEBUGLEVEL >= 2) #ifndef EARLIEST_TRACE_LEVEL -#define EARLIEST_TRACE_LEVEL LK_INIT_LEVEL_ARCH_EARLY +#define EARLIEST_TRACE_LEVEL LK_INIT_LEVEL_TARGET_EARLY #endif extern const struct lk_init_struct __lk_init[]; diff --git a/top/main.c b/top/main.c index 03875108..3e6c3091 100644 --- a/top/main.c +++ b/top/main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include