[arch][arm] support for more cpus from external sources

Travis Geiselbrecht
2009-06-28 12:13:33 -07:00
parent 388b0b13f7
commit bb4fd9e57e
6 changed files with 113 additions and 18 deletions

View File

@@ -64,9 +64,45 @@ void arch_early_init(void)
     val = (1<<30);
     __asm__ volatile("mcr p10, 7, %0, c8, c0, 0" :: "r" (val));
 #endif
+
+#if ARM_CPU_CORTEX_A8
+    /* enable the cycle count register */
+    uint32_t en;
+    __asm__ volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (en));
+    en &= ~(1<<3); /* cycle count every cycle */
+    en |= 1; /* enable all performance counters */
+    __asm__ volatile("mcr p15, 0, %0, c9, c12, 0" :: "r" (en));
+
+    /* enable cycle counter */
+    en = (1<<31);
+    __asm__ volatile("mcr p15, 0, %0, c9, c12, 1" :: "r" (en));
+#endif
 }
 
 void arch_init(void)
 {
 }
 
+void arch_quiesce(void)
+{
+#if ARM_CPU_CORTEX_A8
+    /* disable the cycle count and performance counters */
+    uint32_t en;
+    __asm__ volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (en));
+    en &= ~1; /* disable all performance counters */
+    __asm__ volatile("mcr p15, 0, %0, c9, c12, 0" :: "r" (en));
+
+    /* disable cycle counter */
+    en = 0;
+    __asm__ volatile("mcr p15, 0, %0, c9, c12, 1" :: "r" (en));
+#endif
+#if ARM_CPU_ARM1136
+    /* disable the cycle count and performance counters */
+    uint32_t en;
+    __asm__ volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (en));
+    en &= ~1; /* disable all performance counters */
+    __asm__ volatile("mcr p15, 0, %0, c15, c12, 0" :: "r" (en));
+#endif
+}
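
The counters enabled above are read back through the new arm_read_cycle_count() added later in this commit. A minimal timing sketch built on it (cycles_for() is illustrative, not part of the commit):

#include <stdint.h>

extern uint32_t arm_read_cycle_count(void); /* added in this commit, see below */

/* Count CPU cycles spent in fn(). The cycle counter is 32 bits wide,
 * so unsigned subtraction stays correct across a single wraparound. */
static uint32_t cycles_for(void (*fn)(void))
{
    uint32_t start = arm_read_cycle_count();
    fn();
    return arm_read_cycle_count() - start;
}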

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008 Travis Geiselbrecht
+ * Copyright (c) 2008-2009 Travis Geiselbrecht
  *
  * Permission is hereby granted, free of charge, to any person obtaining
  * a copy of this software and associated documentation files
@@ -319,32 +319,53 @@ invalidate_cache_v7:
 /* void arch_flush_cache_range(addr_t start, size_t len); */
 FUNCTION(arch_clean_cache_range)
 #if ARM_WITH_CP15
+    add     r2, r0, r1              // calculate the end address
+    bic     r0, #(CACHE_LINE-1)     // align the start with a cache line
 0:
     mcr     p15, 0, r0, c7, c10, 1  // clean cache to PoC by MVA
     add     r0, r0, #CACHE_LINE
-    subs    r1, r1, #CACHE_LINE
-    bhs     0b
+    cmp     r0, r2
+    blo     0b
 
     mov     r0, #0
-    mcr     p15, 0, r0, c7, c10, 4  // data sync barrier (formerly drain write buffer)
+    mcr     p15, 0, r0, c7, c10, 4  // data sync barrier
 #endif
     bx      lr
 
 /* void arch_flush_invalidate_cache_range(addr_t start, size_t len); */
 FUNCTION(arch_clean_invalidate_cache_range)
 #if ARM_WITH_CP15
+    add     r2, r0, r1              // calculate the end address
+    bic     r0, #(CACHE_LINE-1)     // align the start with a cache line
 0:
-    mcr     p15, 0, r0, c7, c14, 1  // clean & invalidate cache to PoC by MVA
+    mcr     p15, 0, r0, c7, c14, 1  // clean & invalidate dcache to PoC by MVA
     add     r0, r0, #CACHE_LINE
-    subs    r1, r1, #CACHE_LINE
-    bhs     0b
+    cmp     r0, r2
+    blo     0b
 
     mov     r0, #0
-    mcr     p15, 0, r0, c7, c10, 4  // data sync barrier (formerly drain write buffer)
-    bx      lr
-#else
-#error unhandled cpu
+    mcr     p15, 0, r0, c7, c10, 4  // data sync barrier
 #endif
+    bx      lr
+
+/* void arch_invalidate_cache_range(addr_t start, size_t len); */
+FUNCTION(arch_invalidate_cache_range)
+#if ARM_WITH_CP15
+    add     r2, r0, r1              // calculate the end address
+    bic     r0, #(CACHE_LINE-1)     // align the start with a cache line
+0:
+    mcr     p15, 0, r0, c7, c6, 1   // invalidate dcache to PoC by MVA
+    add     r0, r0, #CACHE_LINE
+    cmp     r0, r2
+    blo     0b
+
+    mov     r0, #0
+    mcr     p15, 0, r0, c7, c10, 4  // data sync barrier
+#endif
+    bx      lr
 
 #endif // ARM_CPU_...
 #else
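
All three range operations above now share one loop shape: compute the end address from the unaligned start, align the start down to a line boundary, then walk line by line. The end-address compare also covers a tail line that the old length-countdown could miss when the start was unaligned. In C terms (a sketch; CACHE_LINE is CPU-specific and 32 is only an assumed value):

#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE 32 /* assumption; the real value comes from the CPU config */

typedef uintptr_t addr_t;

/* Mirror of the assembly loop: a do/while, since the hardware loop
 * always issues at least one maintenance operation before comparing. */
static void clean_range_sketch(addr_t start, size_t len)
{
    addr_t end = start + len;                       /* add r2, r0, r1 */
    addr_t mva = start & ~(addr_t)(CACHE_LINE - 1); /* bic r0, #(CACHE_LINE-1) */
    do {
        /* mcr p15, 0, mva, c7, c10, 1 -- clean one dcache line by MVA */
        mva += CACHE_LINE;
    } while (mva < end);
    /* mcr p15, 0, 0, c7, c10, 4 -- data sync barrier */
}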

View File

@@ -35,6 +35,7 @@ void arm_mmu_init(void);
 #define MMU_FLAG_BUFFERED 0x2
 #define MMU_FLAG_READWRITE 0x4
 
 void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags);
+void arm_mmu_unmap_section(addr_t vaddr);
 
 #if defined(__cplusplus)

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008 Travis Geiselbrecht
+ * Copyright (c) 2008-2009 Travis Geiselbrecht
  *
  * Permission is hereby granted, free of charge, to any person obtaining
  * a copy of this software and associated documentation files
@@ -42,11 +42,15 @@ static uint32_t *tt = (void *)MMU_TRANSLATION_TABLE_ADDR;
 static uint32_t tt[4096] __ALIGNED(16384);
 #endif
 
 #define MMU_FLAG_CACHED 0x1
 #define MMU_FLAG_BUFFERED 0x2
 #define MMU_FLAG_READWRITE 0x4
 
 void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags)
 {
     int index;
     uint AP;
-    uint CB;
+    uint CB = 0;
+    uint TEX = 0;
 
 #if defined(PLATFORM_MSM7K)
@@ -60,15 +64,37 @@ void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags)
 #endif
 
     AP = (flags & MMU_FLAG_READWRITE) ? 0x3 : 0x2;
+#if 1
     CB = ((flags & MMU_FLAG_CACHED) ? 0x2 : 0) | ((flags & MMU_FLAG_BUFFERED) ? 0x1 : 0);
+#elif 0
+    CB = ((flags & MMU_FLAG_CACHED) ? 0x2 : 0) | ((flags & MMU_FLAG_BUFFERED) ? 0x1 : 0);
+    if (CB) {
+        TEX = 1; // full write allocate on all levels
+    }
+#elif 0
+    // try out some of the extended TEX options
+    if (flags & MMU_FLAG_CACHED) {
+        TEX = 6;
+        CB = 3;
+    }
+#endif
 
     index = vaddr / MB;
 
     // section mapping
-    tt[index] = (paddr & ~(MB-1)) | (AP << 10) | (0<<5) | (CB << 2) | (2<<0);
+    tt[index] = (paddr & ~(MB-1)) | (TEX << 12) | (AP << 10) | (0<<5) | (CB << 2) | (2<<0);
 
     arm_invalidate_tlb();
 }
+
+void arm_mmu_unmap_section(addr_t vaddr)
+{
+    uint index = vaddr / MB;
+    tt[index] = 0;
+    arm_invalidate_tlb();
+}
 
 void arm_mmu_init(void)
 {
     int i;
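
For reference, tt[index] assembles an ARM first-level section descriptor: type bits [1:0] = 2 (section), C/B at bits [3:2], domain field [8:5] = 0, AP at bits [11:10], and TEX at bits [14:12]. A usage sketch for the pair of calls (the address and function name here are hypothetical):

#include <arch/arm/mmu.h> /* assumed include path for the header above */

#define DEV_PADDR 0xa0000000 /* hypothetical 1MB-aligned peripheral base */

void map_device_example(void)
{
    /* device memory: read/write, neither cached nor buffered */
    arm_mmu_map_section(DEV_PADDR, DEV_PADDR, MMU_FLAG_READWRITE);

    /* ... access the device through the identity mapping ... */

    /* new in this commit: clear the section entry and flush the TLB */
    arm_mmu_unmap_section(DEV_PADDR);
}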

View File

@@ -40,7 +40,8 @@ FUNCTION(arch_disable_ints)
 /* int atomic_swap(int *ptr, int val); */
 FUNCTION(atomic_swap)
-    swp     r0, r2, [r1]
+    swp     r2, r1, [r0]
+    mov     r0, r2
     bx      lr
 
 /* int atomic_add(int *ptr, int val); */
@@ -148,7 +149,7 @@ FUNCTION(atomic_or)
 /* void arch_idle(); */
 FUNCTION(arch_idle)
 #if ARM_CPU_CORTEX_A8
-    .word 0xe320f003 /* wfi */
+    wfi
 #elif PLATFORM_MSM7K
     /* TODO: safely handle wfi */
 #elif ARM_CPU_ARM1136 || ARM_CPU_ARM926
@@ -201,3 +202,9 @@ FUNCTION(arm_invalidate_tlb)
 FUNCTION(arch_switch_stacks_and_call)
     mov     sp, r1
     bx      r0
+
+/* uint32_t arm_read_cycle_count(void); */
+FUNCTION(arm_read_cycle_count)
+    mrc     p15, 0, r0, c9, c13, 0
+    bx      lr
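
The atomic_swap fix matters because, under the ARM calling convention, the pointer arrives in r0 and the value in r1; the old instruction had them reversed and clobbered the result. The corrected version returns the previous *ptr, which is enough for a test-and-set lock. A sketch (illustrative only; swp-based locking is deprecated on later ARM cores):

extern int atomic_swap(int *ptr, int val); /* returns the old value of *ptr */

typedef struct { int locked; } spinlock_t;

static void spin_lock(spinlock_t *l)
{
    /* swap in 1 until the value we displaced was 0 (unlocked) */
    while (atomic_swap(&l->locked, 1) != 0)
        ;
}

static void spin_unlock(spinlock_t *l)
{
    atomic_swap(&l->locked, 0); /* atomically release the lock */
}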

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008 Travis Geiselbrecht
+ * Copyright (c) 2008-2009 Travis Geiselbrecht
  *
  * Permission is hereby granted, free of charge, to any person obtaining
  * a copy of this software and associated documentation files
@@ -29,9 +29,13 @@ extern "C" {
 void arch_early_init(void);
 void arch_init(void);
+void arch_quiesce(void);
 
 #if defined(__cplusplus)
 }
 #endif
 
 /* arch specific bits */
 #include <arch/defines.h>
 
 #endif
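
One plausible call site for the new arch_quiesce() (hypothetical; this diff only adds the hook): shutting down arch state, such as the Cortex-A8 performance counters enabled in arch_early_init(), before handing control to another image. The chain_load() name and the prototype for arch_switch_stacks_and_call() are assumptions inferred from the assembly above.

extern void arch_quiesce(void);
extern void arch_switch_stacks_and_call(void (*func)(void), void *stack_top);

static void chain_load(void (*entry)(void), void *stack_top)
{
    arch_quiesce();                                /* stop counters, quiet the arch */
    arch_switch_stacks_and_call(entry, stack_top); /* mov sp, r1; bx r0 */
}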