[arm][gicv3] pull in new version from trusty

This is a pretty major implementation change of the GIC driver: it now
handles v3 as well. Unfortunately it partially reverts some of the code
cleanups that have happened on mainline; I will try to reapply those
shortly.

TODO:
- Fix arm32 and the computation of SGI targets for v3.
- Consider removing all of the secure mode stuff, which really
complicates things and is basically untestable on mainline.
- Properly split v2 and v3 into separate files, and have the main gic.c
act as a redirector of calls.
- Allow both v2 and v3 to compile at the same time.
- Make the configuration runtime-configurable; stop using #define GICBASE
and others (see the sketch below).
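
A sketch of what the runtime configuration could look like once GICBASE is
gone: a hypothetical platform fills in the new struct arm_gic_init_info
(added in this commit) with addresses discovered at runtime instead of
compile-time defines. The addresses below are illustrative only:

    struct arm_gic_init_info info = {
        .gicd_paddr = 0x08000000,   /* illustrative, e.g. from a device tree */
        .gicd_size  = 0x10000,
        .gicr_paddr = 0x080a0000,
        .gicr_size  = GICR_CPU_OFFSET(SMP_MAX_CPUS),
    };
    arm_gic_init_map(&info);  /* maps the registers, then calls arm_gic_init_hw() */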
Travis Geiselbrecht
2025-10-14 01:29:24 -07:00
parent 50864eda02
commit 6a7c4e25d6
9 changed files with 1247 additions and 262 deletions

View File

@@ -1,9 +1,25 @@
/*
* Copyright (c) 2012-2015 Travis Geiselbrecht
* Copyright (c) 2019 LK Trusty Authors. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <assert.h>
#include <lk/bits.h>
@@ -11,14 +27,17 @@
#include <sys/types.h>
#include <lk/debug.h>
#include <dev/interrupt/arm_gic.h>
#include <inttypes.h>
#include <lk/reg.h>
#include <kernel/thread.h>
#include <kernel/debug.h>
#include <kernel/vm.h>
#include <lk/init.h>
#include <platform/interrupts.h>
#include <arch/ops.h>
#include <platform/gic.h>
#include <lk/trace.h>
#if WITH_LIB_SM
#include <lib/sm.h>
#include <lib/sm/sm_err.h>
@@ -26,18 +45,25 @@
#define LOCAL_TRACE 0
#include "arm_gic_common.h"
#if GIC_VERSION > 2
#include "gic_v3.h"
#endif
#if ARCH_ARM
#include <arch/arm.h>
#define iframe arm_iframe
#define IFRAME_PC(frame) ((frame)->pc)
#endif
#if ARCH_ARM64
#include <arch/arm64.h>
#define iframe arm64_iframe_short
#define IFRAME_PC(frame) ((frame)->elr)
#endif
void platform_fiq(struct iframe *frame);
static status_t arm_gic_set_secure_locked(u_int irq, bool secure);
static void gic_set_enable(uint vector, bool enable);
static void arm_gic_init_hw(void);
static spin_lock_t gicd_lock;
#if WITH_LIB_SM
@@ -46,29 +72,42 @@ static spin_lock_t gicd_lock;
#define GICD_LOCK_FLAGS SPIN_LOCK_FLAG_INTERRUPTS
#endif
#define GIC_MAX_PER_CPU_INT 32
#define GIC_MAX_SGI_INT 16
#if ARM_GIC_USE_DOORBELL_NS_IRQ
static bool doorbell_enabled;
#endif
struct arm_gic arm_gics[NUM_ARM_GICS];
static bool arm_gic_check_init(int irq)
{
/* check if we have a vaddr for gicd, both gicv2 and gicv3/4 use this */
if (!arm_gics[0].gicd_vaddr) {
TRACEF("change to interrupt %d ignored before init\n", irq);
return false;
}
return true;
}
#if WITH_LIB_SM
static bool arm_gic_non_secure_interrupts_frozen;
static bool arm_gic_interrupt_change_allowed(int irq)
{
if (!arm_gic_non_secure_interrupts_frozen)
return arm_gic_check_init(irq);
TRACEF("change to interrupt %d ignored after booting ns\n", irq);
return false;
}
#else
static bool arm_gic_interrupt_change_allowed(int irq)
{
return arm_gic_check_init(irq);
}
#endif
struct int_handler_struct {
int_handler handler;
void *arg;
@@ -77,15 +116,20 @@ struct int_handler_struct {
static struct int_handler_struct int_handler_table_per_cpu[GIC_MAX_PER_CPU_INT][SMP_MAX_CPUS];
static struct int_handler_struct int_handler_table_shared[MAX_INT-GIC_MAX_PER_CPU_INT];
static struct int_handler_struct *get_int_handler(unsigned int vector, uint cpu)
{
if (vector < GIC_MAX_PER_CPU_INT)
return &int_handler_table_per_cpu[vector][cpu];
else
return &int_handler_table_shared[vector - GIC_MAX_PER_CPU_INT];
}
#if ARM_GIC_USE_DOORBELL_NS_IRQ
static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority);
#endif
void register_int_handler(unsigned int vector, int_handler handler, void *arg)
{
struct int_handler_struct *h;
uint cpu = arch_curr_cpu_num();
@@ -97,9 +141,29 @@ void register_int_handler(unsigned int vector, int_handler handler, void *arg) {
spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);
if (arm_gic_interrupt_change_allowed(vector)) {
#if GIC_VERSION > 2
arm_gicv3_configure_irq_locked(cpu, vector);
#endif
h = get_int_handler(vector, cpu);
h->handler = handler;
h->arg = arg;
#if ARM_GIC_USE_DOORBELL_NS_IRQ
/*
* Use lowest priority Linux does not mask to allow masking the entire
* group while still allowing other interrupts to be delivered.
*/
arm_gic_set_priority_locked(vector, 0xf7);
#endif
/*
* For GICv3, SGIs are maskable, and on GICv2, whether they are
* maskable is implementation defined. As a result, the caller cannot
* rely on them being maskable, so we enable all registered SGIs as if
* they were non-maskable.
*/
if (vector < GIC_MAX_SGI_INT) {
gic_set_enable(vector, true);
}
}
spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
@@ -112,47 +176,10 @@ void register_int_handler_msi(unsigned int vector, int_handler handler, void *ar
register_int_handler(vector, handler, arg);
}
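/*
 * Usage sketch for the SGI path above (the handler name is hypothetical;
 * register_int_handler() and arm_gic_sgi() are this driver's entry points):
 *
 *   static enum handler_return my_ipi_handler(void *arg) {
 *       return INT_RESCHEDULE; // runs on each cpu that receives SGI 1
 *   }
 *
 *   register_int_handler(1, my_ipi_handler, NULL); // also enables SGI 1
 *   arm_gic_sgi(1, 0, 0x3); // flags 0: honor cpu_mask, target cpus 0 and 1
 */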
#define GIC_REG_COUNT(bit_per_reg) DIV_ROUND_UP(MAX_INT, (bit_per_reg))
#define DEFINE_GIC_SHADOW_REG(name, bit_per_reg, init_val, init_from) \
uint32_t (name)[GIC_REG_COUNT(bit_per_reg)] = { \
[(init_from / bit_per_reg) ... \
(GIC_REG_COUNT(bit_per_reg) - 1)] = (init_val) \
}
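/*
 * For reference, DEFINE_GIC_SHADOW_REG(gicd_itargetsr, 4, 0x01010101, 32)
 * below expands (via the GNU range-initializer extension) to roughly:
 *
 *   uint32_t gicd_itargetsr[GIC_REG_COUNT(4)] = {
 *       [8 ... GIC_REG_COUNT(4) - 1] = 0x01010101
 *   };
 *
 * i.e. one target byte per interrupt, defaulting every SPI to cpu 0.
 */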
@@ -161,68 +188,110 @@ static DEFINE_GIC_SHADOW_REG(gicd_igroupr, 32, ~0U, 0);
#endif
static DEFINE_GIC_SHADOW_REG(gicd_itargetsr, 4, 0x01010101, 32);
static void gic_set_enable(uint vector, bool enable)
{
int reg = vector / 32;
uint32_t mask = 1ULL << (vector % 32);
LTRACEF("%s: vector %u, reg %d, mask 0x%x, enable %d\n", __func__, vector, reg, mask, enable);
#if GIC_VERSION > 2
if (reg == 0) {
uint32_t cpu = arch_curr_cpu_num();
/* On GICv3/v4 these are on GICR */
if (enable)
GICRREG_WRITE(0, cpu, GICR_ISENABLER0, mask);
else
GICRREG_WRITE(0, cpu, GICR_ICENABLER0, mask);
return;
}
#endif
if (enable) {
GICDREG_WRITE(0, GICD_ISENABLER(reg), mask);
} else {
GICDREG_WRITE(0, GICD_ICENABLER(reg), mask);
}
#if GIC_VERSION > 2
/* for GIC V3, make sure write is complete */
arm_gicv3_wait_for_write_complete();
#endif
}
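/*
 * Worked example: gic_set_enable(45, true) computes reg = 1, mask = 1 << 13
 * and writes GICD_ISENABLER(1); vectors 0-31 take the GICR path above on
 * GICv3, since SGI/PPI enables live in the redistributor.
 */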
static void arm_gic_init_percpu(uint level)
{
#if GIC_VERSION > 2
/* GICv3/v4 */
arm_gicv3_init_percpu();
#else
/* GICv2 */
#if WITH_LIB_SM
GICCREG_WRITE(0, GICC_CTLR, 0xb); // enable GIC0 and select fiq mode for secure
GICDREG_WRITE(0, GICD_IGROUPR(0), ~0U); /* GICD_IGROUPR0 is banked */
#else
GICCREG_WRITE(0, GICC_CTLR, 1); // enable GIC0
#endif
GICCREG_WRITE(0, GICC_PMR, 0xFF); // unmask interrupts at all priority levels
#endif /* GIC_VERSION > 2 */
}
LK_INIT_HOOK_FLAGS(arm_gic_init_percpu,
arm_gic_init_percpu,
LK_INIT_LEVEL_PLATFORM_EARLY, LK_INIT_FLAG_SECONDARY_CPUS);
static void arm_gic_suspend_cpu(uint level)
{
#if GIC_VERSION > 2
arm_gicv3_suspend_cpu(arch_curr_cpu_num());
#endif
}
LK_INIT_HOOK_FLAGS(arm_gic_suspend_cpu, arm_gic_suspend_cpu,
LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_OFF);
static void arm_gic_resume_cpu(uint level)
{
spin_lock_saved_state_t state;
__UNUSED bool resume_gicd = false;
spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);
#if GIC_VERSION > 2
if (!(GICDREG_READ(0, GICD_CTLR) & 5)) {
#else
if (!(GICDREG_READ(0, GICD_CTLR) & 1)) {
#endif
dprintf(SPEW, "%s: distributor is off, calling arm_gic_init_hw instead\n", __func__);
arm_gic_init_hw();
resume_gicd = true;
} else {
arm_gic_init_percpu(0);
}
#if GIC_VERSION > 2
{
uint cpu = arch_curr_cpu_num();
uint max_irq = resume_gicd ? MAX_INT : GIC_MAX_PER_CPU_INT;
for (uint v = 0; v < max_irq; v++) {
struct int_handler_struct *h = get_int_handler(v, cpu);
if (h->handler) {
arm_gicv3_configure_irq_locked(cpu, v);
}
}
arm_gicv3_resume_cpu_locked(cpu, resume_gicd);
}
#endif
spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
}
LK_INIT_HOOK_FLAGS(arm_gic_resume_cpu, arm_gic_resume_cpu,
LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_RESUME);
static int arm_gic_max_cpu(void)
{
return (GICDREG_READ(0, GICD_TYPER) >> 5) & 0x7;
}
static status_t gic_configure_interrupt(unsigned int vector,
@@ -242,21 +311,25 @@ static status_t gic_configure_interrupt(unsigned int vector,
// 16 irqs encoded per ICFGR register
uint32_t reg_ndx = vector >> 4;
uint32_t bit_shift = ((vector & 0xf) << 1) + 1;
uint32_t reg_val = GICDREG_READ(0, GICD_ICFGR(reg_ndx));
if (tm == IRQ_TRIGGER_MODE_EDGE) {
reg_val |= (1U << bit_shift);
} else {
reg_val &= ~(1U << bit_shift);
}
GICDREG_WRITE(0, GICD_ICFGR(reg_ndx), reg_val);
return NO_ERROR;
}
void arm_gic_init_hw(void) {
#if GIC_VERSION > 2
/* GICv3/v4 */
arm_gicv3_init();
#else
// Are we a GICv2?
// NOTE: probably crashes on a V3
uint32_t iidr = GICCREG_READ(0, GICC_IIDR);
if (BITS_SHIFT(iidr, 19, 16) != 0x2) {
dprintf(CRITICAL, "GIC: not a GICv2, IIDR 0x%x\n", iidr);
return;
@@ -264,7 +337,7 @@ void arm_gic_init(void) {
dprintf(INFO, "GIC: version %lu\n", BITS_SHIFT(iidr, 19, 16));
// Read how many cpus and interrupts we support
uint32_t type = GICDREG_READ(0, GICD_TYPER);
uint32_t cpu_count = (type >> 5) & 0x7;
uint32_t it_lines = (type & 0x1f) + 1;
if (it_lines > 6) {
@@ -277,14 +350,14 @@ void arm_gic_init(void) {
dprintf(INFO, "GICv2: GICD_TYPER 0x%x, cpu_count %u, max_int %u\n", type, cpu_count + 1, max_int);
for (int i = 0; i < max_int; i+= 32) {
GICDREG_WRITE(0, GICD_ICENABLER(i / 32), ~0U);
GICDREG_WRITE(0, GICD_ICPENDR(i / 32), ~0U);
}
if (arm_gic_max_cpu() > 0) {
/* Set external interrupts to target cpu 0 */
for (int i = 32; i < MAX_INT; i += 4) {
GICDREG_WRITE(0, GICD_ITARGETSR(i / 4), gicd_itargetsr[i / 4]);
}
}
@@ -293,10 +366,9 @@ void arm_gic_init(void) {
gic_configure_interrupt(i, IRQ_TRIGGER_MODE_EDGE, IRQ_POLARITY_ACTIVE_HIGH);
}
GICDREG_WRITE(0, GICD_CTLR, 1); // enable GIC0
#if WITH_LIB_SM
GICDREG_WRITE(0, GICD_CTLR, 3); // enable GIC0 ns interrupts
/*
* Iterate through all IRQs and set them to non-secure
* mode. This will allow the non-secure side to handle
@@ -304,13 +376,91 @@ void arm_gic_init(void) {
* all the interrupts we don't explicitly claim.
*/
for (int i = 32; i < max_int; i += 32) {
u_int reg = i / 32;
GICDREG_WRITE(0, GICD_IGROUPR(reg), gicd_igroupr[reg]);
}
#endif
#endif /* GIC_VERSION > 2 */
arm_gic_init_percpu(0);
}
void arm_gic_init(void) {
#ifdef GICBASE
arm_gics[0].gicd_vaddr = GICBASE(0) + GICD_OFFSET;
arm_gics[0].gicd_size = GICD_MIN_SIZE;
TRACEF("GICD base %#lx, size %#zx\n", arm_gics[0].gicd_vaddr, arm_gics[0].gicd_size);
#if GIC_VERSION > 2
arm_gics[0].gicr_vaddr = GICBASE(0) + GICR_OFFSET;
arm_gics[0].gicr_size = GICR_CPU_OFFSET(SMP_MAX_CPUS);
TRACEF("GICR base %#lx, size %#zx\n", arm_gics[0].gicr_vaddr, arm_gics[0].gicr_size);
#else /* GIC_VERSION > 2 */
arm_gics[0].gicc_vaddr = GICBASE(0) + GICC_OFFSET;
arm_gics[0].gicc_size = GICC_MIN_SIZE;
TRACEF("GICC base %#lx, size %#zx\n", arm_gics[0].gicc_vaddr, arm_gics[0].gicc_size);
#endif /* GIC_VERSION > 2 */
#else
/* Platforms should define GICBASE if they want to call this */
panic("%s: GICBASE not defined\n", __func__);
#endif /* GICBASE */
// TODO: map these registers and use those
arm_gic_init_hw();
}
static void arm_map_regs(const char* name,
vaddr_t* vaddr,
paddr_t paddr,
size_t size) {
status_t ret;
void* vaddrp = (void*)vaddr;
if (!size) {
return;
}
ret = vmm_alloc_physical(vmm_get_kernel_aspace(), "gic", size, &vaddrp, 0,
paddr, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE |
ARCH_MMU_FLAG_PERM_NO_EXECUTE);
if (ret) {
panic("%s: failed %d\n", __func__, ret);
}
*vaddr = (vaddr_t)vaddrp;
}
void arm_gic_init_map(struct arm_gic_init_info* init_info)
{
if (init_info->gicd_size < GICD_MIN_SIZE) {
panic("%s: gicd mapping too small %zu\n", __func__,
init_info->gicd_size);
}
arm_map_regs("gicd", &arm_gics[0].gicd_vaddr, init_info->gicd_paddr,
init_info->gicd_size);
arm_gics[0].gicd_size = init_info->gicd_size;
#if GIC_VERSION > 2
if (init_info->gicr_size < GICR_CPU_OFFSET(SMP_MAX_CPUS)) {
panic("%s: gicr mapping too small %zu\n", __func__,
init_info->gicr_size);
}
arm_map_regs("gicr", &arm_gics[0].gicr_vaddr, init_info->gicr_paddr,
init_info->gicr_size);
arm_gics[0].gicr_size = init_info->gicr_size;
#else /* GIC_VERSION > 2 */
if (init_info->gicc_size < GICC_MIN_SIZE) {
panic("%s: gicc mapping too small %zu\n", __func__,
init_info->gicc_size);
}
arm_map_regs("gicc", &arm_gics[0].gicc_vaddr, init_info->gicc_paddr,
init_info->gicc_size);
arm_gics[0].gicc_size = init_info->gicc_size;
#endif /* GIC_VERSION > 2 */
arm_gic_init_hw();
}
static status_t arm_gic_set_secure_locked(u_int irq, bool secure)
{
#if WITH_LIB_SM
int reg = irq / 32;
uint32_t mask = 1ULL << (irq % 32);
@@ -319,16 +469,17 @@ static status_t arm_gic_set_secure_locked(u_int irq, bool secure) {
return ERR_INVALID_ARGS;
if (secure)
GICDREG_WRITE(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] &= ~mask));
else
GICDREG_WRITE(0, GICD_IGROUPR(reg), (gicd_igroupr[reg] |= mask));
LTRACEF("irq %d, secure %d, GICD_IGROUP%d = %x\n",
irq, secure, reg, GICDREG_READ(0, GICD_IGROUPR(reg)));
#endif
return NO_ERROR;
}
static status_t arm_gic_set_target_locked(u_int irq, u_int cpu_mask, u_int enable_mask)
{
u_int reg = irq / 4;
u_int shift = 8 * (irq % 4);
u_int old_val;
@@ -337,68 +488,109 @@ static status_t arm_gic_set_target_locked(u_int irq, u_int cpu_mask, u_int enabl
cpu_mask = (cpu_mask & 0xff) << shift;
enable_mask = (enable_mask << shift) & cpu_mask;
old_val = GICDREG_READ(0, GICD_ITARGETSR(reg));
new_val = (gicd_itargetsr[reg] & ~cpu_mask) | enable_mask;
GICDREG_WRITE(0, GICD_ITARGETSR(reg), (gicd_itargetsr[reg] = new_val));
LTRACEF("irq %i, GICD_ITARGETSR%d %x => %x (got %x)\n",
irq, reg, old_val, new_val, GICDREG_READ(0, GICD_ITARGETSR(reg)));
return NO_ERROR;
}
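/*
 * Worked example: arm_gic_set_target_locked(34, 0x3, 0x3) selects
 * reg = 34 / 4 = 8 and shift = 8 * (34 % 4) = 16, updating byte 2 of
 * GICD_ITARGETSR(8) so SPI 34 targets cpus 0 and 1.
 */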
static status_t arm_gic_get_priority(u_int irq)
{
u_int reg = irq / 4;
u_int shift = 8 * (irq % 4);
return (GICDREG_READ(0, GICD_IPRIORITYR(reg)) >> shift) & 0xff;
}
static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority)
{
u_int reg = irq / 4;
u_int shift = 8 * (irq % 4);
u_int mask = 0xffU << shift;
uint32_t regval;
#if GIC_VERSION > 2
if (irq < 32) {
uint cpu = arch_curr_cpu_num();
/* On GICv3 IPRIORITY registers are on redistributor */
regval = GICRREG_READ(0, cpu, GICR_IPRIORITYR(reg));
LTRACEF("irq %i, cpu %d: old GICR_IPRIORITYR%d = %x\n", irq, cpu, reg,
regval);
regval = (regval & ~mask) | ((uint32_t)priority << shift);
GICRREG_WRITE(0, cpu, GICR_IPRIORITYR(reg), regval);
LTRACEF("irq %i, cpu %d, new GICD_IPRIORITYR%d = %x, req %x\n",
irq, cpu, reg, GICDREG_READ(0, GICD_IPRIORITYR(reg)), regval);
return 0;
}
#endif
regval = GICDREG_READ(0, GICD_IPRIORITYR(reg));
LTRACEF("irq %i, old GICD_IPRIORITYR%d = %x\n", irq, reg, regval);
regval = (regval & ~mask) | ((uint32_t)priority << shift);
gicreg_write32(0, GICD_IPRIORITYR(reg), regval);
GICDREG_WRITE(0, GICD_IPRIORITYR(reg), regval);
LTRACEF("irq %i, new GICD_IPRIORITYR%d = %x, req %x\n",
irq, reg, gicreg_read32(0, GICD_IPRIORITYR(reg)), regval);
irq, reg, GICDREG_READ(0, GICD_IPRIORITYR(reg)), regval);
return 0;
}
status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask)
{
if (irq >= 16) {
return ERR_INVALID_ARGS;
}
#if GIC_VERSION > 2
for (size_t cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {
if (!((cpu_mask >> cpu) & 1)) {
continue;
}
uint64_t val = arm_gicv3_sgir_val(irq, cpu);
GICCREG_WRITE(0, GICC_PRIMARY_SGIR, val);
}
#else /* else GIC_VERSION > 2 */
u_int val =
((flags & ARM_GIC_SGI_FLAG_TARGET_FILTER_MASK) << 24) |
((cpu_mask & 0xff) << 16) |
((flags & ARM_GIC_SGI_FLAG_NS) ? (1U << 15) : 0) |
(irq & 0xf);
LTRACEF("GICD_SGIR: %x\n", val);
GICDREG_WRITE(0, GICD_SGIR, val);
#endif /* else GIC_VERSION > 2 */
return NO_ERROR;
}
status_t mask_interrupt(unsigned int vector)
{
if (vector >= MAX_INT)
return ERR_INVALID_ARGS;
LTRACEF("mask_interrupt %d\n", vector);
if (arm_gic_interrupt_change_allowed(vector))
gic_set_enable(vector, false);
return NO_ERROR;
}
status_t unmask_interrupt(unsigned int vector)
{
if (vector >= MAX_INT)
return ERR_INVALID_ARGS;
LTRACEF("unmask_interrupt %d\n", vector);
if (arm_gic_interrupt_change_allowed(vector))
gic_set_enable(vector, true);
@@ -406,14 +598,20 @@ status_t unmask_interrupt(unsigned int vector) {
}
static
enum handler_return __platform_irq(struct iframe *frame)
{
// get the current vector
uint32_t iar = GICCREG_READ(0, GICC_PRIMARY_IAR);
unsigned int vector = iar & 0x3ff;
if (vector >= 0x3fe) {
#if WITH_LIB_SM && ARM_GIC_USE_DOORBELL_NS_IRQ
// spurious or non-secure interrupt
return sm_handle_irq();
#else
// spurious
return INT_NO_RESCHEDULE;
#endif
}
THREAD_STATS_INC(interrupts);
@@ -421,7 +619,7 @@ enum handler_return __platform_irq(struct iframe *frame) {
uint cpu = arch_curr_cpu_num();
LTRACEF_LEVEL(2, "iar 0x%x cpu %u currthread %p vector %d pc 0x%lx\n", iar, cpu,
LTRACEF_LEVEL(2, "iar 0x%x cpu %u currthread %p vector %d pc 0x%" PRIxPTR "\n", iar, cpu,
get_current_thread(), vector, (uintptr_t)IFRAME_PC(frame));
// deliver the interrupt
@@ -432,7 +630,7 @@ enum handler_return __platform_irq(struct iframe *frame) {
if (handler->handler)
ret = handler->handler(handler->arg);
GICCREG_WRITE(0, GICC_PRIMARY_EOIR, iar);
LTRACEF_LEVEL(2, "cpu %u exit %d\n", cpu, ret);
@@ -441,14 +639,25 @@ enum handler_return __platform_irq(struct iframe *frame) {
return ret;
}
enum handler_return platform_irq(struct iframe *frame);
enum handler_return platform_irq(struct iframe *frame)
{
#if WITH_LIB_SM && !ARM_GIC_USE_DOORBELL_NS_IRQ
uint32_t ahppir = GICCREG_READ(0, GICC_PRIMARY_HPPIR);
uint32_t pending_irq = ahppir & 0x3ff;
struct int_handler_struct *h;
uint cpu = arch_curr_cpu_num();
#if ARM_MERGE_FIQ_IRQ
{
uint32_t hppir = GICCREG_READ(0, GICC_HPPIR);
uint32_t pending_fiq = hppir & 0x3ff;
if (pending_fiq < MAX_INT) {
platform_fiq(frame);
return INT_NO_RESCHEDULE;
}
}
#endif
LTRACEF("ahppir %d\n", ahppir);
if (pending_irq < MAX_INT && get_int_handler(pending_irq, cpu)->handler) {
enum handler_return ret = 0;
@@ -465,7 +674,7 @@ enum handler_return platform_irq(struct iframe *frame) {
old_priority = arm_gic_get_priority(pending_irq);
arm_gic_set_priority_locked(pending_irq, 0);
DSB;
irq = GICCREG_READ(0, GICC_PRIMARY_IAR) & 0x3ff;
arm_gic_set_priority_locked(pending_irq, old_priority);
spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
@@ -475,7 +684,7 @@ enum handler_return platform_irq(struct iframe *frame) {
ret = h->handler(h->arg);
else
TRACEF("unexpected irq %d != %d may get lost\n", irq, pending_irq);
GICCREG_WRITE(0, GICC_PRIMARY_EOIR, irq);
return ret;
}
return sm_handle_irq();
@@ -484,8 +693,8 @@ enum handler_return platform_irq(struct iframe *frame) {
#endif
}
void platform_fiq(struct iframe *frame);
void platform_fiq(struct iframe *frame)
{
#if WITH_LIB_SM
sm_handle_fiq();
#else
@@ -494,28 +703,40 @@ void platform_fiq(struct iframe *frame) {
}
#if WITH_LIB_SM
static status_t arm_gic_get_next_irq_locked(u_int min_irq, uint type)
{
#if ARM_GIC_USE_DOORBELL_NS_IRQ
if (type == TRUSTY_IRQ_TYPE_DOORBELL && min_irq <= ARM_GIC_DOORBELL_IRQ) {
doorbell_enabled = true;
return ARM_GIC_DOORBELL_IRQ;
}
#else
u_int irq;
u_int max_irq = type == TRUSTY_IRQ_TYPE_PER_CPU ? GIC_MAX_PER_CPU_INT :
type == TRUSTY_IRQ_TYPE_NORMAL ? MAX_INT : 0;
uint cpu = arch_curr_cpu_num();
if (type == TRUSTY_IRQ_TYPE_NORMAL && min_irq < GIC_MAX_PER_CPU_INT)
min_irq = GIC_MAX_PER_CPU_INT;
for (irq = min_irq; irq < max_irq; irq++)
if (get_int_handler(irq, cpu)->handler)
return irq;
#endif
return SM_ERR_END_OF_INPUT;
}
long smc_intc_get_next_irq(struct smc32_args *args)
{
status_t ret;
spin_lock_saved_state_t state;
spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);
#if !ARM_GIC_USE_DOORBELL_NS_IRQ
arm_gic_non_secure_interrupts_frozen = true;
#endif
ret = arm_gic_get_next_irq_locked(args->params[0], args->params[1]);
LTRACEF("min_irq %d, per_cpu %d, ret %d\n",
args->params[0], args->params[1], ret);
@@ -525,120 +746,49 @@ long smc_intc_get_next_irq(smc32_args_t *args) {
return ret;
}
void sm_intc_enable_interrupts(void)
{
#if ARM_GIC_USE_DOORBELL_NS_IRQ
GICCREG_WRITE(0, icc_igrpen1_el1, 1); /* Enable secure Group 1 */
DSB;
#endif
}
status_t sm_intc_fiq_enter(void)
{
u_int cpu = arch_curr_cpu_num();
#if GIC_VERSION > 2
u_int irq = GICCREG_READ(0, icc_iar0_el1) & 0x3ff;
#else
u_int irq = GICCREG_READ(0, GICC_IAR) & 0x3ff;
#endif
LTRACEF("cpu %d, irq %i\n", cpu, irq);
if (irq >= 1020) {
LTRACEF("spurious fiq: cpu %d, old %d, new %d\n", cpu, current_fiq[cpu], irq);
#if ARM_GIC_USE_DOORBELL_NS_IRQ
uint64_t val = arm_gicv3_sgir_val(ARM_GIC_DOORBELL_IRQ, cpu);
GICCREG_WRITE(0, icc_igrpen1_el1, 0); /* Disable secure Group 1 */
DSB;
if (doorbell_enabled) {
LTRACEF("GICD_SGIR: %" PRIx64 "\n", val);
GICCREG_WRITE(0, icc_asgi1r_el1, val);
}
#else
LTRACEF("spurious fiq: cpu %d, new %d\n", cpu, irq);
#endif
return ERR_NO_MSG;
}
#if GIC_VERSION > 2
GICCREG_WRITE(0, icc_eoir0_el1, irq);
#else
GICCREG_WRITE(0, GICC_EOIR, irq);
#endif
return 0;
}
void sm_intc_fiq_exit(void)
{
LTRACEF("cpu %d\n", arch_curr_cpu_num());
}
#endif

View File

@@ -0,0 +1,345 @@
/*
* Copyright (c) 2012-2019 LK Trusty Authors. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <platform/gic.h>
#include <lk/reg.h>
#if ARCH_ARM
#include <arch/arm.h>
#endif
#if ARCH_ARM64
#include <arch/arm64.h>
#endif
#ifdef ARCH_ARM
/*
* AArch32 does not have 64 bit mmio support, but the gic spec allows 32 bit
* upper and lower access to _most_ 64 bit gic registers (not GICR_VSGIPENDR,
* GICR_VSGIR or GITS_SGIR).
*/
/* TODO: add mmio_read64 when needed */
static inline void mmio_write64(volatile uint64_t *ptr64, uint64_t val) {
volatile uint32_t *ptr = (volatile uint32_t *)ptr64;
mmio_write32(ptr, (uint32_t)val);
mmio_write32(ptr + 1, val >> 32);
}
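/*
 * E.g. mmio_write64(ptr, 0x1122334455667788ULL) issues two 32-bit writes:
 * the low half 0x55667788 to ptr, then the high half 0x11223344 to ptr + 1
 * (i.e. byte offset 4).
 */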
#endif
struct arm_gic {
vaddr_t gicc_vaddr;
size_t gicc_size;
vaddr_t gicd_vaddr;
size_t gicd_size;
vaddr_t gicr_vaddr;
size_t gicr_size;
};
#define NUM_ARM_GICS 1
extern struct arm_gic arm_gics[NUM_ARM_GICS];
#if GIC_VERSION > 2
#if WITH_LIB_SM
#define ARM_GIC_USE_DOORBELL_NS_IRQ 1
#define ARM_GIC_DOORBELL_IRQ 13
#endif
/* GICv3/v4 */
#define GICV3_IRQ_GROUP_GRP0S 0
#define GICV3_IRQ_GROUP_GRP1NS 1
#define GICV3_IRQ_GROUP_GRP1S 2
#ifndef ARM_GIC_SELECTED_IRQ_GROUP
#define ARM_GIC_SELECTED_IRQ_GROUP GRP1NS
#endif
#define COMBINE2(a, b) a ## b
#define XCOMBINE2(a, b) COMBINE2(a,b)
#define GICV3_IRQ_GROUP XCOMBINE2(GICV3_IRQ_GROUP_, ARM_GIC_SELECTED_IRQ_GROUP)
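/*
 * The group value encodes the two per-interrupt group bits consumed by
 * gic_v3.c (bit 0 -> GICD_IGROUPR/GICR_IGROUPR0, bit 1 -> GICD_IGRPMODR/
 * GICR_IGRPMODR0):
 *   GRP0S  = 0: IGROUP=0, IGRPMOD=0 (Secure Group 0)
 *   GRP1NS = 1: IGROUP=1, IGRPMOD=0 (Non-secure Group 1)
 *   GRP1S  = 2: IGROUP=0, IGRPMOD=1 (Secure Group 1)
 */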
/*
* In ARMv8, for GICv3/v4, ARM suggests using the system register
* interface to access the GICC instead of memory-mapped registers.
*/
#ifdef ARCH_ARM64
#define GICCREG_READ(gic, reg) ARM64_READ_SYSREG(reg)
#define GICCREG_WRITE(gic, reg, val) ARM64_WRITE_SYSREG(reg, (uint64_t)val)
#else /* ARCH_ARM64 */
/* For 32-bit mode, access these registers through CP15 coprocessor ops */
#define GICCREG_READ(gic, reg) COMBINE2(arm_read_,reg)()
#define GICCREG_WRITE(gic, reg, val) COMBINE2(arm_write_,reg)(val)
GEN_CP15_REG_FUNCS(icc_ctlr_el1, 0, c12, c12, 4);
GEN_CP15_REG_FUNCS(icc_pmr_el1, 0, c4, c6, 0);
GEN_CP15_REG_FUNCS(icc_bpr0_el1, 0, c12, c8, 3);
GEN_CP15_REG_FUNCS(icc_iar0_el1, 0, c12, c8, 0);
GEN_CP15_REG_FUNCS(icc_eoir0_el1, 0, c12, c8, 1);
GEN_CP15_REG_FUNCS(icc_rpr_el1, 0, c12, c11, 3);
GEN_CP15_REG_FUNCS(icc_hppir0_el1, 0, c12, c8, 2);
GEN_CP15_REG_FUNCS(icc_bpr1_el1, 0, c12, c12, 3);
GEN_CP15_REG_FUNCS(icc_iar1_el1, 0, c12, c12, 0);
GEN_CP15_REG_FUNCS(icc_eoir1_el1, 0, c12, c12, 1);
GEN_CP15_REG_FUNCS(icc_hppir1_el1, 0, c12, c12, 2);
GEN_CP15_REG_FUNCS(icc_dir_el1, 0, c12, c11, 1);
GEN_CP15_REG_FUNCS(icc_sre_el1, 0, c12, c12, 5);
GEN_CP15_REG_FUNCS(icc_igrpen0_el1, 0, c12, c12, 6);
GEN_CP15_REG_FUNCS(icc_igrpen1_el1, 0, c12, c12, 7);
GEN_CP15_REG_FUNCS(icc_ap0r0_el1, 0, c12, c8, 4);
GEN_CP15_REG_FUNCS(icc_ap0r1_el1, 0, c12, c8, 5);
GEN_CP15_REG_FUNCS(icc_ap0r2_el1, 0, c12, c8, 6);
GEN_CP15_REG_FUNCS(icc_ap0r3_el1, 0, c12, c8, 7);
GEN_CP15_REG_FUNCS(icc_ap1r0_el1, 0, c12, c9, 0);
GEN_CP15_REG_FUNCS(icc_ap1r1_el1, 0, c12, c9, 1);
GEN_CP15_REG_FUNCS(icc_ap1r2_el1, 0, c12, c9, 2);
GEN_CP15_REG_FUNCS(icc_ap1r3_el1, 0, c12, c9, 3);
GEN_CP15_REG64_FUNCS(icc_sgi1r_el1, 0, c12);
GEN_CP15_REG64_FUNCS(icc_asgi1r_el1, 1, c12);
GEN_CP15_REG64_FUNCS(icc_sgi0r_el1, 2, c12);
#endif /* ARCH_ARM64 */
#if GICV3_IRQ_GROUP == GICV3_IRQ_GROUP_GRP0S
#define GICC_PRIMARY_HPPIR icc_hppir0_el1
#define GICC_PRIMARY_IAR icc_iar0_el1
#define GICC_PRIMARY_EOIR icc_eoir0_el1
#define GICC_PRIMARY_SGIR icc_sgi0r_el1
#else
#define GICC_PRIMARY_HPPIR icc_hppir1_el1
#define GICC_PRIMARY_IAR icc_iar1_el1
#define GICC_PRIMARY_EOIR icc_eoir1_el1
#define GICC_PRIMARY_SGIR icc_sgi1r_el1
#endif
#define GICC_LIMIT (0x0000)
#else /* GIC_VERSION > 2 */
#ifndef GICC_OFFSET
#define GICC_OFFSET (0x0000)
#endif
#define GICCREG_READ(gic, reg) ({ \
ASSERT(gic < NUM_ARM_GICS); \
ASSERT(reg >= GICC_OFFSET); \
ASSERT(reg < GICC_LIMIT); \
mmio_read32((volatile uint32_t *)(arm_gics[(gic)].gicc_vaddr + ((reg) - GICC_OFFSET))); \
})
#define GICCREG_WRITE(gic, reg, val) ({ \
ASSERT(gic < NUM_ARM_GICS); \
ASSERT(reg >= GICC_OFFSET); \
ASSERT(reg < GICC_LIMIT); \
mmio_write32((volatile uint32_t *)(arm_gics[(gic)].gicc_vaddr + ((reg) - GICC_OFFSET)), (val)); \
})
/* main cpu regs */
#define GICC_CTLR (GICC_OFFSET + 0x0000)
#define GICC_PMR (GICC_OFFSET + 0x0004)
#define GICC_BPR (GICC_OFFSET + 0x0008)
#define GICC_IAR (GICC_OFFSET + 0x000c)
#define GICC_EOIR (GICC_OFFSET + 0x0010)
#define GICC_RPR (GICC_OFFSET + 0x0014)
#define GICC_HPPIR (GICC_OFFSET + 0x0018)
#define GICC_ABPR (GICC_OFFSET + 0x001c)
#define GICC_AIAR (GICC_OFFSET + 0x0020)
#define GICC_AEOIR (GICC_OFFSET + 0x0024)
#define GICC_AHPPIR (GICC_OFFSET + 0x0028)
#define GICC_APR(n) (GICC_OFFSET + 0x00d0 + (n) * 4)
#define GICC_NSAPR(n) (GICC_OFFSET + 0x00e0 + (n) * 4)
#define GICC_IIDR (GICC_OFFSET + 0x00fc)
#if 0 /* GICC_DIR is not currently used by anything */
#define GICC_DIR (GICC_OFFSET + 0x1000)
#endif
#define GICC_LIMIT (GICC_OFFSET + 0x1000)
#define GICC_MIN_SIZE (GICC_LIMIT - GICC_OFFSET)
#if WITH_LIB_SM
#define GICC_PRIMARY_HPPIR GICC_AHPPIR
#define GICC_PRIMARY_IAR GICC_AIAR
#define GICC_PRIMARY_EOIR GICC_AEOIR
#else
#define GICC_PRIMARY_HPPIR GICC_HPPIR
#define GICC_PRIMARY_IAR GICC_IAR
#define GICC_PRIMARY_EOIR GICC_EOIR
#endif
#endif /* GIC_VERSION > 2 */
#ifndef GICD_OFFSET
#define GICD_OFFSET (GICC_LIMIT)
#endif
#define GICDREG_READ(gic, reg) ({ \
ASSERT(gic < NUM_ARM_GICS); \
ASSERT(reg >= GICD_OFFSET); \
ASSERT(reg < GICD_LIMIT); \
mmio_read32((volatile uint32_t *)(arm_gics[(gic)].gicd_vaddr + ((reg) - GICD_OFFSET))); \
})
#define GICDREG_WRITE(gic, reg, val) ({ \
ASSERT(gic < NUM_ARM_GICS); \
ASSERT(reg >= GICD_OFFSET); \
ASSERT(reg < GICD_LIMIT); \
LTRACEF_LEVEL(3, "GICDREG_WRITE base vaddr %#lx gic %d reg 0x%x val 0x%x\n", arm_gics[gic].gicd_vaddr, gic, reg, val); \
LTRACEF_LEVEL(3, "final address = %#lx\n", arm_gics[gic].gicd_vaddr + ((reg) - GICD_OFFSET)); \
mmio_write32((volatile uint32_t *)(arm_gics[(gic)].gicd_vaddr + ((reg) - GICD_OFFSET)), (val)); \
})
/* distribution regs */
#define GICD_CTLR (GICD_OFFSET + 0x000)
#define GICD_TYPER (GICD_OFFSET + 0x004)
#define GICD_IIDR (GICD_OFFSET + 0x008)
#define GICD_IGROUPR(n) (GICD_OFFSET + 0x080 + (n) * 4)
#define GICD_ISENABLER(n) (GICD_OFFSET + 0x100 + (n) * 4)
#define GICD_ICENABLER(n) (GICD_OFFSET + 0x180 + (n) * 4)
#define GICD_ISPENDR(n) (GICD_OFFSET + 0x200 + (n) * 4)
#define GICD_ICPENDR(n) (GICD_OFFSET + 0x280 + (n) * 4)
#define GICD_ISACTIVER(n) (GICD_OFFSET + 0x300 + (n) * 4)
#define GICD_ICACTIVER(n) (GICD_OFFSET + 0x380 + (n) * 4)
#define GICD_IPRIORITYR(n) (GICD_OFFSET + 0x400 + (n) * 4)
#define GICD_ITARGETSR(n) (GICD_OFFSET + 0x800 + (n) * 4)
#define GICD_ICFGR(n) (GICD_OFFSET + 0xc00 + (n) * 4)
#define GICD_NSACR(n) (GICD_OFFSET + 0xe00 + (n) * 4)
#define GICD_SGIR (GICD_OFFSET + 0xf00)
#define GICD_CPENDSGIR(n) (GICD_OFFSET + 0xf10 + (n) * 4)
#define GICD_SPENDSGIR(n) (GICD_OFFSET + 0xf20 + (n) * 4)
#if GIC_VERSION <= 2
/* for v3 and higher, these are defined later */
#define GICD_LIMIT (GICD_OFFSET + 0x1000)
#define GICD_MIN_SIZE (GICD_LIMIT - GICD_OFFSET)
#endif /* GIC_VERSION <= 2 */
/* GICD_CTLR Register bits, by security configuration:
 *              Non-Secure access  Single Security   Two Security states
 * (1U << 8)    RES0               nASSGIreq         RES0
 * (1U << 7)    RES0               E1NWF             E1NWF
 * (1U << 5)    RES0               RES0              ARE_NS
 * (1U << 4)    ARE_NS             ARE               ARE_S
 * (1U << 2)    RES0               RES0              ENABLE_G1S
 * (1U << 1)    ENABLE_G1A         ENABLE_G1         ENABLE_G1NS
 * (1U << 0)    ENABLE_G1          ENABLE_G0         ENABLE_G0
 */
#define GICD_CTLR_RWP (1U << 31)
#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTRL_E1NWF (1U << 7)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 5)
#define GICD_CTLR_ARE_S (1U << 4)
#define GICD_CTLR_ENABLE_G1S (1U << 2)
#define GICD_CTLR_ENABLE_G1NS (1U << 1)
#define GICD_CTLR_ENABLE_G0 (1U << 0)
#if GIC_VERSION > 2
/* some registers of GICD are 64 bit */
#define GICDREG_READ64(gic, reg) ({ \
ASSERT(gic < NUM_ARM_GICS); \
ASSERT(reg >= GICD_OFFSET); \
ASSERT(reg < GICD_LIMIT); \
mmio_read64((volatile uint64_t *)(arm_gics[(gic)].gicd_vaddr + ((reg) - GICD_OFFSET))); \
})
#define GICDREG_WRITE64(gic, reg, val) ({ \
ASSERT(gic < NUM_ARM_GICS); \
ASSERT(reg >= GICD_OFFSET); \
ASSERT(reg < GICD_LIMIT); \
mmio_write64((volatile uint64_t *)(arm_gics[(gic)].gicd_vaddr + ((reg) - GICD_OFFSET)), (val)); \
})
/* GICv3/v4 Distributor interface */
#define GICD_STATUSR (GICD_OFFSET + 0x0010)
#define GICD_SETSPI_NSR (GICD_OFFSET + 0x0040)
#define GICD_CLRSPI_NSR (GICD_OFFSET + 0x0048)
#define GICD_SETSPI_SR (GICD_OFFSET + 0x0050)
#define GICD_CLRSPI_SR (GICD_OFFSET + 0x0058)
#define GICD_IGRPMODR(n) (GICD_OFFSET + 0x0D00 + (n) * 4)
#define GICD_IROUTER(n) (GICD_OFFSET + 0x6000 + (n) * 8)
#define GICD_CIDR0 (GICD_OFFSET + 0xfff0)
#define GICD_CIDR1 (GICD_OFFSET + 0xfff4)
#define GICD_CIDR2 (GICD_OFFSET + 0xfff8)
#define GICD_CIDR3 (GICD_OFFSET + 0xfffc)
#define GICD_PIDR0 (GICD_OFFSET + 0xffe0)
#define GICD_PIDR1 (GICD_OFFSET + 0xffe4)
#define GICD_PIDR2 (GICD_OFFSET + 0xffe8)
#define GICD_PIDR3 (GICD_OFFSET + 0xffec)
#define GICD_LIMIT (GICD_OFFSET + 0x10000)
#define GICD_MIN_SIZE (GICD_LIMIT - GICD_OFFSET)
/* GICv3/v4 Redistributor interface */
#if GIC_VERSION == 3
#define GICR_CPU_OFFSET(cpu) ((cpu) * 0x20000)
#endif
#if GIC_VERSION == 4
#define GICR_CPU_OFFSET(cpu) ((cpu) * 0x40000)
#endif
#ifndef GICR_OFFSET
#define GICR_OFFSET (GICD_LIMIT)
#endif
#define GICRREG_READ(gic, cpu, reg) ({ \
ASSERT(gic < NUM_ARM_GICS); \
ASSERT(cpu < SMP_MAX_CPUS); \
ASSERT(reg >= GICR_OFFSET); \
ASSERT(reg < GICR_LIMIT); \
mmio_read32((volatile uint32_t *)(arm_gics[(gic)].gicr_vaddr + GICR_CPU_OFFSET(cpu) + ((reg) - GICR_OFFSET))); \
})
#define GICRREG_WRITE(gic, cpu, reg, val) ({ \
ASSERT(gic < NUM_ARM_GICS); \
ASSERT(cpu < SMP_MAX_CPUS); \
ASSERT(reg >= GICR_OFFSET); \
ASSERT(reg < GICR_LIMIT); \
LTRACEF_LEVEL(3, "GICRREG_WRITE base vaddr %#lx gic %d cpu %d reg 0x%x val 0x%x\n", arm_gics[gic].gicr_vaddr, gic, cpu, reg, val); \
LTRACEF_LEVEL(3, "final address = %#lx\n", arm_gics[gic].gicr_vaddr + GICR_CPU_OFFSET(cpu) + ((reg) - GICR_OFFSET)); \
mmio_write32((volatile uint32_t *)(arm_gics[(gic)].gicr_vaddr + GICR_CPU_OFFSET(cpu) + ((reg) - GICR_OFFSET)), (val)); \
})
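/*
 * Example: GICRREG_WRITE(0, 2, GICR_ISENABLER0, 1U << 5) sets the enable
 * bit for SGI/PPI vector 5 on cpu 2's redistributor; the final address is
 * gicr_vaddr + GICR_CPU_OFFSET(2) + (GICR_ISENABLER0 - GICR_OFFSET), i.e.
 * 2 * 0x20000 + 0x10100 past the GICR base on GICv3.
 */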
#define GICR_CTRL (GICR_OFFSET + 0x0000)
#define GICR_IIDR (GICR_OFFSET + 0x0004)
#define GICR_TYPER (GICR_OFFSET + 0x0008)
#define GICR_STATUSR (GICR_OFFSET + 0x0010)
#define GICR_WAKER (GICR_OFFSET + 0x0014)
/* The following GICR registers are on a separate 64KB page */
#define GICR_SGI_OFFSET (GICR_OFFSET + 0x10000)
#define GICR_IGROUPR0 (GICR_SGI_OFFSET + 0x0080)
#define GICR_ISENABLER0 (GICR_SGI_OFFSET + 0x0100)
#define GICR_ICENABLER0 (GICR_SGI_OFFSET + 0x0180)
#define GICR_ISPENDR0 (GICR_SGI_OFFSET + 0x0200)
#define GICR_ICPENDR0 (GICR_SGI_OFFSET + 0x0280)
#define GICR_ISACTIVER0 (GICR_SGI_OFFSET + 0x0300)
#define GICR_ICACTIVER0 (GICR_SGI_OFFSET + 0x0380)
#define GICR_IPRIORITYR(n) (GICR_SGI_OFFSET + 0x0400 + (n) * 4)
#define GICR_ICFGR(n) (GICR_SGI_OFFSET + 0x0C00 + (n) * 4)
#define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00)
#define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00)
#define GICR_LIMIT (GICR_SGI_OFFSET + 0x1000)
#define GICR_MIN_SIZE (0x10000)
#endif /* GIC_VERSION > 2 */
// XXX: from trusty macros.h
#define ROUND_UP(n, d) (((n) + (size_t)(d) - 1) & ~((size_t)(d) - 1))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
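/* E.g. DIV_ROUND_UP(1020, 32) == 32 and ROUND_UP(0x11000, 0x10000) == 0x20000. */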

View File

@@ -0,0 +1,385 @@
/*
* Copyright (c) 2019 LK Trusty Authors. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <arch/ops.h>
#include <assert.h>
#include <lk/bits.h>
#include <lk/trace.h>
#include <stdint.h>
#include <inttypes.h>
#define LOCAL_TRACE 0
#include <dev/interrupt/arm_gic.h>
#include "arm_gic_common.h"
#include "gic_v3.h"
#define WAKER_QSC_BIT (0x1u << 31)
#define WAKER_CA_BIT (0x1u << 2)
#define WAKER_PS_BIT (0x1u << 1)
#define WAKER_SL_BIT (0x1u << 0)
static void gicv3_gicr_exit_sleep(uint32_t cpu) {
uint32_t val = GICRREG_READ(0, cpu, GICR_WAKER);
if (val & WAKER_QSC_BIT) {
/* clear sleep bit */
GICRREG_WRITE(0, cpu, GICR_WAKER, val & ~WAKER_SL_BIT);
while (GICRREG_READ(0, cpu, GICR_WAKER) & WAKER_QSC_BIT) {
}
}
}
static void gicv3_gicr_mark_awake(uint32_t cpu) {
uint32_t val = GICRREG_READ(0, cpu, GICR_WAKER);
if (val & WAKER_CA_BIT) {
/* mark CPU as awake */
GICRREG_WRITE(0, cpu, GICR_WAKER, val & ~WAKER_PS_BIT);
while (GICRREG_READ(0, cpu, GICR_WAKER) & WAKER_CA_BIT) {
}
}
}
#if GIC600
/*
* GIC-600 implements an additional GICR power control register
*/
#define GICR_PWRR (GICR_OFFSET + 0x0024)
#define PWRR_ON (0x0u << 0)
#define PWRR_OFF (0x1u << 0)
#define PWRR_RDGPD (0x1u << 2)
#define PWRR_RDGPO (0x1u << 3)
#define PWRR_RDGP_MASK (PWRR_RDGPD | PWRR_RDGPO)
static void gicv3_gicr_power_on(uint32_t cpu) {
/* Initiate power up */
GICRREG_WRITE(0, cpu, GICR_PWRR, PWRR_ON);
/* wait until it is complete (both bits are clear) */
while (GICRREG_READ(0, cpu, GICR_PWRR) & PWRR_RDGP_MASK) {
}
}
static void gicv3_gicr_power_off(uint32_t cpu) {
/* initiate power down */
GICRREG_WRITE(0, cpu, GICR_PWRR, PWRR_OFF);
/* wait until it is complete (both bits are set) */
while ((GICRREG_READ(0, cpu, GICR_PWRR) & PWRR_RDGP_MASK) !=
PWRR_RDGP_MASK) {
}
}
#else /* GIC600 */
static void gicv3_gicr_power_on(uint32_t cpu) {}
static void gicv3_gicr_power_off(uint32_t cpu) {}
#endif /* GIC600 */
static void arm_gicv3_wait_for_gicr_write_complete(uint cpu) {
/* wait until write complete */
while (GICRREG_READ(0, cpu, GICR_CTRL) & (1<<31)) { // GICR_CTLR.RWP
}
}
static void gicv3_gicr_init(void) {
uint32_t cpu = arch_curr_cpu_num();
gicv3_gicr_exit_sleep(cpu);
gicv3_gicr_power_on(cpu);
gicv3_gicr_mark_awake(cpu);
#if !WITH_LIB_SM
// redistributor config: configure sgi/ppi as non-secure group 1.
GICRREG_WRITE(0, cpu, GICR_IGROUPR0, ~0);
GICRREG_WRITE(0, cpu, GICR_IGRPMODR0, 0);
arm_gicv3_wait_for_gicr_write_complete(cpu);
#endif
// redistributor config: clear and mask sgi/ppi.
GICRREG_WRITE(0, cpu, GICR_ICENABLER0, ~0);
GICRREG_WRITE(0, cpu, GICR_ICPENDR0, ~0);
arm_gicv3_wait_for_gicr_write_complete(cpu);
}
void arm_gicv3_wait_for_write_complete(void) {
/* wait until write complete */
while (GICDREG_READ(0, GICD_CTLR) & GICD_CTLR_RWP) {
}
}
static void gicv3_gicd_ctrl_write(uint32_t val) {
/* write CTRL register */
GICDREG_WRITE(0, GICD_CTLR, val);
/* wait until write complete */
arm_gicv3_wait_for_write_complete();
}
static void gicv3_gicd_setup_irq_group(uint32_t vector, uint32_t grp) {
uint32_t val;
uint32_t mask;
ASSERT((vector >= 32) && (vector < MAX_INT));
mask = (0x1u << (vector % 32));
val = GICDREG_READ(0, GICD_IGROUPR(vector / 32));
if (grp & 0x1u) {
val |= mask;
} else {
val &= ~mask;
}
GICDREG_WRITE(0, GICD_IGROUPR(vector / 32), val);
val = GICDREG_READ(0, GICD_IGRPMODR(vector / 32));
if (grp & 0x2u) {
val |= mask;
} else {
val &= ~mask;
}
GICDREG_WRITE(0, GICD_IGRPMODR(vector / 32), val);
}
static void gicv3_gicd_setup_default_group(uint32_t grp) {
uint32_t i;
/* Assign all interrupts to selected group */
for (i = 32; i < MAX_INT; i += 32) {
GICDREG_WRITE(0, GICD_IGROUPR(i / 32), (grp & 0x1u) ? ~0U : 0);
GICDREG_WRITE(0, GICD_IGRPMODR(i / 32), (grp & 0x2u) ? ~0U : 0);
}
}
static bool gicv3_gicd_security_disabled(void) {
return GICDREG_READ(0, GICD_CTLR) & GICD_CTLR_DS;
}
static void gicv3_gicr_setup_irq_group(uint32_t vector, uint32_t grp) {
uint32_t val;
uint32_t mask;
uint32_t cpu = arch_curr_cpu_num();
ASSERT(vector < 32);
mask = (0x1u << vector);
val = GICRREG_READ(0, cpu, GICR_IGROUPR0);
if (grp & 0x1u) {
val |= mask;
} else {
val &= ~mask;
}
GICRREG_WRITE(0, cpu, GICR_IGROUPR0, val);
val = GICRREG_READ(0, cpu, GICR_IGRPMODR0);
if (grp & 0x2u) {
val |= mask;
} else {
val &= ~mask;
}
GICRREG_WRITE(0, cpu, GICR_IGRPMODR0, val);
}
static void gicv3_gicr_setup_default_group(uint32_t grp) {
uint32_t cpu = arch_curr_cpu_num();
GICRREG_WRITE(0, cpu, GICR_IGROUPR0, (grp & 0x1u) ? ~0U : 0);
GICRREG_WRITE(0, cpu, GICR_IGRPMODR0, (grp & 0x2u) ? ~0U : 0);
}
void arm_gicv3_init(void) {
bool disabled_security = gicv3_gicd_security_disabled();
uint32_t pidr2 = GICDREG_READ(0, GICD_PIDR2);
uint32_t rev = BITS_SHIFT(pidr2, 7, 4);
if (rev != 3 && rev != 4) {
panic("GIC not v3 or v4, pidr %#x rev %#x\n", pidr2, rev);
}
uint32_t typer = GICDREG_READ(0, GICD_TYPER);
uint32_t gic_max_int = (BITS(typer, 4, 0) + 1) * 32;
printf("GICv3 detected: rev %u, max interrupts %u, TYPER %#x\n", rev, gic_max_int, typer);
if (gic_max_int > MAX_INT) {
gic_max_int = MAX_INT;
}
if (disabled_security) {
printf("GICv3 security disabled\n");
}
#if !WITH_LIB_SM
/* non-TZ */
/* Disable all groups before making changes */
gicv3_gicd_ctrl_write(GICDREG_READ(0, GICD_CTLR) & ~0x7U);
for (uint32_t i = 0; i < gic_max_int; i += 32) {
GICDREG_WRITE(0, GICD_ICENABLER(i / 32), ~0U);
GICDREG_WRITE(0, GICD_ICPENDR(i / 32), ~0U);
GICDREG_WRITE(0, GICD_IGROUPR(i / 32), ~0U);
GICDREG_WRITE(0, GICD_IGRPMODR(i / 32), ~0U);
}
arm_gicv3_wait_for_write_complete();
#endif
/* Enable distributor with ARE, group 1 enable */
if (disabled_security == false) {
gicv3_gicd_ctrl_write(GICDREG_READ(0, GICD_CTLR) |
(GICD_CTLR_ENABLE_G0 | GICD_CTLR_ENABLE_G1NS | GICD_CTLR_ARE_S));
} else {
// TODO: is there a reasonable other solution here?
}
/* Enable selected group */
uint32_t grp_mask = (0x1u << GICV3_IRQ_GROUP);
gicv3_gicd_ctrl_write(GICDREG_READ(0, GICD_CTLR) | grp_mask);
arm_gicv3_wait_for_write_complete();
#if !WITH_LIB_SM
/* Direct SPI interrupts to core 0 */
for (uint32_t i = 32; i < gic_max_int; i++) {
GICDREG_WRITE64(0, GICD_IROUTER(i), 0);
}
#endif
}
void arm_gicv3_init_percpu(void) {
#if WITH_LIB_SM
/* TZ */
/* Initialized by ATF */
#if ARM_GIC_USE_DOORBELL_NS_IRQ
gicv3_gicr_setup_irq_group(ARM_GIC_DOORBELL_IRQ, GICV3_IRQ_GROUP_GRP1NS);
#endif
#else
/* non-TZ */
/* Init redistributor interface */
gicv3_gicr_init();
/* Enable CPU interface access */
// TODO: do we need to set bit 1 and 2? (IRQ/FIQ bypass)
GICCREG_WRITE(0, icc_sre_el1, (GICCREG_READ(0, icc_sre_el1) | 0x7));
#endif
/* Set priority mask to maximum to allow all priorities */
GICCREG_WRITE(0, icc_pmr_el1, 0xFF);
/* enable selected percpu group */
if (GICV3_IRQ_GROUP == 0) {
GICCREG_WRITE(0, icc_igrpen0_el1, 1);
} else {
GICCREG_WRITE(0, icc_igrpen1_el1, 1);
}
}
void arm_gicv3_configure_irq_locked(unsigned int cpu, unsigned int vector) {
uint32_t grp = GICV3_IRQ_GROUP;
ASSERT(vector < MAX_INT);
if (vector < 32) {
/* PPIs */
gicv3_gicr_setup_irq_group(vector, grp);
} else {
/* SPIs */
gicv3_gicd_setup_irq_group(vector, grp);
}
}
static uint32_t enabled_spi_mask[DIV_ROUND_UP(MAX_INT, 32)];
static uint32_t enabled_ppi_mask[SMP_MAX_CPUS];
void arm_gicv3_suspend_cpu(unsigned int cpu) {
uint32_t i;
ASSERT(cpu < SMP_MAX_CPUS);
if (cpu == 0) {
/* also save gicd */
for (i = 32; i < MAX_INT; i += 32) {
enabled_spi_mask[i / 32] = GICDREG_READ(0, GICD_ISENABLER(i / 32));
}
}
enabled_ppi_mask[cpu] = GICRREG_READ(0, cpu, GICR_ISENABLER0);
}
void arm_gicv3_resume_cpu_locked(unsigned int cpu, bool gicd) {
uint32_t i;
ASSERT(cpu < SMP_MAX_CPUS);
GICRREG_WRITE(0, cpu, GICR_ISENABLER0, enabled_ppi_mask[cpu]);
if (gicd) {
/* also resume gicd */
for (i = 32; i < MAX_INT; i += 32) {
GICDREG_WRITE(0, GICD_ISENABLER(i / 32), enabled_spi_mask[i / 32]);
}
}
}
#define SGIR_AFF1_SHIFT (16)
#define SGIR_AFF2_SHIFT (32)
#define SGIR_AFF3_SHIFT (48)
#define SGIR_IRQ_SHIFT (24)
#define SGIR_RS_SHIFT (44)
#define SGIR_TARGET_LIST_SHIFT (0)
#define SGIR_ASSEMBLE(val, shift) ((uint64_t)val << shift)
uint64_t arm_gicv3_sgir_val(u_int irq, size_t cpu_num) {
DEBUG_ASSERT(irq < 16);
struct {
uint8_t aff0;
uint8_t aff1;
uint8_t aff2;
uint8_t aff3;
} affs = {0};
#if __aarch64__
uint64_t mpidr = arm64_cpu_num_to_mpidr(cpu_num);
affs.aff0 = mpidr & 0xff;
affs.aff1 = (mpidr >> 8) & 0xff;
affs.aff2 = (mpidr >> 16) & 0xff;
affs.aff3 = (mpidr >> 32) & 0xff;
#else
// TODO: fix for arm32
affs.aff0 = cpu_num;
#endif
// TODO: configure this based on ICC_CTLR_EL1.RSS
uint8_t range_selector = affs.aff0 >> 4;
uint16_t target_list = 1U << (affs.aff0 & 0xf);
uint64_t sgir = SGIR_ASSEMBLE(irq, SGIR_IRQ_SHIFT) |
SGIR_ASSEMBLE(affs.aff3, SGIR_AFF3_SHIFT) |
SGIR_ASSEMBLE(affs.aff2, SGIR_AFF2_SHIFT) |
SGIR_ASSEMBLE(affs.aff1, SGIR_AFF1_SHIFT) |
SGIR_ASSEMBLE(range_selector, SGIR_RS_SHIFT) |
SGIR_ASSEMBLE(target_list, SGIR_TARGET_LIST_SHIFT);
LTRACEF_LEVEL(2, "irq %u cpu %zu affs %02x:%02x:%02x:%02x rs %u tl 0x%x sgir 0x%016" PRIx64 "\n",
irq, cpu_num, affs.aff3, affs.aff2, affs.aff1, affs.aff0,
range_selector, target_list,
sgir);
return sgir;
}
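/*
 * Worked example (using the arm32 aff0 = cpu_num fallback above):
 * arm_gicv3_sgir_val(1, 2) places irq 1 in bits [27:24] and, with aff0 = 2,
 * range_selector = 0 and target_list = 1 << 2, yielding
 * sgir = (1ULL << 24) | 0x4 = 0x0000000001000004.
 */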

View File

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2019 LK Trusty Authors. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
void arm_gicv3_init(void);
void arm_gicv3_init_percpu(void);
void arm_gicv3_configure_irq_locked(unsigned int cpu, unsigned int vector);
void arm_gicv3_suspend_cpu(unsigned int cpu);
void arm_gicv3_resume_cpu_locked(unsigned int cpu, bool gicd);
uint64_t arm_gicv3_sgir_val(u_int irq, size_t cpu_num);
void arm_gicv3_wait_for_write_complete(void);

View File

@@ -1,17 +1,83 @@
/*
* Copyright (c) 2013, Google Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __DEV_INTERRUPT_ARM_GIC_H
#define __DEV_INTERRUPT_ARM_GIC_H
#include <sys/types.h>
/**
* arm_gic_init() - Legacy GIC initialization routine.
*
* This initializes the GIC using the %GICBASE and %GICx_OFFSET
* macros as the virtual addresses of the GIC banks, and assumes
* that the platform code has already mapped them into the
* address space.
*/
void arm_gic_init(void);
/**
* struct arm_gic_init_info - Initialization information for the GIC.
* @gicc_paddr: Physical address of GIC CPU interface registers.
* @gicc_size: Total size of GIC CPU interface registers.
* @gicd_paddr: Physical address of GIC Distributor registers.
* @gicd_size: Total size of GIC Distributor registers.
* @gicr_paddr: Physical address of GIC Redistributor registers.
* @gicr_size: Total size of GIC Redistributor registers.
*/
struct arm_gic_init_info {
paddr_t gicc_paddr;
size_t gicc_size;
paddr_t gicd_paddr;
size_t gicd_size;
paddr_t gicr_paddr;
size_t gicr_size;
};
/**
* arm_gic_init_map() - Map the GIC into the virtual address space and
* initialize it.
* @init_info: Pointer to a &struct arm_gic_init_info structure with the extra
* initialization information, e.g., the physical addresses and
* sizes of the GIC registers.
*
* This function maps the registers of the GICs then initializes the GIC.
* If ASLR is enabled then the virtual addresses are randomized.
*
*/
void arm_gic_init_map(struct arm_gic_init_info* init_info);
enum {
/* Ignore cpu_mask and forward interrupt to all CPUs other than the current cpu */
ARM_GIC_SGI_FLAG_TARGET_FILTER_NOT_SENDER = 0x1,
/* Ignore cpu_mask and forward interrupt to current CPU only */
ARM_GIC_SGI_FLAG_TARGET_FILTER_SENDER = 0x2,
ARM_GIC_SGI_FLAG_TARGET_FILTER_MASK = 0x3,
/* Only forward the interrupt to CPUs that have the interrupt configured as group 1 (non-secure) */
ARM_GIC_SGI_FLAG_NS = 0x4,
};
status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask);
#define GIC_BASE_SGI 0
#define GIC_BASE_PPI 16
#define GIC_BASE_SPI 32
@@ -26,17 +92,5 @@ enum interrupt_polarity {
IRQ_POLARITY_ACTIVE_LOW = 1,
};
#endif

View File

@@ -2,7 +2,19 @@ LOCAL_DIR := $(GET_LOCAL_DIR)
MODULE := $(LOCAL_DIR)
GIC_VERSION ?= 2
MODULE_DEFINES += \
GIC_VERSION=$(GIC_VERSION) \
MODULE_SRCS += \
$(LOCAL_DIR)/arm_gic.c
MODULE_COMPILEFLAGS += -Wno-type-limits
# Build gic_v3 for versions 3 and 4
ifeq (,$(filter-out 3 4,$(GIC_VERSION)))
MODULE_SRCS += $(LOCAL_DIR)/gic_v3.c
endif
include make/module.mk

View File

@@ -10,6 +10,11 @@
#include <platform/qemu-virt.h>
#define GICBASE(n) (CPUPRIV_BASE_VIRT)
#define GICD_OFFSET (0x00000)
#define GICC_OFFSET (0x10000)
#if GIC_VERSION > 2
#define GICR_OFFSET (0xa0000)
#endif

View File

@@ -44,6 +44,8 @@ GLOBAL_DEFINES += \
CONSOLE_HAS_INPUT_BUFFER=1 \
TIMER_ARM_GENERIC_SELECTED=CNTV
GIC_VERSION := 3
GLOBAL_DEFINES += MMU_WITH_TRAMPOLINE=1
LINKER_SCRIPT += \

View File

@@ -105,7 +105,7 @@ fi
if (( DO_64BIT )); then
QEMU="qemu-system-aarch64"
CPU="cortex-a76" # default to something recent that modern qemu supports
MACHINE="virt"
MACHINE="virt,gic_version=3"
# if using KVM/HVF, switch to a host cpu and enable acceleration
if (( DO_KVM )); then
CPU="host"
@@ -132,7 +132,7 @@ elif (( DO_CORTEX_M3 )); then
else
QEMU="qemu-system-arm"
CPU="cortex-a15"
MACHINE="virt"
MACHINE="virt,gic_version=3"
MACHINE+=",highmem=off" # disable the high PCI ECAM, since we dont support LPAE to map it
_PROJECT="qemu-virt-arm32-test"
fi