David Milosevic has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/74798 )
Change subject: arch/arm64: Add EL1/EL2/EL3 support for arm64 ......................................................................
arch/arm64: Add EL1/EL2/EL3 support for arm64
Currently, arch/arm64 requires coreboot to run on EL3 due to EL3 register access. This might be an issue when, for example, one boots into TF-A first and drops into EL2 for coreboot afterwards.
This patch aims at making arch/arm64 more versatile by removing the current EL3 constraint and allowing arm64 coreboot to run on EL1, EL2 and EL3.
The strategy here is to read coreboot's current EL via the 'currentel' register and choose the appropriate ELx register. So, for example, when running coreboot on EL1, we would not access vbar_el3 or vbar_el2 but instead vbar_el1. This way, we don't generate faults when accessing higher-EL registers.
Currently only tested on the qemu-aarch64 target. Exceptions were tested by enabling FATAL_ASSERTS.
Signed-off-by: David Milosevic David.Milosevic@9elements.com Change-Id: Iae1c57f0846c8d0585384f7e54102a837e701e7e --- M src/arch/arm64/armv8/cache.c M src/arch/arm64/armv8/exception.c M src/arch/arm64/armv8/mmu.c M src/arch/arm64/include/armv8/arch/cache.h M src/arch/arm64/include/armv8/arch/lib_helpers.h M src/arch/arm64/ramdetect.c M src/arch/arm64/transition.c M src/arch/arm64/transition_asm.S 8 files changed, 140 insertions(+), 26 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/98/74798/1
diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c index ae0729e..85ce683 100644 --- a/src/arch/arm64/armv8/cache.c +++ b/src/arch/arm64/armv8/cache.c @@ -90,10 +90,13 @@ */ void arch_segment_loaded(uintptr_t start, size_t size, int flags) { - uint32_t sctlr = raw_read_sctlr_el3(); + uint32_t sctlr = raw_read_sctlr_current_el(); + if (sctlr & SCTLR_C) dcache_clean_by_mva((void *)start, size); else if (sctlr & SCTLR_I) dcache_clean_invalidate_by_mva((void *)start, size); + icache_invalidate_all(); + } diff --git a/src/arch/arm64/armv8/exception.c b/src/arch/arm64/armv8/exception.c index 80b619d..a3113d6 100644 --- a/src/arch/arm64/armv8/exception.c +++ b/src/arch/arm64/armv8/exception.c @@ -51,9 +51,10 @@ struct regs *regs = &exc_state->regs;
printk(BIOS_DEBUG, "ELR = 0x%016llx ESR = 0x%08llx\n", - elx->elr, raw_read_esr_el3()); + elx->elr, raw_read_esr_current_el()); printk(BIOS_DEBUG, "FAR = 0x%016llx SPSR = 0x%08llx\n", - raw_read_far_el3(), raw_read_spsr_el3()); + raw_read_far_current_el(), raw_read_spsr_current_el()); + for (i = 0; i < 30; i += 2) { printk(BIOS_DEBUG, "X%02d = 0x%016llx X%02d = 0x%016llx\n", @@ -162,7 +163,8 @@ { /* Update instruction pointer to next instrution. */ state->elx.elr += sizeof(uint32_t); - raw_write_elr_el3(state->elx.elr); + raw_write_elr_current_el(state->elx.elr); + return EXC_RET_HANDLED; }
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c index 61ca49f..8b38c13 100644 --- a/src/arch/arm64/armv8/mmu.c +++ b/src/arch/arm64/armv8/mmu.c @@ -224,7 +224,12 @@
/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */ dsb(); - tlbiall_el3(); + + if (current_el() == EL2) + tlbiall_el2(); + else + tlbiall_el3(); + dsb(); isb(); } @@ -245,15 +250,15 @@ assert((u8 *)root == _ttb);
/* Initialize TTBR */ - raw_write_ttbr0_el3((uintptr_t)root); + raw_write_ttbr0_current_el((uintptr_t)root);
/* Initialize MAIR indices */ - raw_write_mair_el3(MAIR_ATTRIBUTES); + raw_write_mair_current_el(MAIR_ATTRIBUTES);
/* Initialize TCR flags */ - raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC | - TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB | - TCR_TBI_USED); + raw_write_tcr_current_el(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC | + TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB | + TCR_TBI_USED); }
/* Func : mmu_save_context @@ -264,10 +269,10 @@ assert(mmu_context);
/* Back-up MAIR_ATTRIBUTES */ - mmu_context->mair = raw_read_mair_el3(); + mmu_context->mair = raw_read_mair_current_el();
/* Back-up TCR value */ - mmu_context->tcr = raw_read_tcr_el3(); + mmu_context->tcr = raw_read_tcr_current_el(); }
/* Func : mmu_restore_context @@ -278,13 +283,13 @@ assert(mmu_context);
/* Restore TTBR */ - raw_write_ttbr0_el3((uintptr_t)_ttb); + raw_write_ttbr0_current_el((uintptr_t)_ttb);
/* Restore MAIR indices */ - raw_write_mair_el3(mmu_context->mair); + raw_write_mair_current_el(mmu_context->mair);
/* Restore TCR flags */ - raw_write_tcr_el3(mmu_context->tcr); + raw_write_tcr_current_el(mmu_context->tcr);
/* invalidate tlb since ttbr is updated. */ tlb_invalidate_all(); @@ -295,8 +300,8 @@ assert_correct_ttb_mapping(_ttb); assert_correct_ttb_mapping(_ettb - 1);
- uint32_t sctlr = raw_read_sctlr_el3(); - sctlr |= SCTLR_C | SCTLR_M | SCTLR_I; - raw_write_sctlr_el3(sctlr); + uint32_t sctlr = raw_read_sctlr_current_el(); + raw_write_sctlr_current_el(sctlr | SCTLR_C | SCTLR_M | SCTLR_I); + isb(); } diff --git a/src/arch/arm64/include/armv8/arch/cache.h b/src/arch/arm64/include/armv8/arch/cache.h index bb9d6fe..a972007 100644 --- a/src/arch/arm64/include/armv8/arch/cache.h +++ b/src/arch/arm64/include/armv8/arch/cache.h @@ -34,7 +34,12 @@ static inline void tlb_invalidate_all(void) { /* TLBIALL includes dTLB and iTLB on systems that have them. */ - tlbiall_el3(); + + if (current_el() == EL2) + tlbiall_el2(); + else + tlbiall_el3(); + dsb(); isb(); } diff --git a/src/arch/arm64/include/armv8/arch/lib_helpers.h b/src/arch/arm64/include/armv8/arch/lib_helpers.h index 4b3730f..d62c824 100644 --- a/src/arch/arm64/include/armv8/arch/lib_helpers.h +++ b/src/arch/arm64/include/armv8/arch/lib_helpers.h @@ -113,10 +113,53 @@ : : "r" (value) : "memory"); \ }
+/* Forward Declaration for MAKE_REGISTER_ACCESSORS_CURRENT_EL */ +static inline u64 current_el(void); +/* + * In order to allow easy access to current EL's registers, + * we export following two functions for each EL register, that + * was passed to the MAKE_REGISTER_ACCESSORS_CURRENT_EL macro. Doing + * this, eliminates, or at least hides, repetitive branching on the + * current EL across the arm64 codebase. + * + * MAKE_REGISTER_ACCESSORS_CURRENT_EL was hooked into MAKE_REGISTER_ACCESSORS_EL123, + * in order to automatically generate current_el accessors only for registers which + * exist on EL1, EL2 and EL3. + * + * Note, that we don't handle EL0 here, as EL0 has no access + * to the currentel register. + * + * Important: + * - target register should be specified without the '_elx' suffix + * - only registers which exist in EL1, EL2 and EL3 should be passed + * to the MAKE_REGISTER_ACCESSORS_CURRENT_EL macro + */ +#define MAKE_REGISTER_ACCESSORS_CURRENT_EL(reg) \ + static inline uint64_t raw_read_##reg##_current_el(void) \ + { \ + uint64_t exception_level = current_el(); \ + if (exception_level == EL1) \ + return raw_read_##reg##_el1(); \ + else if (exception_level == EL2) \ + return raw_read_##reg##_el2(); \ + return raw_read_##reg##_el3(); \ + } \ + static inline void raw_write_##reg##_current_el(uint64_t value) \ + { \ + uint64_t exception_level = current_el(); \ + if (exception_level == EL1) \ + raw_write_##reg##_el1(value); \ + else if (exception_level == EL2) \ + raw_write_##reg##_el2(value); \ + else \ + raw_write_##reg##_el3(value); \ + } + #define MAKE_REGISTER_ACCESSORS_EL123(reg) \ MAKE_REGISTER_ACCESSORS(reg##_el1) \ MAKE_REGISTER_ACCESSORS(reg##_el2) \ - MAKE_REGISTER_ACCESSORS(reg##_el3) + MAKE_REGISTER_ACCESSORS(reg##_el3) \ + MAKE_REGISTER_ACCESSORS_CURRENT_EL(reg)
/* Architectural register accessors */ MAKE_REGISTER_ACCESSORS_EL123(actlr) @@ -196,6 +239,15 @@ MAKE_REGISTER_ACCESSORS(vtcr_el2) MAKE_REGISTER_ACCESSORS(vttbr_el2)
+/* Exception Level functions */ +static inline u64 current_el(void) +{ + register uint64_t x0 __asm__("x0"); + __asm__ __volatile__("mrs x0, CurrentEL;" + : : : "%x0"); + return x0 >> 2; +} + /* Special DAIF accessor functions */ static inline void enable_debug_exceptions(void) { diff --git a/src/arch/arm64/ramdetect.c b/src/arch/arm64/ramdetect.c index 2f6a1cd..cfcc055 100644 --- a/src/arch/arm64/ramdetect.c +++ b/src/arch/arm64/ramdetect.c @@ -13,12 +13,13 @@
static int abort_checker(struct exc_state *state, uint64_t vector_id) { - if (raw_read_esr_el3() >> 26 != 0x25) + if (raw_read_esr_current_el() >> 26 != 0x25) return EXC_RET_IGNORED; /* Not a data abort. */
abort_state = ABORT_CHECKER_TRIGGERED; state->elx.elr += sizeof(uint32_t); /* Jump over faulting instruction. */ - raw_write_elr_el3(state->elx.elr); + raw_write_elr_current_el(state->elx.elr); + return EXC_RET_HANDLED; }
diff --git a/src/arch/arm64/transition.c b/src/arch/arm64/transition.c index e5c83bf..72582d4 100644 --- a/src/arch/arm64/transition.c +++ b/src/arch/arm64/transition.c @@ -17,7 +17,9 @@ struct regs *regs = &exc_state->regs; uint8_t elx_mode;
- elx->spsr = raw_read_spsr_el3(); + elx->spsr = raw_read_spsr_current_el(); + elx->elr = raw_read_elr_current_el(); + elx_mode = get_mode_from_spsr(elx->spsr);
if (elx_mode == SPSR_USE_H) @@ -25,8 +27,6 @@ else regs->sp = raw_read_sp_el0();
- elx->elr = raw_read_elr_el3(); - exc_dispatch(exc_state, id); }
diff --git a/src/arch/arm64/transition_asm.S b/src/arch/arm64/transition_asm.S index 73a6fb6..818d103 100644 --- a/src/arch/arm64/transition_asm.S +++ b/src/arch/arm64/transition_asm.S @@ -142,7 +142,7 @@ ENDPROC(exc_exit)
/* - * exception_init_asm: Initialize VBAR and point SP_EL3 to exception stack. + * exception_init_asm: Initialize VBAR and point SP_ELx to exception stack. * Also unmask aborts now that we can report them. x0 = end of exception stack */ ENTRY(exception_init_asm) @@ -151,8 +151,27 @@ msr SPSel, #SPSR_USE_L
adr x0, exc_vectors + + mrs x1, currentel + ubfx w1, w1, #2, #8 + cmp w1, #0x1 + b.ne el2_or_el3 +el1: + msr vbar_el1, x0 + b end + +el2_or_el3: + cmp w1, #0x2 + b.ne el3 + +el2: + msr vbar_el2, x0 + b end + +el3: msr vbar_el3, x0
+end: msr DAIFClr, #0xf
dsb sy