Werner Zeh submitted this change.


Approvals:
  Werner Zeh: Looks good to me, approved
  Julius Werner: Looks good to me, approved
  ron minnich: Looks good to me, approved
  build bot (Jenkins): Verified
arch/arm64: Add EL1/EL2/EL3 support for arm64

Currently, arch/arm64 requires coreboot to run on EL3, since it
accesses EL3-specific registers. This can be an issue when, for
example, one boots into TF-A first and afterwards drops into EL2
to run coreboot.

This patch makes arch/arm64 more versatile by removing the EL3
constraint and allowing arm64 coreboot to run on EL1, EL2 or EL3.

The strategy here is to add a Kconfig option (ARM64_CURRENT_EL)
which lets us specify coreboot's exception level upon entry. Based
on that, we access the appropriate ELx registers. So, for example,
when running coreboot on EL1, we would not access vbar_el3 or
vbar_el2, but vbar_el1 instead. This way, we don't generate faults
when accessing higher-EL registers.
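
As an illustration, this is roughly what the new EL-agnostic
accessors expand to for a register such as sctlr (a sketch based
on the MAKE_REGISTER_ACCESSORS_CURRENT_EL macro in lib_helpers.h
below; since CONFIG_ARM64_CURRENT_EL is a compile-time constant,
the compiler keeps only one branch and no higher-EL register is
ever touched):

  /* Sketch of the generated EL-agnostic read accessor for sctlr. */
  static inline uint64_t raw_read_sctlr(void)
  {
          if (CONFIG_ARM64_CURRENT_EL == EL1)
                  return raw_read_sctlr_el1();
          else if (CONFIG_ARM64_CURRENT_EL == EL2)
                  return raw_read_sctlr_el2();
          return raw_read_sctlr_el3();
  }

Call sites then use raw_read_sctlr()/raw_write_sctlr() instead of
the _el3 variants, as seen in cache.c and mmu.c below.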

Currently only tested on the qemu-aarch64 target. Exceptions were
tested by enabling FATAL_ASSERTS.

Signed-off-by: David Milosevic <David.Milosevic@9elements.com>
Change-Id: Iae1c57f0846c8d0585384f7e54102a837e701e7e
Reviewed-on: https://review.coreboot.org/c/coreboot/+/74798
Reviewed-by: Werner Zeh <werner.zeh@siemens.com>
Reviewed-by: ron minnich <rminnich@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Julius Werner <jwerner@chromium.org>
---
M src/arch/arm64/Kconfig
M src/arch/arm64/armv8/cache.c
M src/arch/arm64/armv8/cpu.S
M src/arch/arm64/armv8/exception.c
M src/arch/arm64/armv8/mmu.c
M src/arch/arm64/boot.c
M src/arch/arm64/include/arch/asm.h
M src/arch/arm64/include/armv8/arch/cache.h
M src/arch/arm64/include/armv8/arch/lib_helpers.h
M src/arch/arm64/ramdetect.c
M src/arch/arm64/transition.c
M src/arch/arm64/transition_asm.S
12 files changed, 117 insertions(+), 38 deletions(-)

diff --git a/src/arch/arm64/Kconfig b/src/arch/arm64/Kconfig
index 9b89c37..2d71067 100644
--- a/src/arch/arm64/Kconfig
+++ b/src/arch/arm64/Kconfig
@@ -23,6 +23,20 @@

if ARCH_ARM64

+config ARM64_CURRENT_EL
+ int
+ default 3
+ range 1 3
+ help
+ The exception level on which coreboot is started. Accepted
+ values are: 1 (EL1), 2 (EL2) and 3 (EL3). This option can be
+ used to restrict register access to the given exception level
+ in case prior firmware has already dropped to a lower exception
+ level. By default, coreboot is the first firmware that runs on
+ the system and should thus always run on EL3. This option is
+ only provided for edge-case platforms that need to run other
+ firmware before coreboot, which then drops to a lower exception
+ level.
+
config ARM64_USE_ARCH_TIMER
bool
default n
@@ -30,7 +44,7 @@
config ARM64_USE_ARM_TRUSTED_FIRMWARE
bool
default n
- depends on ARCH_RAMSTAGE_ARM64
+ depends on ARCH_RAMSTAGE_ARM64 && ARM64_CURRENT_EL = 3

config ARM64_BL31_EXTERNAL_FILE
string "Path to external BL31.ELF (leave empty to build from source)"
diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c
index dbaedec..879ef62 100644
--- a/src/arch/arm64/armv8/cache.c
+++ b/src/arch/arm64/armv8/cache.c
@@ -148,10 +148,12 @@
*/
void arch_segment_loaded(uintptr_t start, size_t size, int flags)
{
- uint32_t sctlr = raw_read_sctlr_el3();
+ uint32_t sctlr = raw_read_sctlr();
+
if (sctlr & SCTLR_C)
dcache_clean_by_mva((void *)start, size);
else if (sctlr & SCTLR_I)
dcache_clean_invalidate_by_mva((void *)start, size);
+
icache_invalidate_all();
}
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
index 04bf6a7..a40ee64 100644
--- a/src/arch/arm64/armv8/cpu.S
+++ b/src/arch/arm64/armv8/cpu.S
@@ -77,10 +77,10 @@
memory (e.g. the stack) in between disabling and flushing the cache. */
ENTRY(mmu_disable)
str x30, [sp, #-0x8]
- mrs x0, sctlr_el3
+ mrs x0, CURRENT_EL(sctlr)
mov x1, #~(SCTLR_C | SCTLR_M)
and x0, x0, x1
- msr sctlr_el3, x0
+ msr CURRENT_EL(sctlr), x0
isb
bl dcache_clean_invalidate_all
ldr x30, [sp, #-0x8]
@@ -102,12 +102,11 @@

/* x22: SCTLR, return address: x23 (callee-saved by subroutine) */
mov x23, x30
- /* TODO: Assert that we always start running at EL3 */
- mrs x22, sctlr_el3
+ mrs x22, CURRENT_EL(sctlr)

/* Activate ICache already for speed during cache flush below. */
orr x22, x22, #SCTLR_I
- msr sctlr_el3, x22
+ msr CURRENT_EL(sctlr), x22
isb

/* Invalidate dcache */
@@ -116,13 +115,15 @@
/* Reinitialize SCTLR from scratch to known-good state.
This may disable MMU or DCache. */
ldr w22, =(SCTLR_RES1 | SCTLR_I | SCTLR_SA)
- msr sctlr_el3, x22
+ msr CURRENT_EL(sctlr), x22

+#if CONFIG_ARM64_CURRENT_EL == EL3
/* Initialize SCR to unmask all interrupts (so that if we get a spurious
IRQ/SError we'll see it when it happens, not hang in BL31). This will
only have an effect after we DAIFClr in exception_init(). */
mov x22, #SCR_RES1 | SCR_IRQ | SCR_FIQ | SCR_EA
msr scr_el3, x22
+#endif

/* Invalidate icache and TLB for good measure */
ic iallu
diff --git a/src/arch/arm64/armv8/exception.c b/src/arch/arm64/armv8/exception.c
index 8583fd5..6035d74 100644
--- a/src/arch/arm64/armv8/exception.c
+++ b/src/arch/arm64/armv8/exception.c
@@ -51,9 +51,10 @@
struct regs *regs = &exc_state->regs;

printk(BIOS_DEBUG, "ELR = 0x%016llx ESR = 0x%08llx\n",
- elx->elr, raw_read_esr_el3());
+ elx->elr, raw_read_esr());
printk(BIOS_DEBUG, "FAR = 0x%016llx SPSR = 0x%08llx\n",
- raw_read_far_el3(), raw_read_spsr_el3());
+ raw_read_far(), raw_read_spsr());
+
for (i = 0; i < 30; i += 2) {
printk(BIOS_DEBUG,
"X%02d = 0x%016llx X%02d = 0x%016llx\n",
@@ -173,7 +174,8 @@
{
/* Update instruction pointer to next instruction. */
state->elx.elr += sizeof(uint32_t);
- raw_write_elr_el3(state->elx.elr);
+ raw_write_elr(state->elx.elr);
+
return EXC_RET_HANDLED;
}

diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index 3cedbcf..6105f9a 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -224,7 +224,7 @@

/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
dsb();
- tlbiall_el3();
+ tlbiall();
dsb();
isb();
}
@@ -245,15 +245,15 @@
assert((u8 *)root == _ttb);

/* Initialize TTBR */
- raw_write_ttbr0_el3((uintptr_t)root);
+ raw_write_ttbr0((uintptr_t)root);

/* Initialize MAIR indices */
- raw_write_mair_el3(MAIR_ATTRIBUTES);
+ raw_write_mair(MAIR_ATTRIBUTES);

/* Initialize TCR flags */
- raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
- TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
- TCR_TBI_USED);
+ raw_write_tcr(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
+ TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
+ TCR_TBI_USED);
}

/* Func : mmu_save_context
@@ -264,10 +264,10 @@
assert(mmu_context);

/* Back-up MAIR_ATTRIBUTES */
- mmu_context->mair = raw_read_mair_el3();
+ mmu_context->mair = raw_read_mair();

/* Back-up TCR value */
- mmu_context->tcr = raw_read_tcr_el3();
+ mmu_context->tcr = raw_read_tcr();
}

/* Func : mmu_restore_context
@@ -278,13 +278,13 @@
assert(mmu_context);

/* Restore TTBR */
- raw_write_ttbr0_el3((uintptr_t)_ttb);
+ raw_write_ttbr0((uintptr_t)_ttb);

/* Restore MAIR indices */
- raw_write_mair_el3(mmu_context->mair);
+ raw_write_mair(mmu_context->mair);

/* Restore TCR flags */
- raw_write_tcr_el3(mmu_context->tcr);
+ raw_write_tcr(mmu_context->tcr);

/* invalidate tlb since ttbr is updated. */
tlb_invalidate_all();
@@ -295,8 +295,8 @@
assert_correct_ttb_mapping(_ttb);
assert_correct_ttb_mapping((void *)((uintptr_t)_ettb - 1));

- uint32_t sctlr = raw_read_sctlr_el3();
- sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
- raw_write_sctlr_el3(sctlr);
+ uint32_t sctlr = raw_read_sctlr();
+ raw_write_sctlr(sctlr | SCTLR_C | SCTLR_M | SCTLR_I);
+
isb();
}
diff --git a/src/arch/arm64/boot.c b/src/arch/arm64/boot.c
index 89668e8..7a8060f 100644
--- a/src/arch/arm64/boot.c
+++ b/src/arch/arm64/boot.c
@@ -18,8 +18,10 @@

if (CONFIG(ARM64_USE_ARM_TRUSTED_FIRMWARE))
run_bl31((u64)doit, (u64)arg, payload_spsr);
- else
+ else if (CONFIG_ARM64_CURRENT_EL == EL3)
transition_to_el2(doit, arg, payload_spsr);
+ else
+ doit(arg);
}

void arch_prog_run(struct prog *prog)
diff --git a/src/arch/arm64/include/arch/asm.h b/src/arch/arm64/include/arch/asm.h
index df5952a..81921c5 100644
--- a/src/arch/arm64/include/arch/asm.h
+++ b/src/arch/arm64/include/arch/asm.h
@@ -28,4 +28,14 @@
ENTRY(name) \
.weak name \

+#if CONFIG_ARM64_CURRENT_EL == 1
+#define CURRENT_EL(reg) reg##_el1
+#elif CONFIG_ARM64_CURRENT_EL == 2
+#define CURRENT_EL(reg) reg##_el2
+#elif CONFIG_ARM64_CURRENT_EL == 3
+#define CURRENT_EL(reg) reg##_el3
+#else
+#error "Invalid setting for CONFIG_ARM64_CURRENT_EL!"
+#endif
+
#endif /* __ARM_ARM64_ASM_H */
diff --git a/src/arch/arm64/include/armv8/arch/cache.h b/src/arch/arm64/include/armv8/arch/cache.h
index 3a72d40..5097196 100644
--- a/src/arch/arm64/include/armv8/arch/cache.h
+++ b/src/arch/arm64/include/armv8/arch/cache.h
@@ -55,7 +55,8 @@
static inline void tlb_invalidate_all(void)
{
/* TLBIALL includes dTLB and iTLB on systems that have them. */
- tlbiall_el3();
+
+ tlbiall();
dsb();
isb();
}
diff --git a/src/arch/arm64/include/armv8/arch/lib_helpers.h b/src/arch/arm64/include/armv8/arch/lib_helpers.h
index ff3459a..cb6c2ab 100644
--- a/src/arch/arm64/include/armv8/arch/lib_helpers.h
+++ b/src/arch/arm64/include/armv8/arch/lib_helpers.h
@@ -113,10 +113,49 @@
: : "r" (value) : "memory"); \
}

+/*
+ * In order to allow easy access to the current EL's registers, we
+ * export the following two functions for each EL register that is
+ * passed to the MAKE_REGISTER_ACCESSORS_CURRENT_EL macro. Doing that
+ * eliminates, or at least hides, repetitive branching on the current
+ * EL across the arm64 codebase.
+ *
+ * MAKE_REGISTER_ACCESSORS_CURRENT_EL is hooked into
+ * MAKE_REGISTER_ACCESSORS_EL123 in order to automatically generate
+ * current-EL accessors only for registers which exist on EL1, EL2
+ * and EL3.
+ *
+ * Note that we don't handle EL0 here, as most of the defined registers
+ * do not have an EL0 variant (see MAKE_REGISTER_ACCESSORS_EL123).
+ *
+ * Important:
+ * - the target register should be specified without the '_elx' suffix
+ * - only registers which exist on EL1, EL2 and EL3 should be passed
+ *   to the MAKE_REGISTER_ACCESSORS_CURRENT_EL macro
+ */
+#define MAKE_REGISTER_ACCESSORS_CURRENT_EL(reg) \
+ static inline uint64_t raw_read_##reg(void) \
+ { \
+ if (CONFIG_ARM64_CURRENT_EL == EL1) \
+ return raw_read_##reg##_el1(); \
+ else if (CONFIG_ARM64_CURRENT_EL == EL2) \
+ return raw_read_##reg##_el2(); \
+ return raw_read_##reg##_el3(); \
+ } \
+ static inline void raw_write_##reg(uint64_t value) \
+ { \
+ if (CONFIG_ARM64_CURRENT_EL == EL1) \
+ raw_write_##reg##_el1(value); \
+ else if (CONFIG_ARM64_CURRENT_EL == EL2) \
+ raw_write_##reg##_el2(value); \
+ else \
+ raw_write_##reg##_el3(value); \
+ }
+
#define MAKE_REGISTER_ACCESSORS_EL123(reg) \
MAKE_REGISTER_ACCESSORS(reg##_el1) \
MAKE_REGISTER_ACCESSORS(reg##_el2) \
- MAKE_REGISTER_ACCESSORS(reg##_el3)
+ MAKE_REGISTER_ACCESSORS(reg##_el3) \
+ MAKE_REGISTER_ACCESSORS_CURRENT_EL(reg)

/* Architectural register accessors */
MAKE_REGISTER_ACCESSORS_EL123(actlr)
@@ -318,6 +357,16 @@
__asm__ __volatile__("tlbi alle3\n\t" : : : "memory");
}

+static inline void tlbiall(void)
+{
+ if (CONFIG_ARM64_CURRENT_EL == EL1)
+ tlbiall_el1();
+ else if (CONFIG_ARM64_CURRENT_EL == EL2)
+ tlbiall_el2();
+ else
+ tlbiall_el3();
+}
+
static inline void tlbiallis_el1(void)
{
__asm__ __volatile__("tlbi alle1is\n\t" : : : "memory");
diff --git a/src/arch/arm64/ramdetect.c b/src/arch/arm64/ramdetect.c
index 2f6a1cd..c99c5b4 100644
--- a/src/arch/arm64/ramdetect.c
+++ b/src/arch/arm64/ramdetect.c
@@ -13,12 +13,13 @@

static int abort_checker(struct exc_state *state, uint64_t vector_id)
{
- if (raw_read_esr_el3() >> 26 != 0x25)
+ if (raw_read_esr() >> 26 != 0x25)
return EXC_RET_IGNORED; /* Not a data abort. */

abort_state = ABORT_CHECKER_TRIGGERED;
state->elx.elr += sizeof(uint32_t); /* Jump over faulting instruction. */
- raw_write_elr_el3(state->elx.elr);
+ raw_write_elr(state->elx.elr);
+
return EXC_RET_HANDLED;
}

diff --git a/src/arch/arm64/transition.c b/src/arch/arm64/transition.c
index e5c83bf..8b84ce2 100644
--- a/src/arch/arm64/transition.c
+++ b/src/arch/arm64/transition.c
@@ -17,7 +17,9 @@
struct regs *regs = &exc_state->regs;
uint8_t elx_mode;

- elx->spsr = raw_read_spsr_el3();
+ elx->spsr = raw_read_spsr();
+ elx->elr = raw_read_elr();
+
elx_mode = get_mode_from_spsr(elx->spsr);

if (elx_mode == SPSR_USE_H)
@@ -25,8 +27,6 @@
else
regs->sp = raw_read_sp_el0();

- elx->elr = raw_read_elr_el3();
-
exc_dispatch(exc_state, id);
}

diff --git a/src/arch/arm64/transition_asm.S b/src/arch/arm64/transition_asm.S
index 73a6fb6..b4522e0 100644
--- a/src/arch/arm64/transition_asm.S
+++ b/src/arch/arm64/transition_asm.S
@@ -142,19 +142,16 @@
ENDPROC(exc_exit)

/*
- * exception_init_asm: Initialize VBAR and point SP_EL3 to exception stack.
+ * exception_init_asm: Initialize VBAR and point SP_ELx to exception stack.
* Also unmask aborts now that we can report them. x0 = end of exception stack
*/
ENTRY(exception_init_asm)
msr SPSel, #SPSR_USE_H
mov sp, x0
msr SPSel, #SPSR_USE_L
-
adr x0, exc_vectors
- msr vbar_el3, x0
-
+ msr CURRENT_EL(vbar), x0
msr DAIFClr, #0xf
-
dsb sy
isb
ret


Gerrit-Project: coreboot
Gerrit-Branch: main
Gerrit-Change-Id: Iae1c57f0846c8d0585384f7e54102a837e701e7e
Gerrit-Change-Number: 74798
Gerrit-PatchSet: 19
Gerrit-Owner: David Milosevic <David.Milosevic@9elements.com>
Gerrit-Reviewer: Arthur Heymans <arthur@aheymans.xyz>
Gerrit-Reviewer: Christian Walter <christian.walter@9elements.com>
Gerrit-Reviewer: Felix Singer <service+coreboot-gerrit@felixsinger.de>
Gerrit-Reviewer: Julius Werner <jwerner@chromium.org>
Gerrit-Reviewer: Lean Sheng Tan <sheng.tan@9elements.com>
Gerrit-Reviewer: Maximilian Brune <maximilian.brune@9elements.com>
Gerrit-Reviewer: Werner Zeh <werner.zeh@siemens.com>
Gerrit-Reviewer: build bot (Jenkins) <no-reply@coreboot.org>
Gerrit-Reviewer: ron minnich <rminnich@gmail.com>
Gerrit-CC: Angel Pons <th3fanbus@gmail.com>
Gerrit-CC: Martin L Roth <gaumless@gmail.com>
Gerrit-CC: Nico Huber <nico.h@gmx.de>
Gerrit-CC: Yidi Lin <yidilin@google.com>
Gerrit-MessageType: merged