Attention is currently required from: Jason Glenesk, Raul Rangel, Marshall Dawson, Fred Reitberger, Felix Held. Arthur Heymans has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/64872 )
Change subject: soc/amd: Do SMM relocation via MSR ......................................................................
soc/amd: Do SMM relocation via MSR
AMD CPUs have a convenient MSR that allows setting the SMBASE in the save state without ever entering SMM (e.g. at the default 0x30000 address). This has been a feature in all AMD CPUs since at least AMD K8. This allows the relocation to be done in parallel in ramstage and without setting up a relocation handler, which likely results in a speedup. The more cores there are, the higher the speedup, as relocation previously happened sequentially. On a 4-core AMD Picasso system this results in a 33 ms boot speedup.
TESTED on google/vilboz (Picasso) with CONFIG_SMI_DEBUG: verified that SMM is correctly relocated, with the BSP correctly entering the SMI handler.
Change-Id: I9729fb94ed5c18cfd57b8098c838c08a04490e4b Signed-off-by: Arthur Heymans arthur@aheymans.xyz --- M src/cpu/amd/smm/smm_helper.c M src/cpu/x86/Kconfig M src/cpu/x86/mp_init.c M src/soc/amd/common/Kconfig.common 4 files changed, 25 insertions(+), 6 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/72/64872/1
diff --git a/src/cpu/amd/smm/smm_helper.c b/src/cpu/amd/smm/smm_helper.c index 585fe0d..7724791 100644 --- a/src/cpu/amd/smm/smm_helper.c +++ b/src/cpu/amd/smm/smm_helper.c @@ -112,9 +112,12 @@ { setup_tseg();
- amd64_smm_state_save_area_t *smm_state; - smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase); - smm_state->smbase = staggered_smbase; + uintptr_t smbase = smm_get_cpu_smbase(cpu_index()); + msr_t msr = { + .hi = 0, + .lo = smbase + }; + wrmsr(SMM_BASE_MSR, msr);
tseg_valid(); lock_smm(); diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig index 5b92795..28705c1 100644 --- a/src/cpu/x86/Kconfig +++ b/src/cpu/x86/Kconfig @@ -18,6 +18,13 @@ Allow APs to do other work after initialization instead of going to sleep.
+config X86_SMM_RELOCATION_HANDLER + bool + def_bool y + help + Select this on platforms that do SMM relocation using a + relocation handler running in SMM with a stub at 0x30000. + config LEGACY_SMP_INIT bool
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c index e4e662e..f4c5cd0 100644 --- a/src/cpu/x86/mp_init.c +++ b/src/cpu/x86/mp_init.c @@ -755,6 +755,9 @@
static enum cb_err install_relocation_handler(int num_cpus, size_t save_state_size) { + if (!CONFIG(X86_SMM_RELOCATION_HANDLER)) + return CB_SUCCESS; + struct smm_loader_params smm_params = { .num_cpus = num_cpus, .cpu_save_state_size = save_state_size, @@ -1137,7 +1140,7 @@
/* Sanity check SMM state. */ if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 && - mp_state.ops.relocation_handler != NULL) + (mp_state.ops.relocation_handler != NULL || !CONFIG(X86_SMM_RELOCATION_HANDLER))) smm_enable();
if (is_smm_enabled()) @@ -1152,11 +1155,13 @@ mp_params.num_records = ARRAY_SIZE(mp_steps);
/* Perform backup of default SMM area. */ - default_smm_area = backup_default_smm_area(); + if (CONFIG(X86_SMM_RELOCATION_HANDLER)) + default_smm_area = backup_default_smm_area();
ret = mp_init(cpu_bus, &mp_params);
- restore_default_smm_area(default_smm_area); + if (CONFIG(X86_SMM_RELOCATION_HANDLER)) + restore_default_smm_area(default_smm_area);
/* Signal callback on success if it's provided. */ if (ret == CB_SUCCESS && mp_state.ops.post_mp_init != NULL) diff --git a/src/soc/amd/common/Kconfig.common b/src/soc/amd/common/Kconfig.common index 5a84f2f..7583f9c 100644 --- a/src/soc/amd/common/Kconfig.common +++ b/src/soc/amd/common/Kconfig.common @@ -5,6 +5,10 @@
if SOC_AMD_COMMON
+# ALL AMD CPUs can relocate SMM via MSRC001_0111 +config X86_SMM_RELOCATION_HANDLER + default n + source "src/soc/amd/common/block/*/Kconfig" source "src/soc/amd/common/fsp/Kconfig" source "src/soc/amd/common/pi/Kconfig"