Arthur Heymans has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/50769 )
Change subject: cpu/x86/smm_module_handler: Set up a save state map ......................................................................
cpu/x86/smm_module_handler: Set up a save state map
With the smm_module_loaderv2, the save state map is not linear, so a map is copied from ramstage into the SMI handler.
TESTED on QEMU q35: Both SMMLOADER V1 and V2 handle save states properly.
Change-Id: I31c57b59559ad4ee98500d83969424e5345881ee Signed-off-by: Arthur Heymans <arthur@aheymans.xyz> --- M src/cpu/x86/smm/smm_module_handler.c M src/cpu/x86/smm/smm_module_loader.c M src/cpu/x86/smm/smm_module_loaderv2.c M src/include/cpu/x86/smm.h 4 files changed, 19 insertions(+), 11 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/69/50769/1
diff --git a/src/cpu/x86/smm/smm_module_handler.c b/src/cpu/x86/smm/smm_module_handler.c index 9d7dfba..71e6030 100644 --- a/src/cpu/x86/smm/smm_module_handler.c +++ b/src/cpu/x86/smm/smm_module_handler.c @@ -95,15 +95,10 @@
void *smm_get_save_state(int cpu) { - char *base; + if (cpu > smm_runtime.num_cpus) + return NULL;
- /* This function assumes all save states start at top of default - * SMRAM size space and are staggered down by save state size. */ - base = (void *)(uintptr_t)smm_runtime.smbase; - base += SMM_DEFAULT_SIZE; - base -= (cpu + 1) * smm_runtime.save_state_size; - - return base; + return (void *)(smm_runtime.save_state_top[cpu] - smm_runtime.save_state_size); }
uint32_t smm_revision(void) diff --git a/src/cpu/x86/smm/smm_module_loader.c b/src/cpu/x86/smm/smm_module_loader.c index d0bb64a..e2ef2dc 100644 --- a/src/cpu/x86/smm/smm_module_loader.c +++ b/src/cpu/x86/smm/smm_module_loader.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */
+#include <stdint.h> #include <string.h> #include <acpi/acpi_gnvs.h> #include <rmodule.h> @@ -394,5 +395,10 @@ handler_mod_params->num_cpus = params->num_concurrent_stacks; handler_mod_params->gnvs_ptr = (uintptr_t)acpi_get_gnvs();
+ for (int i = 0; i < CONFIG_MAX_CPUS; i++) { + handler_mod_params->save_state_top[i] = (uintptr_t)smram + SMM_DEFAULT_SIZE + - params->per_cpu_save_state_size * i; + } + return smm_module_setup_stub(smram, size, params, fxsave_area); } diff --git a/src/cpu/x86/smm/smm_module_loaderv2.c b/src/cpu/x86/smm/smm_module_loaderv2.c index a2cd54a..a24edd5 100644 --- a/src/cpu/x86/smm/smm_module_loaderv2.c +++ b/src/cpu/x86/smm/smm_module_loaderv2.c @@ -318,7 +318,6 @@ { size_t total_save_state_size; size_t smm_stub_size; - size_t stub_entry_offset; char *smm_stub_loc; void *stacks_top; size_t size; @@ -404,7 +403,7 @@ /* * Placing multiple entry codes is only needed for the permanent handler. * The assumption is made that the permanent handler is not at SMM_DEFAULT_BASE. - * This assumption is sane as the default SMM space /* cannot hold our SMM setup. */ + * This assumption is sane as the default SMM space cannot hold our SMM setup. */ if (smbase != (void *)SMM_DEFAULT_BASE) { if (!smm_place_entry_code((uintptr_t)smbase, params->num_concurrent_save_states, @@ -634,5 +633,12 @@ printk(BIOS_DEBUG, "%s: cpu0 entry: %p\n", __func__, base); params->smm_entry = (uintptr_t)base + params->smm_main_entry_offset; - return smm_module_setup_stub(base, size, params, fxsave_area); + if (smm_module_setup_stub(base, size, params, fxsave_area)) + return -1; + + for (int i = 0; i < params->num_concurrent_stacks; i++) { + handler_mod_params->save_state_top[i] = + cpus[i].ss_start + params->per_cpu_save_state_size; + } + return 0; } diff --git a/src/include/cpu/x86/smm.h b/src/include/cpu/x86/smm.h index 71227f4..e1cc064 100644 --- a/src/include/cpu/x86/smm.h +++ b/src/include/cpu/x86/smm.h @@ -60,6 +60,7 @@ u32 save_state_size; u32 num_cpus; u32 gnvs_ptr; + uintptr_t save_state_top[CONFIG_MAX_CPUS]; } __packed;
struct smm_module_params {