Hello Kyösti Mälkki, Aaron Durbin, Arthur Heymans, cedarhouse1@comcast.net,
I'd like you to do a code review. Please visit
https://review.coreboot.org/c/coreboot/+/38768
to review the following change.
Change subject: [UNTESTED] intel/stm: Introduce stm_update_smm_info()
......................................................................
[UNTESTED] intel/stm: Introduce stm_update_smm_info()
Add a helper that will be called from `mp_init.c`. This allows us to
keep SMRAM allocations local to the STM code.

Call it after the original numbers have been validated.
Change-Id: I2c29a4adfc78f21126122caefffa27a4d6d8c5df
Signed-off-by: Nico Huber <nico.h@gmx.de>
---
M src/cpu/x86/mp_init.c
M src/security/intel/stm/SmmStm.h
M src/security/intel/stm/StmPlatformSmm.c
3 files changed, 26 insertions(+), 21 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/68/38768/1
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 9bc4aab..5b34d4b 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -1042,26 +1042,6 @@
 				&state->smm_save_state_size);
 
 	/*
-	 * Make sure there is enough room for the SMM descriptor
-	 */
-	if (CONFIG(STM)) {
-		state->smm_save_state_size +=
-			sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR);
-
-		/* Currently, the CPU SMM save state size is based on a simplistic
-		 * algorithm. (align on 4K)
-		 * note: In the future, this will need to handle newer x86 processors
-		 * that require alignment of the save state on 32K boundaries.
-		 * The alignment is done here because coreboot has a hard coded
-		 * value of 0x400 for this value.
-		 * Also, this alignment only works on CPUs less than 5 threads
-		 */
-		if (CONFIG(STM))
-			state->smm_save_state_size =
-				ALIGN_UP(state->smm_save_state_size, 0x1000);
-	}
-
-	/*
 	 * Default to smm_initiate_relocation() if trigger callback isn't
 	 * provided.
 	 */
@@ -1093,8 +1073,11 @@
 	    mp_state.ops.relocation_handler != NULL)
 		smm_enable();
 
-	if (is_smm_enabled())
+	if (is_smm_enabled()) {
 		printk(BIOS_INFO, "Will perform SMM setup.\n");
+		stm_update_smm_info(&mp_state.perm_smbase, &mp_state.perm_smsize,
+				    &mp_state.smm_save_state_size);
+	}
 
 	mp_params.num_cpus = mp_state.cpu_count;
 	/* Gather microcode information. */
diff --git a/src/security/intel/stm/SmmStm.h b/src/security/intel/stm/SmmStm.h
index 1690255..f6ab0bf 100644
--- a/src/security/intel/stm/SmmStm.h
+++ b/src/security/intel/stm/SmmStm.h
@@ -27,6 +27,13 @@
  */
 int load_stm_image(uintptr_t mseg);
 
+#if CONFIG(STM)
+void stm_update_smm_info(
+	uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size);
+#else
+static inline void stm_update_smm_info(uintptr_t *base, size_t *size, size_t *ss) {}
+#endif
+
 void stm_setup(
 	uintptr_t mseg, int cpu, uintptr_t smbase,
 	uintptr_t smbase_base, uint32_t offset32);
diff --git a/src/security/intel/stm/StmPlatformSmm.c b/src/security/intel/stm/StmPlatformSmm.c
index 5be6ee7..c389afa 100644
--- a/src/security/intel/stm/StmPlatformSmm.c
+++ b/src/security/intel/stm/StmPlatformSmm.c
@@ -154,6 +154,21 @@
 
 static int stm_load_status = 0;
 
+void stm_update_smm_info(
+	uintptr_t *const perm_smbase, size_t *const perm_smsize,
+	size_t *const smm_save_state_size)
+{
+	*smm_save_state_size += sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR);
+
+	/*
+	 * Currently, the CPU SMM save state size is based on a simplistic
+	 * algorithm. (align on 4K)
+	 * note: In the future, this will need to handle newer x86 processors
+	 * that require alignment of the save state on 32K boundaries.
+	 */
+	*smm_save_state_size = ALIGN_UP(*smm_save_state_size, 0x1000);
+}
+
 void stm_setup(uintptr_t mseg, int cpu, uintptr_t smbase,
 	       uintptr_t base_smbase, uint32_t offset32)
 {
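
For anyone who wants to sanity-check the size arithmetic outside the tree,
below is a minimal standalone sketch of what stm_update_smm_info() does to
the save state size. It is an illustration only, not part of the patch:
ALIGN_UP is redefined locally, FAKE_DESCRIPTOR_SIZE is a made-up stand-in
for sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR), and the 0x400 starting value is
the hard-coded default mentioned in the removed mp_init.c comment.

  /* Standalone illustration only -- not part of this change. */
  #include <stddef.h>
  #include <stdio.h>

  /* Local stand-in for coreboot's ALIGN_UP() macro. */
  #define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

  /* Made-up placeholder for sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR). */
  #define FAKE_DESCRIPTOR_SIZE 0x80

  /* Mirrors the body of stm_update_smm_info(): reserve room for the
   * descriptor, then align the save state size up to 4 KiB. */
  static void fake_update_save_state_size(size_t *smm_save_state_size)
  {
  	*smm_save_state_size += FAKE_DESCRIPTOR_SIZE;
  	*smm_save_state_size = ALIGN_UP(*smm_save_state_size, 0x1000);
  }

  int main(void)
  {
  	size_t size = 0x400; /* hard-coded default noted in the old comment */

  	fake_update_save_state_size(&size);
  	printf("adjusted SMM save state size: %#zx\n", size); /* 0x1000 */
  	return 0;
  }

Starting from 0x400, any descriptor size up to 0xc00 still rounds up to a
single 4 KiB page, which matches what the removed block in mp_init.c computed.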