Kyösti Mälkki has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/41950 )
Change subject: cpu/x86: Extend mp_ops for family hooks ......................................................................
cpu/x86: Extend mp_ops for family hooks
Allow family-specific routines to be called before and after SMM relocation is initiated.
Change-Id: I4f346b64f97050d065d766e59dc5bd3dfd985760
Signed-off-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
---
M src/cpu/intel/haswell/haswell_init.c
M src/cpu/intel/model_2065x/model_2065x_init.c
M src/cpu/intel/model_206ax/model_206ax_init.c
M src/cpu/intel/smm/em64t101.c
M src/cpu/intel/smm/gen1/smmrelocate.c
M src/cpu/x86/mp_init.c
M src/include/cpu/intel/smm_reloc.h
M src/include/cpu/x86/mp.h
M src/soc/intel/baytrail/cpu.c
M src/soc/intel/braswell/cpu.c
M src/soc/intel/broadwell/cpu.c
11 files changed, 29 insertions(+), 50 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/50/41950/1
diff --git a/src/cpu/intel/haswell/haswell_init.c b/src/cpu/intel/haswell/haswell_init.c
index 7b0c2fd..345127a 100644
--- a/src/cpu/intel/haswell/haswell_init.c
+++ b/src/cpu/intel/haswell/haswell_init.c
@@ -733,11 +733,8 @@
 	*parallel = 1;
 }
 
-static void per_cpu_smm_trigger(void)
+static void after_per_cpu_smm_trigger(void)
 {
-	/* Relocate the SMM handler. */
-	smm_relocate();
-
 	/* After SMM relocation a 2nd microcode load is required. */
 	intel_microcode_load_unlocked(microcode_patch);
 }
@@ -764,7 +761,7 @@
 	if (CONFIG(HAVE_SMI_HANDLER))
 		select_mp_ops_em64t101_cpu_svrstr(&mp_ops);
-	mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger;
+	mp_ops.after_per_cpu_smm_trigger = after_per_cpu_smm_trigger;
if (mp_init_with_smm(cpu_bus, &mp_ops)) printk(BIOS_ERR, "MP initialization failure.\n"); diff --git a/src/cpu/intel/model_2065x/model_2065x_init.c b/src/cpu/intel/model_2065x/model_2065x_init.c index c5ec156..59d998b 100644 --- a/src/cpu/intel/model_2065x/model_2065x_init.c +++ b/src/cpu/intel/model_2065x/model_2065x_init.c @@ -289,11 +289,8 @@ *parallel = 1; }
-static void per_cpu_smm_trigger(void) +static void after_per_cpu_smm_trigger(void) { - /* Relocate the SMM handler. */ - smm_relocate(); - /* After SMM relocation a 2nd microcode load is required. */ intel_microcode_load_unlocked(microcode_patch); } @@ -321,7 +318,7 @@ if (CONFIG(HAVE_SMI_HANDLER)) select_mp_ops_em64t101(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger; + mp_ops.after_per_cpu_smm_trigger = after_per_cpu_smm_trigger;
if (mp_init_with_smm(cpu_bus, &mp_ops)) printk(BIOS_ERR, "MP initialization failure.\n"); diff --git a/src/cpu/intel/model_206ax/model_206ax_init.c b/src/cpu/intel/model_206ax/model_206ax_init.c index 26fd04e..d250140 100644 --- a/src/cpu/intel/model_206ax/model_206ax_init.c +++ b/src/cpu/intel/model_206ax/model_206ax_init.c @@ -515,11 +515,8 @@ *parallel = 1; }
-static void per_cpu_smm_trigger(void) +static void after_per_cpu_smm_trigger(void) { - /* Relocate the SMM handler. */ - smm_relocate(); - /* After SMM relocation a 2nd microcode load is required. */ intel_microcode_load_unlocked(microcode_patch); } @@ -547,7 +544,7 @@ if (CONFIG(HAVE_SMI_HANDLER)) select_mp_ops_em64t101(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger; + mp_ops.after_per_cpu_smm_trigger = after_per_cpu_smm_trigger;
 	if (mp_init_with_smm(cpu_bus, &mp_ops))
 		printk(BIOS_ERR, "MP initialization failure.\n");
diff --git a/src/cpu/intel/smm/em64t101.c b/src/cpu/intel/smm/em64t101.c
index e69e871..7f80e74 100644
--- a/src/cpu/intel/smm/em64t101.c
+++ b/src/cpu/intel/smm/em64t101.c
@@ -33,18 +33,12 @@
 		smm_initiate_relocation();
 }
 
-void smm_relocate(void)
+static void per_cpu_smm_trigger(void)
 {
 	if (!boot_cpu())
 		smm_initiate_relocation();
 }
-static void per_cpu_smm_trigger(void)
-{
-	/* Relocate the SMM handler. */
-	smm_relocate();
-}
-
 /* The relocation work is actually performed in SMM context, but the code
  * resides in the ramstage module. This occurs by trampolining from the default
  * SMRAM entry point to here. */
diff --git a/src/cpu/intel/smm/gen1/smmrelocate.c b/src/cpu/intel/smm/gen1/smmrelocate.c
index 654659a..0e86115 100644
--- a/src/cpu/intel/smm/gen1/smmrelocate.c
+++ b/src/cpu/intel/smm/gen1/smmrelocate.c
@@ -20,15 +20,12 @@
#define SMRR_SUPPORTED (1 << 11)
-static void per_cpu_smm_trigger_alt_1(void) +static void before_per_cpu_smm_trigger_alt_1(void) { set_vmx_and_lock(); - - /* Relocate the SMM handler. */ - smm_relocate(); }
-static void per_cpu_smm_trigger_alt_2(void) +static void before_per_cpu_smm_trigger_alt_2(void) { msr_t mtrr_cap = rdmsr(MTRR_CAP_MSR); if (mtrr_cap.lo & SMRR_SUPPORTED) { @@ -51,9 +48,6 @@ wrmsr(IA32_FEATURE_CONTROL, ia32_ft_ctrl); } } - - /* Relocate the SMM handler. */ - smm_relocate(); }
/* The relocation work is actually performed in SMM context, but the code @@ -104,7 +98,7 @@
select_mp_ops_em64t101(mp_ops);
- mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger_alt_1; + mp_ops->before_per_cpu_smm_trigger = before_per_cpu_smm_trigger_alt_1;
get_fms(&c, cpuid_eax(1)); if (c.x86 != 6) @@ -120,5 +114,5 @@ }
 	mp_ops->relocation_handler = relocation_handler;
-	mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger_alt_2;
+	mp_ops->before_per_cpu_smm_trigger = before_per_cpu_smm_trigger_alt_2;
 }
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index caed8f4..7909f5d 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -843,8 +843,13 @@
 	/* Do nothing if SMM is disabled.*/
 	if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
 		return;
+	/* Trigger SMM mode for the currently running processor. */
+	if (mp_state.ops.before_per_cpu_smm_trigger)
+		mp_state.ops.before_per_cpu_smm_trigger();
 	mp_state.ops.per_cpu_smm_trigger();
+	if (mp_state.ops.after_per_cpu_smm_trigger)
+		mp_state.ops.after_per_cpu_smm_trigger();
 }
 static struct mp_callback *ap_callbacks[CONFIG_MAX_CPUS];
diff --git a/src/include/cpu/intel/smm_reloc.h b/src/include/cpu/intel/smm_reloc.h
index 137f9dd..cd347d5 100644
--- a/src/include/cpu/intel/smm_reloc.h
+++ b/src/include/cpu/intel/smm_reloc.h
@@ -37,9 +37,6 @@
 void fill_in_ied_params(struct smm_relocation_params *params);
 void write_smm_msrs(int cpu, struct smm_relocation_params *relo_params);
 
-/* These helpers are for performing SMM relocation. */
-void smm_relocate(void);
-
 #define D_OPEN (1 << 6)
 #define D_CLS (1 << 5)
 #define D_LCK (1 << 4)
diff --git a/src/include/cpu/x86/mp.h b/src/include/cpu/x86/mp.h
index 04f7804..52f52aa 100644
--- a/src/include/cpu/x86/mp.h
+++ b/src/include/cpu/x86/mp.h
@@ -66,6 +66,8 @@
 	 * not provided, smm_initiate_relocation() is used.
 	 */
 	void (*per_cpu_smm_trigger)(void);
+	void (*before_per_cpu_smm_trigger)(void);
+	void (*after_per_cpu_smm_trigger)(void);
 	/*
 	 * This function is called while each CPU is in the SMM relocation
 	 * handler. Its primary purpose is to adjust the SMBASE for the
diff --git a/src/soc/intel/baytrail/cpu.c b/src/soc/intel/baytrail/cpu.c
index 7598aaa..ac9f806 100644
--- a/src/soc/intel/baytrail/cpu.c
+++ b/src/soc/intel/baytrail/cpu.c
@@ -123,13 +123,10 @@
 	*parallel = 1;
 }
-static void per_cpu_smm_trigger(void) +static void after_per_cpu_smm_trigger(void) { const struct pattrs *pattrs = pattrs_get();
- /* Relocate SMM space. */ - smm_initiate_relocation(); - /* Load microcode after SMM relocation. */ intel_microcode_load_unlocked(pattrs->microcode_patch); } @@ -153,7 +150,7 @@ if (CONFIG(HAVE_SMI_HANDLER)) select_mp_ops_em64t100(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger; + mp_ops.after_per_cpu_smm_trigger = after_per_cpu_smm_trigger;
if (mp_init_with_smm(cpu_bus, &mp_ops)) { printk(BIOS_ERR, "MP initialization failure.\n"); diff --git a/src/soc/intel/braswell/cpu.c b/src/soc/intel/braswell/cpu.c index 33321f0..90ac2a3 100644 --- a/src/soc/intel/braswell/cpu.c +++ b/src/soc/intel/braswell/cpu.c @@ -128,7 +128,7 @@ *parallel = 1; }
-static void per_cpu_smm_trigger(void)
+static void before_per_cpu_smm_trigger(void)
 {
 	const struct pattrs *pattrs = pattrs_get();
 	msr_t msr_value;
@@ -137,10 +137,11 @@
 	msr_value = rdmsr(IA32_BIOS_SIGN_ID);
 	if (msr_value.hi == 0)
 		intel_microcode_load_unlocked(pattrs->microcode_patch);
+}
-	/* Relocate SMM space. */
-	smm_initiate_relocation();
-
+static void after_per_cpu_smm_trigger(void)
+{
+	const struct pattrs *pattrs = pattrs_get();
 	/* Load microcode after SMM relocation. */
 	intel_microcode_load_unlocked(pattrs->microcode_patch);
 }
@@ -166,7 +167,8 @@
 	if (CONFIG(HAVE_SMI_HANDLER))
 		select_mp_ops_em64t100(&mp_ops);
-	mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger;
+	mp_ops.before_per_cpu_smm_trigger = before_per_cpu_smm_trigger;
+	mp_ops.after_per_cpu_smm_trigger = after_per_cpu_smm_trigger;
if (mp_init_with_smm(cpu_bus, &mp_ops)) printk(BIOS_ERR, "MP initialization failure.\n"); diff --git a/src/soc/intel/broadwell/cpu.c b/src/soc/intel/broadwell/cpu.c index ee9bc75..4d8419d 100644 --- a/src/soc/intel/broadwell/cpu.c +++ b/src/soc/intel/broadwell/cpu.c @@ -479,11 +479,8 @@ *parallel = 1; }
-static void per_cpu_smm_trigger(void) +static void after_per_cpu_smm_trigger(void) { - /* Relocate the SMM handler. */ - smm_relocate(); - /* After SMM relocation a 2nd microcode load is required. */ intel_microcode_load_unlocked(microcode_patch); } @@ -515,7 +512,7 @@ if (CONFIG(HAVE_SMI_HANDLER)) select_mp_ops_em64t101_cpu_svrstr(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger; + mp_ops.after_per_cpu_smm_trigger = after_per_cpu_smm_trigger;
if (mp_init_with_smm(cpu_bus, &mp_ops)) printk(BIOS_ERR, "MP initialization failure.\n");
Kyösti Mälkki has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/41950 )
Change subject: cpu/x86: Extend mp_ops for family hooks ......................................................................
Patch Set 6:
This change is ready for review.
Kyösti Mälkki has abandoned this change. ( https://review.coreboot.org/c/coreboot/+/41950 )
Change subject: cpu/x86: Extend mp_ops for family hooks ......................................................................
Abandoned