Aaron Durbin (adurbin@chromium.org) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/14597
-gerrit
commit d319bdbc2be35834eb1d30dee7a0364e15c35f42
Author: Aaron Durbin <adurbin@chromium.org>
Date:   Tue May 3 17:45:59 2016 -0500
soc/intel/skylake: convert to using common MP and SMM init
In order to reduce code duplication, use the common MP and SMM initialization flow.
Change-Id: I5c4674ed258922b6616d75f070df976ef9fad209
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
---
 src/soc/intel/skylake/cpu.c             | 147 ++++++++++++++------------------
 src/soc/intel/skylake/include/soc/cpu.h |   6 --
 src/soc/intel/skylake/include/soc/smm.h |  15 ++--
 src/soc/intel/skylake/smmrelocate.c     | 144 ++++---------------------------
 4 files changed, 92 insertions(+), 220 deletions(-)
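For reviewers new to the common flow: mp_init_with_smm() owns the flight plan
and calls back into the SoC through struct mp_ops, so the SoC only fills in
hooks instead of building its own mp_flight_record table. Below is a minimal
sketch of the shape of such a conversion (hook names are those used in this
patch; the summaries paraphrase the hook bodies in the diff below, and the
exact call order is decided by the common code in src/cpu/x86/mp_init.c, so
treat this as an approximation rather than authoritative documentation):

    /*
     * Each hook maps onto one step of the old hand-rolled flow:
     *  - pre_mp_init():           BSP-only setup (MTRRs) before APs are started
     *  - get_cpu_count():         how many threads to bring up
     *  - get_microcode_info():    microcode pointer and parallel-load flag
     *  - adjust_cpu_apic_entry(): remap APIC IDs, e.g. when HT is disabled
     *  - get_smm_info():          permanent SMRAM region and save state size
     *  - pre_mp_smm_init():       BSP-side SMM setup before relocation
     *  - per_cpu_smm_trigger():   each CPU enters SMM to relocate its SMBASE
     *  - relocation_handler():    runs inside SMM for every CPU
     *  - post_mp_init():          enable SMIs and lock SMRAM last
     */
    static const struct mp_ops mp_ops = {
            .pre_mp_init = pre_mp_init,
            /* ... remaining hooks wired up as in the diff below ... */
            .post_mp_init = post_mp_init,
    };

    /* The SoC entry point then reduces to a single call: */
    if (mp_init_with_smm(cpu_bus, &mp_ops))
            printk(BIOS_ERR, "MP initialization failure.\n");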
diff --git a/src/soc/intel/skylake/cpu.c b/src/soc/intel/skylake/cpu.c
index 17fa0ae..b7353ca 100644
--- a/src/soc/intel/skylake/cpu.c
+++ b/src/soc/intel/skylake/cpu.c
@@ -346,13 +346,6 @@ static void configure_mca(void)
 		wrmsr(IA32_MC0_STATUS + (i * 4), msr);
 }
 
-static void bsp_init_before_ap_bringup(struct bus *cpu_bus)
-{
-	/* Setup MTRRs based on physical address size. */
-	x86_setup_mtrrs_with_detect();
-	x86_mtrr_check();
-}
-
 /* All CPUs including BSP will run the following function. */
 static void cpu_core_init(device_t cpu)
 {
@@ -382,48 +375,6 @@ static void cpu_core_init(device_t cpu)
 	enable_turbo();
 }
 
-/* MP initialization support. */
-static const void *microcode_patch;
-int ht_disabled;
-
-static int adjust_apic_id_ht_disabled(int index, int apic_id)
-{
-	return 2 * index;
-}
-
-static void relocate_and_load_microcode(void)
-{
-	/* Relocate the SMM handler. */
-	smm_relocate();
-
-	/* After SMM relocation a 2nd microcode load is required. */
-	intel_microcode_load_unlocked(microcode_patch);
-}
-
-static void enable_smis(void)
-{
-	/*
-	 * Now that all APs have been relocated as well as the BSP let SMIs
-	 * start flowing.
-	 */
-	southbridge_smm_enable_smi();
-
-	/* Lock down the SMRAM space. */
-#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)
-	smm_lock();
-#endif
-}
-
-static struct mp_flight_record mp_steps[] = {
-	MP_FR_NOBLOCK_APS(relocate_and_load_microcode,
-			  relocate_and_load_microcode),
-#if IS_ENABLED(CONFIG_SMP)
-	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
-	/* Wait for APs to finish initialization before proceeding. */
-#endif
-	MP_FR_BLOCK_APS(NULL, enable_smis),
-};
-
 static struct device_operations cpu_dev_ops = {
 	.init = cpu_core_init,
 };
@@ -439,14 +390,22 @@ static const struct cpu_driver driver __cpu_driver = {
 	.id_table = cpu_table,
 };
 
-void soc_init_cpus(device_t dev)
+/* MP initialization support. */
+static const void *microcode_patch;
+static int ht_disabled;
+
+static void pre_mp_init(void)
 {
-	struct bus *cpu_bus = dev->link_list;
+	/* Setup MTRRs based on physical address size. */
+	x86_setup_mtrrs_with_detect();
+	x86_mtrr_check();
+}
+
+static int get_cpu_count(void)
+{
+	msr_t msr;
 	int num_threads;
 	int num_cores;
-	msr_t msr;
-	struct mp_params mp_params;
-	void *smm_save_area;
 
 	msr = rdmsr(CORE_THREAD_COUNT_MSR);
 	num_threads = (msr.lo >> 0) & 0xffff;
@@ -456,45 +415,69 @@ void soc_init_cpus(device_t dev)
 
 	ht_disabled = num_threads == num_cores;
 
-	/*
-	 * Perform any necessary BSP initialization before APs are brought up.
-	 * This call also allows the BSP to prepare for any secondary effects
-	 * from calling cpu_initialize() such as smm_init().
-	 */
-	bsp_init_before_ap_bringup(cpu_bus);
+	return num_threads;
+}
 
+static void get_microcode_info(const void **microcode, int *parallel)
+{
 	microcode_patch = intel_microcode_find();
+	*microcode = microcode_patch;
+	*parallel = 1;
+}
 
-	/* Save default SMM area before relocation occurs. */
-	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER))
-		smm_save_area = backup_default_smm_area();
-	else
-		smm_save_area = NULL;
-
-	mp_params.num_cpus = num_threads;
-	mp_params.parallel_microcode_load = 1;
+static int adjust_apic_id(int index, int apic_id)
+{
 	if (ht_disabled)
-		mp_params.adjust_apic_id = adjust_apic_id_ht_disabled;
+		return 2 * index;
 	else
-		mp_params.adjust_apic_id = NULL;
-	mp_params.flight_plan = &mp_steps[0];
-	mp_params.num_records = ARRAY_SIZE(mp_steps);
-	mp_params.microcode_pointer = microcode_patch;
+		return index;
+}
 
-	/* Load relocation and permeanent handlers. Then initiate relocation. */
-	if (smm_initialize())
-		printk(BIOS_CRIT, "SMM Initialiazation failed...\n");
+static void per_cpu_smm_trigger(void)
+{
+	/* Relocate the SMM handler. */
+	smm_relocate();
 
-	if (IS_ENABLED(CONFIG_SMP))
-		if (mp_init(cpu_bus, &mp_params))
-			printk(BIOS_ERR, "MP initialization failure.\n");
+	/* After SMM relocation a 2nd microcode load is required. */
+	intel_microcode_load_unlocked(microcode_patch);
+}
 
+static void post_mp_init(void)
+{
 	/* Set Max Ratio */
 	set_max_ratio();
 
-	/* Restore the default SMM region. */
-	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER))
-		restore_default_smm_area(smm_save_area);
+	/*
+	 * Now that all APs have been relocated as well as the BSP let SMIs
+	 * start flowing.
+	 */
+	southbridge_smm_enable_smi();
+
+	/* Lock down the SMRAM space. */
+#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)
+	smm_lock();
+#endif
+}
+
+static const struct mp_ops mp_ops = {
+	.pre_mp_init = pre_mp_init,
+	.get_cpu_count = get_cpu_count,
+	.get_smm_info = smm_info,
+	.get_microcode_info = get_microcode_info,
+	.adjust_cpu_apic_entry = adjust_apic_id,
+	.pre_mp_smm_init = smm_initialize,
+	.per_cpu_smm_trigger = per_cpu_smm_trigger,
+	.relocation_handler = smm_relocation_handler,
+	.post_mp_init = post_mp_init,
+};
+
+void soc_init_cpus(device_t dev)
+{
+	struct bus *cpu_bus = dev->link_list;
+
+	if (mp_init_with_smm(cpu_bus, &mp_ops)) {
+		printk(BIOS_ERR, "MP initialization failure.\n");
+	}
 }
 
 int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
diff --git a/src/soc/intel/skylake/include/soc/cpu.h b/src/soc/intel/skylake/include/soc/cpu.h
index 8dd9742..f91a0ef 100644
--- a/src/soc/intel/skylake/include/soc/cpu.h
+++ b/src/soc/intel/skylake/include/soc/cpu.h
@@ -57,12 +57,6 @@ void set_power_limits(u8 power_limit_1_time);
 int cpu_config_tdp_levels(void);
 
-/*
- * Determine if HyperThreading is disabled.
- * The variable is not valid until setup_ap_init() has been called.
- */
-extern int ht_disabled;
-
 /* CPU identification */
 u32 cpu_family_model(void);
 u32 cpu_stepping(void);
diff --git a/src/soc/intel/skylake/include/soc/smm.h b/src/soc/intel/skylake/include/soc/smm.h
index 94bdc55..fa8da46 100644
--- a/src/soc/intel/skylake/include/soc/smm.h
+++ b/src/soc/intel/skylake/include/soc/smm.h
@@ -53,7 +53,11 @@ void mainboard_smi_gpi_handler(const struct gpi_status *sts);
 
 #if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)
-int smm_initialize(void);
+void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+				uintptr_t staggered_smbase);
+void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
+		size_t *smm_save_state_size);
+void smm_initialize(void);
 void smm_relocate(void);
 
 /* These helpers are for performing SMM relocation. */
@@ -68,10 +72,11 @@ void southbridge_clear_smi_status(void);
 void southbridge_smm_clear_state(void);
 void southbridge_smm_enable_smi(void);
 #else /* CONFIG_HAVE_SMI_HANDLER */
-static inline int smm_initialize(void)
-{
-	return 0;
-}
+static inline void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+				uintptr_t staggered_smbase) {}
+static inline void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
+				size_t *smm_save_state_size) {}
+static inline void smm_initialize(void) {}
 
 static inline void smm_relocate(void) {}
 static inline void southbridge_trigger_smi(void) {}
diff --git a/src/soc/intel/skylake/smmrelocate.c b/src/soc/intel/skylake/smmrelocate.c
index a14721f..fcb89de 100644
--- a/src/soc/intel/skylake/smmrelocate.c
+++ b/src/soc/intel/skylake/smmrelocate.c
@@ -54,9 +54,9 @@ static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
 	wrmsr(UNCORE_PRMRR_PHYS_MASK_MSR, relo_params->uncore_emrr_mask);
 }
 
-static void update_save_state(int cpu,
-			      struct smm_relocation_params *relo_params,
-			      const struct smm_runtime *runtime)
+static void update_save_state(int cpu, uintptr_t curr_smbase,
+				uintptr_t staggered_smbase,
+				struct smm_relocation_params *relo_params)
 {
 	u32 smbase;
 	u32 iedbase;
@@ -66,7 +66,7 @@ static void update_save_state(int cpu,
 	 * stagger the entry points adjusting SMBASE downwards by save state
 	 * size * CPU num.
 	 */
-	smbase = relo_params->smram_base - cpu * runtime->save_state_size;
+	smbase = staggered_smbase;
 	iedbase = relo_params->ied_base;
 
 	printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
@@ -101,8 +101,8 @@ static void update_save_state(int cpu,
 	} else {
 		em64t101_smm_state_save_area_t *save_state;
 
-		save_state = (void *)(runtime->smbase + SMM_DEFAULT_SIZE -
-				      runtime->save_state_size);
+		save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
+					sizeof(*save_state));
 
 		save_state->smbase = smbase;
 		save_state->iedbase = iedbase;
@@ -132,24 +132,11 @@ static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
  * resides in the ramstage module. This occurs by trampolining from the default
  * SMRAM entry point to here.
  */
-static void asmlinkage cpu_smm_do_relocation(void *arg)
+void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+				uintptr_t staggered_smbase)
 {
 	msr_t mtrr_cap;
-	struct smm_relocation_params *relo_params;
-	const struct smm_module_params *p;
-	const struct smm_runtime *runtime;
-	int cpu;
-
-	p = arg;
-	runtime = p->runtime;
-	relo_params = p->arg;
-	cpu = p->cpu;
-
-	if (cpu >= CONFIG_MAX_CPUS) {
-		printk(BIOS_CRIT,
-		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
-		return;
-	}
+	struct smm_relocation_params *relo_params = &smm_reloc_params;
 
 	printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu);
 
@@ -184,7 +171,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	}
 
 	/* Make appropriate changes to the save state map. */
-	update_save_state(cpu, relo_params, runtime);
+	update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
 
 	/* Write EMRR and SMRR MSRs based on indicated support. */
 	mtrr_cap = rdmsr(MTRR_CAP_MSR);
@@ -250,51 +237,6 @@ static void fill_in_relocation_params(device_t dev,
 	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
 }
 
-static void adjust_apic_id_map(struct smm_loader_params *smm_params)
-{
-	struct smm_runtime *runtime;
-	int i;
-
-	/* Adjust the APIC id map if HT is disabled. */
-	if (!ht_disabled)
-		return;
-
-	runtime = smm_params->runtime;
-
-	/* The APIC ids increment by 2 when HT is disabled. */
-	for (i = 0; i < CONFIG_MAX_CPUS; i++)
-		runtime->apic_id_to_cpu[i] = runtime->apic_id_to_cpu[i] * 2;
-}
-
-static int install_relocation_handler(int num_cpus,
-				      struct smm_relocation_params *relo_params)
-{
-	/*
-	 * The default SMM entry can happen in parallel or serially. If the
-	 * default SMM entry is done in parallel the BSP has already setup
-	 * the saving state to each CPU's MSRs. At least one save state size
-	 * is required for the initial SMM entry for the BSP to determine if
-	 * parallel SMM relocation is even feasible. Set the stack size to
-	 * the save state size, and call into the do_relocation handler.
-	 */
-	int save_state_size = sizeof(em64t101_smm_state_save_area_t);
-	struct smm_loader_params smm_params = {
-		.per_cpu_stack_size = save_state_size,
-		.num_concurrent_stacks = num_cpus,
-		.per_cpu_save_state_size = save_state_size,
-		.num_concurrent_save_states = 1,
-		.handler = (smm_handler_t)&cpu_smm_do_relocation,
-		.handler_arg = (void *)relo_params,
-	};
-
-	if (smm_setup_relocation_handler(&smm_params))
-		return -1;
-
-	adjust_apic_id_map(&smm_params);
-
-	return 0;
-}
-
 static void setup_ied_area(struct smm_relocation_params *params)
 {
 	char *ied_base;
@@ -317,37 +259,10 @@ static void setup_ied_area(struct smm_relocation_params *params)
 	memset(ied_base + (1 << 20), 0, (32 << 10));
 }
 
-static int install_permanent_handler(int num_cpus,
-				     struct smm_relocation_params *relo_params)
-{
-	/*
-	 * There are num_cpus concurrent stacks and num_cpus concurrent save
-	 * state areas. Lastly, set the stack size to the save state size.
-	 */
-	int save_state_size = sizeof(em64t101_smm_state_save_area_t);
-	struct smm_loader_params smm_params = {
-		.per_cpu_stack_size = save_state_size,
-		.num_concurrent_stacks = num_cpus,
-		.per_cpu_save_state_size = save_state_size,
-		.num_concurrent_save_states = num_cpus,
-	};
-
-	printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n",
-	       relo_params->smram_base);
-	if (smm_load_module((void *)relo_params->smram_base,
-			    relo_params->smram_size, &smm_params))
-		return -1;
-
-	adjust_apic_id_map(&smm_params);
-
-	return 0;
-}
-
-static int cpu_smm_setup(void)
+void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
+		size_t *smm_save_state_size)
 {
 	device_t dev = SA_DEV_ROOT;
-	int num_cpus;
-	msr_t msr;
 
 	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
 
@@ -356,46 +271,21 @@ static int cpu_smm_setup(void)
 	if (smm_reloc_params.ied_size)
 		setup_ied_area(&smm_reloc_params);
 
-	msr = rdmsr(CORE_THREAD_COUNT_MSR);
-	num_cpus = msr.lo & 0xffff;
-	if (num_cpus > CONFIG_MAX_CPUS) {
-		printk(BIOS_CRIT,
-		       "Error: Hardware CPUs (%d) > MAX_CPUS (%d)\n",
-		       num_cpus, CONFIG_MAX_CPUS);
-	}
-
-	if (install_relocation_handler(num_cpus, &smm_reloc_params)) {
-		printk(BIOS_CRIT, "SMM Relocation handler install failed.\n");
-		return -1;
-	}
-
-	if (install_permanent_handler(num_cpus, &smm_reloc_params)) {
-		printk(BIOS_CRIT, "SMM Permanent handler install failed.\n");
-		return -1;
-	}
-
-	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
-	wbinvd();
-
-	return 0;
 }
 
-int smm_initialize(void)
+void smm_initialize(void)
 {
-	/* Return early if CPU SMM setup failed. */
-	if (cpu_smm_setup())
-		return -1;
-
 	/* Clear the SMM state in the southbridge. */
 	southbridge_smm_clear_state();
 
-	/* Run the relocation handler. */
+	/*
+	 * Run the relocation handler on the BSP to check and set up
+	 * parallel SMM relocation.
+	 */
 	smm_initiate_relocation();
 
 	if (smm_reloc_params.smm_save_state_in_msrs)
 		printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
-
-	return 0;
 }
 
 void smm_relocate(void)