coreboot-gerrit
May 2016
coreboot-gerrit@coreboot.org
1 participant
1843 discussions
New patch to review for coreboot: soc/intel/skylake: convert to using common MP and SMM init
by Aaron Durbin
04 May '16
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14597
-gerrit commit d319bdbc2be35834eb1d30dee7a0364e15c35f42 Author: Aaron Durbin <adurbin(a)chromium.org> Date: Tue May 3 17:45:59 2016 -0500 soc/intel/skylake: convert to using common MP and SMM init In order to reduce duplication of code use the common MP and SMM initialization flow. Change-Id: I5c4674ed258922b6616d75f070df976ef9fad209 Signed-off-by: Aaron Durbin <adurbin(a)chromium.org> --- src/soc/intel/skylake/cpu.c | 147 ++++++++++++++------------------ src/soc/intel/skylake/include/soc/cpu.h | 6 -- src/soc/intel/skylake/include/soc/smm.h | 15 ++-- src/soc/intel/skylake/smmrelocate.c | 144 ++++--------------------------- 4 files changed, 92 insertions(+), 220 deletions(-) diff --git a/src/soc/intel/skylake/cpu.c b/src/soc/intel/skylake/cpu.c index 17fa0ae..b7353ca 100644 --- a/src/soc/intel/skylake/cpu.c +++ b/src/soc/intel/skylake/cpu.c @@ -346,13 +346,6 @@ static void configure_mca(void) wrmsr(IA32_MC0_STATUS + (i * 4), msr); } -static void bsp_init_before_ap_bringup(struct bus *cpu_bus) -{ - /* Setup MTRRs based on physical address size. */ - x86_setup_mtrrs_with_detect(); - x86_mtrr_check(); -} - /* All CPUs including BSP will run the following function. */ static void cpu_core_init(device_t cpu) { @@ -382,48 +375,6 @@ static void cpu_core_init(device_t cpu) enable_turbo(); } -/* MP initialization support. */ -static const void *microcode_patch; -int ht_disabled; - -static int adjust_apic_id_ht_disabled(int index, int apic_id) -{ - return 2 * index; -} - -static void relocate_and_load_microcode(void) -{ - /* Relocate the SMM handler. */ - smm_relocate(); - - /* After SMM relocation a 2nd microcode load is required. */ - intel_microcode_load_unlocked(microcode_patch); -} - -static void enable_smis(void) -{ - /* - * Now that all APs have been relocated as well as the BSP let SMIs - * start flowing. - */ - southbridge_smm_enable_smi(); - - /* Lock down the SMRAM space. */ -#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) - smm_lock(); -#endif -} - -static struct mp_flight_record mp_steps[] = { - MP_FR_NOBLOCK_APS(relocate_and_load_microcode, - relocate_and_load_microcode), -#if IS_ENABLED(CONFIG_SMP) - MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), - /* Wait for APs to finish initialization before proceeding. */ -#endif - MP_FR_BLOCK_APS(NULL, enable_smis), -}; - static struct device_operations cpu_dev_ops = { .init = cpu_core_init, }; @@ -439,14 +390,22 @@ static const struct cpu_driver driver __cpu_driver = { .id_table = cpu_table, }; -void soc_init_cpus(device_t dev) +/* MP initialization support. */ +static const void *microcode_patch; +static int ht_disabled; + +static void pre_mp_init(void) { - struct bus *cpu_bus = dev->link_list; + /* Setup MTRRs based on physical address size. */ + x86_setup_mtrrs_with_detect(); + x86_mtrr_check(); +} + +static int get_cpu_count(void) +{ + msr_t msr; int num_threads; int num_cores; - msr_t msr; - struct mp_params mp_params; - void *smm_save_area; msr = rdmsr(CORE_THREAD_COUNT_MSR); num_threads = (msr.lo >> 0) & 0xffff; @@ -456,45 +415,69 @@ void soc_init_cpus(device_t dev) ht_disabled = num_threads == num_cores; - /* - * Perform any necessary BSP initialization before APs are brought up. - * This call also allows the BSP to prepare for any secondary effects - * from calling cpu_initialize() such as smm_init(). 
- */ - bsp_init_before_ap_bringup(cpu_bus); + return num_threads; +} +static void get_microcode_info(const void **microcode, int *parallel) +{ microcode_patch = intel_microcode_find(); + *microcode = microcode_patch; + *parallel = 1; +} - /* Save default SMM area before relocation occurs. */ - if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) - smm_save_area = backup_default_smm_area(); - else - smm_save_area = NULL; - - mp_params.num_cpus = num_threads; - mp_params.parallel_microcode_load = 1; +static int adjust_apic_id(int index, int apic_id) +{ if (ht_disabled) - mp_params.adjust_apic_id = adjust_apic_id_ht_disabled; + return 2 * index; else - mp_params.adjust_apic_id = NULL; - mp_params.flight_plan = &mp_steps[0]; - mp_params.num_records = ARRAY_SIZE(mp_steps); - mp_params.microcode_pointer = microcode_patch; + return index; +} - /* Load relocation and permeanent handlers. Then initiate relocation. */ - if (smm_initialize()) - printk(BIOS_CRIT, "SMM Initialiazation failed...\n"); +static void per_cpu_smm_trigger(void) +{ + /* Relocate the SMM handler. */ + smm_relocate(); - if (IS_ENABLED(CONFIG_SMP)) - if (mp_init(cpu_bus, &mp_params)) - printk(BIOS_ERR, "MP initialization failure.\n"); + /* After SMM relocation a 2nd microcode load is required. */ + intel_microcode_load_unlocked(microcode_patch); +} +static void post_mp_init(void) +{ /* Set Max Ratio */ set_max_ratio(); - /* Restore the default SMM region. */ - if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) - restore_default_smm_area(smm_save_area); + /* + * Now that all APs have been relocated as well as the BSP let SMIs + * start flowing. + */ + southbridge_smm_enable_smi(); + + /* Lock down the SMRAM space. */ +#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) + smm_lock(); +#endif +} + +static const struct mp_ops mp_ops = { + .pre_mp_init = pre_mp_init, + .get_cpu_count = get_cpu_count, + .get_smm_info = smm_info, + .get_microcode_info = get_microcode_info, + .adjust_cpu_apic_entry = adjust_apic_id, + .pre_mp_smm_init = smm_initialize, + .per_cpu_smm_trigger = per_cpu_smm_trigger, + .relocation_handler = smm_relocation_handler, + .post_mp_init = post_mp_init, +}; + +void soc_init_cpus(device_t dev) +{ + struct bus *cpu_bus = dev->link_list; + + if (mp_init_with_smm(cpu_bus, &mp_ops)) { + printk(BIOS_ERR, "MP initialization failure.\n"); + } } int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id) diff --git a/src/soc/intel/skylake/include/soc/cpu.h b/src/soc/intel/skylake/include/soc/cpu.h index 8dd9742..f91a0ef 100644 --- a/src/soc/intel/skylake/include/soc/cpu.h +++ b/src/soc/intel/skylake/include/soc/cpu.h @@ -57,12 +57,6 @@ void set_power_limits(u8 power_limit_1_time); int cpu_config_tdp_levels(void); -/* - * Determine if HyperThreading is disabled. - * The variable is not valid until setup_ap_init() has been called. 
- */ -extern int ht_disabled; - /* CPU identification */ u32 cpu_family_model(void); u32 cpu_stepping(void); diff --git a/src/soc/intel/skylake/include/soc/smm.h b/src/soc/intel/skylake/include/soc/smm.h index 94bdc55..fa8da46 100644 --- a/src/soc/intel/skylake/include/soc/smm.h +++ b/src/soc/intel/skylake/include/soc/smm.h @@ -53,7 +53,11 @@ void mainboard_smi_gpi_handler(const struct gpi_status *sts); #if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) -int smm_initialize(void); +void smm_relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase); +void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size); +void smm_initialize(void); void smm_relocate(void); /* These helpers are for performing SMM relocation. */ @@ -68,10 +72,11 @@ void southbridge_clear_smi_status(void); void southbridge_smm_clear_state(void); void southbridge_smm_enable_smi(void); #else /* CONFIG_HAVE_SMI_HANDLER */ -static inline int smm_initialize(void) -{ - return 0; -} +static inline void smm_relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase) {} +static inline void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size) {} +static inline void smm_initialize(void) {} static inline void smm_relocate(void) {} static inline void southbridge_trigger_smi(void) {} diff --git a/src/soc/intel/skylake/smmrelocate.c b/src/soc/intel/skylake/smmrelocate.c index a14721f..fcb89de 100644 --- a/src/soc/intel/skylake/smmrelocate.c +++ b/src/soc/intel/skylake/smmrelocate.c @@ -54,9 +54,9 @@ static inline void write_uncore_emrr(struct smm_relocation_params *relo_params) wrmsr(UNCORE_PRMRR_PHYS_MASK_MSR, relo_params->uncore_emrr_mask); } -static void update_save_state(int cpu, - struct smm_relocation_params *relo_params, - const struct smm_runtime *runtime) +static void update_save_state(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase, + struct smm_relocation_params *relo_params) { u32 smbase; u32 iedbase; @@ -66,7 +66,7 @@ static void update_save_state(int cpu, * stagger the entry points adjusting SMBASE downwards by save state * size * CPU num. */ - smbase = relo_params->smram_base - cpu * runtime->save_state_size; + smbase = staggered_smbase; iedbase = relo_params->ied_base; printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n", @@ -101,8 +101,8 @@ static void update_save_state(int cpu, } else { em64t101_smm_state_save_area_t *save_state; - save_state = (void *)(runtime->smbase + SMM_DEFAULT_SIZE - - runtime->save_state_size); + save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE - + sizeof(*save_state)); save_state->smbase = smbase; save_state->iedbase = iedbase; @@ -132,24 +132,11 @@ static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params) * resides in the ramstage module. This occurs by trampolining from the default * SMRAM entry point to here. 
*/ -static void asmlinkage cpu_smm_do_relocation(void *arg) +void smm_relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase) { msr_t mtrr_cap; - struct smm_relocation_params *relo_params; - const struct smm_module_params *p; - const struct smm_runtime *runtime; - int cpu; - - p = arg; - runtime = p->runtime; - relo_params = p->arg; - cpu = p->cpu; - - if (cpu >= CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Invalid CPU number assigned in SMM stub: %d\n", cpu); - return; - } + struct smm_relocation_params *relo_params = &smm_reloc_params; printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu); @@ -184,7 +171,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg) } /* Make appropriate changes to the save state map. */ - update_save_state(cpu, relo_params, runtime); + update_save_state(cpu, curr_smbase, staggered_smbase, relo_params); /* Write EMRR and SMRR MSRs based on indicated support. */ mtrr_cap = rdmsr(MTRR_CAP_MSR); @@ -250,51 +237,6 @@ static void fill_in_relocation_params(device_t dev, params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1; } -static void adjust_apic_id_map(struct smm_loader_params *smm_params) -{ - struct smm_runtime *runtime; - int i; - - /* Adjust the APIC id map if HT is disabled. */ - if (!ht_disabled) - return; - - runtime = smm_params->runtime; - - /* The APIC ids increment by 2 when HT is disabled. */ - for (i = 0; i < CONFIG_MAX_CPUS; i++) - runtime->apic_id_to_cpu[i] = runtime->apic_id_to_cpu[i] * 2; -} - -static int install_relocation_handler(int num_cpus, - struct smm_relocation_params *relo_params) -{ - /* - * The default SMM entry can happen in parallel or serially. If the - * default SMM entry is done in parallel the BSP has already setup - * the saving state to each CPU's MSRs. At least one save state size - * is required for the initial SMM entry for the BSP to determine if - * parallel SMM relocation is even feasible. Set the stack size to - * the save state size, and call into the do_relocation handler. - */ - int save_state_size = sizeof(em64t101_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = 1, - .handler = (smm_handler_t)&cpu_smm_do_relocation, - .handler_arg = (void *)relo_params, - }; - - if (smm_setup_relocation_handler(&smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); - - return 0; -} - static void setup_ied_area(struct smm_relocation_params *params) { char *ied_base; @@ -317,37 +259,10 @@ static void setup_ied_area(struct smm_relocation_params *params) memset(ied_base + (1 << 20), 0, (32 << 10)); } -static int install_permanent_handler(int num_cpus, - struct smm_relocation_params *relo_params) -{ - /* - * There are num_cpus concurrent stacks and num_cpus concurrent save - * state areas. Lastly, set the stack size to the save state size. 
- */ - int save_state_size = sizeof(em64t101_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = num_cpus, - }; - - printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n", - relo_params->smram_base); - if (smm_load_module((void *)relo_params->smram_base, - relo_params->smram_size, &smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); - - return 0; -} - -static int cpu_smm_setup(void) +void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size) { device_t dev = SA_DEV_ROOT; - int num_cpus; - msr_t msr; printk(BIOS_DEBUG, "Setting up SMI for CPU\n"); @@ -356,46 +271,21 @@ static int cpu_smm_setup(void) if (smm_reloc_params.ied_size) setup_ied_area(&smm_reloc_params); - msr = rdmsr(CORE_THREAD_COUNT_MSR); - num_cpus = msr.lo & 0xffff; - if (num_cpus > CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Error: Hardware CPUs (%d) > MAX_CPUS (%d)\n", - num_cpus, CONFIG_MAX_CPUS); - } - - if (install_relocation_handler(num_cpus, &smm_reloc_params)) { - printk(BIOS_CRIT, "SMM Relocation handler install failed.\n"); - return -1; - } - - if (install_permanent_handler(num_cpus, &smm_reloc_params)) { - printk(BIOS_CRIT, "SMM Permanent handler install failed.\n"); - return -1; - } - - /* Ensure the SMM handlers hit DRAM before performing first SMI. */ - wbinvd(); - - return 0; } -int smm_initialize(void) +void smm_initialize(void) { - /* Return early if CPU SMM setup failed. */ - if (cpu_smm_setup()) - return -1; - /* Clear the SMM state in the southbridge. */ southbridge_smm_clear_state(); - /* Run the relocation handler. */ + /* + * Run the relocation handler for on the BSP to check and set up + * parallel SMM relocation. + */ smm_initiate_relocation(); if (smm_reloc_params.smm_save_state_in_msrs) printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n"); - - return 0; } void smm_relocate(void)
New patch to review for coreboot: cpu/intel/haswell: convert to using common MP and SMM init
by Aaron Durbin
04 May '16
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14596
-gerrit commit ea95577d11ad32bd56f4937da67a77cd4b5a3d56 Author: Aaron Durbin <adurbin(a)chromium.org> Date: Tue May 3 17:26:35 2016 -0500 cpu/intel/haswell: convert to using common MP and SMM init In order to reduce duplication of code use the common MP and SMM initialization flow. Change-Id: I80b5b94b62bdd001581eb56513a0d532fffb64e8 Signed-off-by: Aaron Durbin <adurbin(a)chromium.org> --- src/cpu/intel/haswell/haswell.h | 8 +- src/cpu/intel/haswell/haswell_init.c | 124 +++++++++++++--------------- src/cpu/intel/haswell/smmrelocate.c | 153 ++++++----------------------------- 3 files changed, 88 insertions(+), 197 deletions(-) diff --git a/src/cpu/intel/haswell/haswell.h b/src/cpu/intel/haswell/haswell.h index 9236b77..8298fb1 100644 --- a/src/cpu/intel/haswell/haswell.h +++ b/src/cpu/intel/haswell/haswell.h @@ -193,14 +193,16 @@ void intel_cpu_haswell_finalize_smm(void); /* Configure power limits for turbo mode */ void set_power_limits(u8 power_limit_1_time); int cpu_config_tdp_levels(void); -/* Returns 0 on success, < 0 on failure. */ -int smm_initialize(void); +void smm_relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase); +void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size); +void smm_initialize(void); void smm_relocate(void); struct bus; void bsp_init_and_start_aps(struct bus *cpu_bus); /* Determine if HyperThreading is disabled. The variable is not valid until * setup_ap_init() has been called. */ -extern int ht_disabled; #endif /* CPU identification */ diff --git a/src/cpu/intel/haswell/haswell_init.c b/src/cpu/intel/haswell/haswell_init.c index f488ef6..74147ab 100644 --- a/src/cpu/intel/haswell/haswell_init.c +++ b/src/cpu/intel/haswell/haswell_init.c @@ -714,20 +714,6 @@ static void configure_mca(void) wrmsr(IA32_MC0_STATUS + (i * 4), msr); } -static void bsp_init_before_ap_bringup(struct bus *cpu_bus) -{ - /* Setup MTRRs based on physical address size. */ - x86_setup_mtrrs_with_detect(); - x86_mtrr_check(); - - initialize_vr_config(); - - if (haswell_is_ult()) { - calibrate_24mhz_bclk(); - configure_pch_power_sharing(); - } -} - /* All CPUs including BSP will run the following function. */ static void haswell_init(struct device *cpu) { @@ -765,47 +751,27 @@ static void haswell_init(struct device *cpu) /* MP initialization support. */ static const void *microcode_patch; -int ht_disabled; +static int ht_disabled; -static int adjust_apic_id_ht_disabled(int index, int apic_id) +static void pre_mp_init(void) { - return 2 * index; -} - -static void relocate_and_load_microcode(void) -{ - /* Relocate the SMM handler. */ - smm_relocate(); - - /* After SMM relocation a 2nd microcode load is required. */ - intel_microcode_load_unlocked(microcode_patch); -} + /* Setup MTRRs based on physical address size. */ + x86_setup_mtrrs_with_detect(); + x86_mtrr_check(); -static void enable_smis(void) -{ - /* Now that all APs have been relocated as well as the BSP let SMIs - * start flowing. */ - southbridge_smm_enable_smi(); + initialize_vr_config(); - /* Lock down the SMRAM space. */ - smm_lock(); + if (haswell_is_ult()) { + calibrate_24mhz_bclk(); + configure_pch_power_sharing(); + } } -static struct mp_flight_record mp_steps[] = { - MP_FR_NOBLOCK_APS(relocate_and_load_microcode, - relocate_and_load_microcode), - MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), - /* Wait for APs to finish initialization before proceeding. 
*/ - MP_FR_BLOCK_APS(NULL, enable_smis), -}; - -void bsp_init_and_start_aps(struct bus *cpu_bus) +static int get_cpu_count(void) { - void *smm_save_area; + msr_t msr; int num_threads; int num_cores; - msr_t msr; - struct mp_params mp_params; msr = rdmsr(CORE_THREAD_COUNT_MSR); num_threads = (msr.lo >> 0) & 0xffff; @@ -815,36 +781,60 @@ void bsp_init_and_start_aps(struct bus *cpu_bus) ht_disabled = num_threads == num_cores; - /* Perform any necessary BSP initialization before APs are brought up. - * This call also allows the BSP to prepare for any secondary effects - * from calling cpu_initialize() such as smm_init(). */ - bsp_init_before_ap_bringup(cpu_bus); + return num_threads; +} +static void get_microcode_info(const void **microcode, int *parallel) +{ microcode_patch = intel_microcode_find(); + *microcode = microcode_patch; + *parallel = 1; +} - /* Save default SMM area before relocation occurs. */ - smm_save_area = backup_default_smm_area(); - - mp_params.num_cpus = num_threads; - mp_params.parallel_microcode_load = 1; +static int adjust_apic_id(int index, int apic_id) +{ if (ht_disabled) - mp_params.adjust_apic_id = adjust_apic_id_ht_disabled; + return 2 * index; else - mp_params.adjust_apic_id = NULL; - mp_params.flight_plan = &mp_steps[0]; - mp_params.num_records = ARRAY_SIZE(mp_steps); - mp_params.microcode_pointer = microcode_patch; + return index; +} + +static void per_cpu_smm_trigger(void) +{ + /* Relocate the SMM handler. */ + smm_relocate(); - /* Load relocation and permeanent handlers. Then initiate relocation. */ - if (smm_initialize()) - printk(BIOS_CRIT, "SMM Initialiazation failed...\n"); + /* After SMM relocation a 2nd microcode load is required. */ + intel_microcode_load_unlocked(microcode_patch); +} - if (mp_init(cpu_bus, &mp_params)) { +static void post_mp_init(void) +{ + /* Now that all APs have been relocated as well as the BSP let SMIs + * start flowing. */ + southbridge_smm_enable_smi(); + + /* Lock down the SMRAM space. */ + smm_lock(); +} + +static const struct mp_ops mp_ops = { + .pre_mp_init = pre_mp_init, + .get_cpu_count = get_cpu_count, + .get_smm_info = smm_info, + .get_microcode_info = get_microcode_info, + .adjust_cpu_apic_entry = adjust_apic_id, + .pre_mp_smm_init = smm_initialize, + .per_cpu_smm_trigger = per_cpu_smm_trigger, + .relocation_handler = smm_relocation_handler, + .post_mp_init = post_mp_init, +}; + +void bsp_init_and_start_aps(struct bus *cpu_bus) +{ + if (mp_init_with_smm(cpu_bus, &mp_ops)) { printk(BIOS_ERR, "MP initialization failure.\n"); } - - /* Restore the default SMM region. */ - restore_default_smm_area(smm_save_area); } static struct device_operations cpu_dev_ops = { diff --git a/src/cpu/intel/haswell/smmrelocate.c b/src/cpu/intel/haswell/smmrelocate.c index 99a972e..5c50ad1c 100644 --- a/src/cpu/intel/haswell/smmrelocate.c +++ b/src/cpu/intel/haswell/smmrelocate.c @@ -91,9 +91,9 @@ static inline void write_uncore_emrr(struct smm_relocation_params *relo_params) wrmsr(UNCORE_EMRRphysMask_MSR, relo_params->uncore_emrr_mask); } -static void update_save_state(int cpu, - struct smm_relocation_params *relo_params, - const struct smm_runtime *runtime) +static void update_save_state(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase, + struct smm_relocation_params *relo_params) { u32 smbase; u32 iedbase; @@ -101,7 +101,7 @@ static void update_save_state(int cpu, /* The relocated handler runs with all CPUs concurrently. Therefore * stagger the entry points adjusting SMBASE downwards by save state * size * CPU num. 
*/ - smbase = relo_params->smram_base - cpu * runtime->save_state_size; + smbase = staggered_smbase; iedbase = relo_params->ied_base; printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n", @@ -132,8 +132,8 @@ static void update_save_state(int cpu, } else { em64t101_smm_state_save_area_t *save_state; - save_state = (void *)(runtime->smbase + SMM_DEFAULT_SIZE - - runtime->save_state_size); + save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE - + sizeof(*save_state)); save_state->smbase = smbase; save_state->iedbase = iedbase; @@ -161,24 +161,11 @@ static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params) /* The relocation work is actually performed in SMM context, but the code * resides in the ramstage module. This occurs by trampolining from the default * SMRAM entry point to here. */ -static void asmlinkage cpu_smm_do_relocation(void *arg) +void smm_relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase) { msr_t mtrr_cap; - struct smm_relocation_params *relo_params; - const struct smm_module_params *p; - const struct smm_runtime *runtime; - int cpu; - - p = arg; - runtime = p->runtime; - relo_params = p->arg; - cpu = p->cpu; - - if (cpu >= CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Invalid CPU number assigned in SMM stub: %d\n", cpu); - return; - } + struct smm_relocation_params *relo_params = &smm_reloc_params; printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu); @@ -207,7 +194,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg) } /* Make appropriate changes to the save state map. */ - update_save_state(cpu, relo_params, runtime); + update_save_state(cpu, curr_smbase, staggered_smbase, relo_params); /* Write EMRR and SMRR MSRs based on indicated support. */ mtrr_cap = rdmsr(MTRR_CAP_MSR); @@ -290,49 +277,6 @@ static void fill_in_relocation_params(struct device *dev, params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1; } -static void adjust_apic_id_map(struct smm_loader_params *smm_params) -{ - struct smm_runtime *runtime; - int i; - - /* Adjust the APIC id map if HT is disabled. */ - if (!ht_disabled) - return; - - runtime = smm_params->runtime; - - /* The APIC ids increment by 2 when HT is disabled. */ - for (i = 0; i < CONFIG_MAX_CPUS; i++) - runtime->apic_id_to_cpu[i] = runtime->apic_id_to_cpu[i] * 2; -} - -static int install_relocation_handler(int num_cpus, - struct smm_relocation_params *relo_params) -{ - /* The default SMM entry can happen in parallel or serially. If the - * default SMM entry is done in parallel the BSP has already setup - * the saving state to each CPU's MSRs. At least one save state size - * is required for the initial SMM entry for the BSP to determine if - * parallel SMM relocation is even feasible. Set the stack size to - * the save state size, and call into the do_relocation handler. 
*/ - int save_state_size = sizeof(em64t101_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = 1, - .handler = (smm_handler_t)&cpu_smm_do_relocation, - .handler_arg = (void *)relo_params, - }; - - if (smm_setup_relocation_handler(&smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); - - return 0; -} - static void setup_ied_area(struct smm_relocation_params *params) { char *ied_base; @@ -357,88 +301,43 @@ static void setup_ied_area(struct smm_relocation_params *params) //memset(ied_base + (2 << 20), 0, (2 << 20)); } -static int install_permanent_handler(int num_cpus, - struct smm_relocation_params *relo_params) +void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size) { - /* There are num_cpus concurrent stacks and num_cpus concurrent save - * state areas. Lastly, set the stack size to the save state size. */ - int save_state_size = sizeof(em64t101_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = num_cpus, - }; - - printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n", - relo_params->smram_base); - if (smm_load_module((void *)relo_params->smram_base, - relo_params->smram_size, &smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); - - return 0; -} - -static int cpu_smm_setup(void) -{ - struct device *dev; - int num_cpus; - msr_t msr; + device_t dev = dev_find_slot(0, PCI_DEVFN(0, 0)); printk(BIOS_DEBUG, "Setting up SMI for CPU\n"); - dev = dev_find_slot(0, PCI_DEVFN(0, 0)); - fill_in_relocation_params(dev, &smm_reloc_params); setup_ied_area(&smm_reloc_params); - msr = rdmsr(CORE_THREAD_COUNT_MSR); - num_cpus = msr.lo & 0xffff; - if (num_cpus > CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Error: Hardware CPUs (%d) > MAX_CPUS (%d)\n", - num_cpus, CONFIG_MAX_CPUS); - } - - if (install_relocation_handler(num_cpus, &smm_reloc_params)) { - printk(BIOS_CRIT, "SMM Relocation handler install failed.\n"); - return -1; - } - - if (install_permanent_handler(num_cpus, &smm_reloc_params)) { - printk(BIOS_CRIT, "SMM Permanent handler install failed.\n"); - return -1; - } - - /* Ensure the SMM handlers hit DRAM before performing first SMI. */ - /* TODO(adurbin): Is this really needed? */ - wbinvd(); - - return 0; + *perm_smbase = smm_reloc_params.smram_base; + *perm_smsize = smm_reloc_params.smram_size; + *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t); } -int smm_initialize(void) +void smm_initialize(void) { - /* Return early if CPU SMM setup failed. */ - if (cpu_smm_setup()) - return -1; - /* Clear the SMM state in the southbridge. */ southbridge_smm_clear_state(); - /* Run the relocation handler. */ + /* + * Run the relocation handler for on the BSP to check and set up + * parallel SMM relocation. + */ smm_initiate_relocation(); if (smm_reloc_params.smm_save_state_in_msrs) { printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n"); } - - return 0; } +/* The default SMM entry can happen in parallel or serially. If the + * default SMM entry is done in parallel the BSP has already setup + * the saving state to each CPU's MSRs. At least one save state size + * is required for the initial SMM entry for the BSP to determine if + * parallel SMM relocation is even feasible. */ void smm_relocate(void) { /*
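As with the skylake change, this patch drops the hand-written SMBASE staggering from update_save_state() and instead receives the result as the staggered_smbase argument. The arithmetic the removed code used is worth keeping in mind when reading the new signature: each CPU's relocated SMBASE steps down from the SMRAM base by one save-state size per CPU, so the concurrently executing handlers do not share a save-state area. A small sketch mirroring the removed expression (relo_params->smram_base - cpu * runtime->save_state_size); the helper name is illustrative only.

#include <stddef.h>
#include <stdint.h>

/* Mirror of the staggering the removed haswell code performed: CPU 0
 * lands at the SMRAM base, CPU n is offset down by n save states so
 * every relocated handler gets its own save-state area. */
static uintptr_t staggered_smbase_for(uintptr_t smram_base,
                                      size_t save_state_size,
                                      unsigned int cpu)
{
        return smram_base - (uintptr_t)cpu * save_state_size;
}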
New patch to review for coreboot: soc/intel/broadwell: convert to using common MP and SMM init
by Aaron Durbin
04 May '16
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14595
-gerrit commit b3e0911900a35f64a9efde77da6e9de66ac0b951 Author: Aaron Durbin <adurbin(a)chromium.org> Date: Tue May 3 16:48:19 2016 -0500 soc/intel/broadwell: convert to using common MP and SMM init In order to reduce duplication of code use the common MP and SMM initialization flow. Change-Id: I74c81c5d18dff7a84bfedbe07f01e536c0f641fa Signed-off-by: Aaron Durbin <adurbin(a)chromium.org> --- src/soc/intel/broadwell/cpu.c | 139 +++++++++++++--------------- src/soc/intel/broadwell/include/soc/cpu.h | 6 -- src/soc/intel/broadwell/include/soc/smm.h | 6 +- src/soc/intel/broadwell/smmrelocate.c | 149 +++++------------------------- 4 files changed, 95 insertions(+), 205 deletions(-) diff --git a/src/soc/intel/broadwell/cpu.c b/src/soc/intel/broadwell/cpu.c index 15c003e..2afdfad 100644 --- a/src/soc/intel/broadwell/cpu.c +++ b/src/soc/intel/broadwell/cpu.c @@ -570,17 +570,6 @@ static void configure_mca(void) wrmsr(IA32_MC0_STATUS + (i * 4), msr); } -static void bsp_init_before_ap_bringup(struct bus *cpu_bus) -{ - /* Setup MTRRs based on physical address size. */ - x86_setup_mtrrs_with_detect(); - x86_mtrr_check(); - - initialize_vr_config(); - calibrate_24mhz_bclk(); - configure_pch_power_sharing(); -} - /* All CPUs including BSP will run the following function. */ static void cpu_core_init(device_t cpu) { @@ -612,14 +601,52 @@ static void cpu_core_init(device_t cpu) /* MP initialization support. */ static const void *microcode_patch; -int ht_disabled; +static int ht_disabled; + +static void pre_mp_init(void) +{ + /* Setup MTRRs based on physical address size. */ + x86_setup_mtrrs_with_detect(); + x86_mtrr_check(); + + initialize_vr_config(); + calibrate_24mhz_bclk(); + configure_pch_power_sharing(); +} + +static int get_cpu_count(void) +{ + msr_t msr; + int num_threads; + int num_cores; -static int adjust_apic_id_ht_disabled(int index, int apic_id) + msr = rdmsr(CORE_THREAD_COUNT_MSR); + num_threads = (msr.lo >> 0) & 0xffff; + num_cores = (msr.lo >> 16) & 0xffff; + printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n", + num_cores, num_threads); + + ht_disabled = num_threads == num_cores; + + return num_threads; +} + +static void get_microcode_info(const void **microcode, int *parallel) +{ + microcode_patch = intel_microcode_find(); + *microcode = microcode_patch; + *parallel = 1; +} + +static int adjust_apic_id(int index, int apic_id) { - return 2 * index; + if (ht_disabled) + return 2 * index; + else + return index; } -static void relocate_and_load_microcode(void) +static void per_cpu_smm_trigger(void) { /* Relocate the SMM handler. */ smm_relocate(); @@ -628,8 +655,11 @@ static void relocate_and_load_microcode(void) intel_microcode_load_unlocked(microcode_patch); } -static void enable_smis(void) +static void post_mp_init(void) { + /* Set Max Ratio */ + set_max_ratio(); + /* Now that all APs have been relocated as well as the BSP let SMIs * start flowing. */ southbridge_smm_enable_smi(); @@ -638,14 +668,27 @@ static void enable_smis(void) smm_lock(); } -static struct mp_flight_record mp_steps[] = { - MP_FR_NOBLOCK_APS(relocate_and_load_microcode, - relocate_and_load_microcode), - MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), - /* Wait for APs to finish initialization before proceeding. 
*/ - MP_FR_BLOCK_APS(NULL, enable_smis), +static const struct mp_ops mp_ops = { + .pre_mp_init = pre_mp_init, + .get_cpu_count = get_cpu_count, + .get_smm_info = smm_info, + .get_microcode_info = get_microcode_info, + .adjust_cpu_apic_entry = adjust_apic_id, + .pre_mp_smm_init = smm_initialize, + .per_cpu_smm_trigger = per_cpu_smm_trigger, + .relocation_handler = smm_relocation_handler, + .post_mp_init = post_mp_init, }; +void broadwell_init_cpus(device_t dev) +{ + struct bus *cpu_bus = dev->link_list; + + if (mp_init_with_smm(cpu_bus, &mp_ops)) { + printk(BIOS_ERR, "MP initialization failure.\n"); + } +} + static struct device_operations cpu_dev_ops = { .init = cpu_core_init, }; @@ -662,55 +705,3 @@ static const struct cpu_driver driver __cpu_driver = { .ops = &cpu_dev_ops, .id_table = cpu_table, }; - -void broadwell_init_cpus(device_t dev) -{ - struct bus *cpu_bus = dev->link_list; - int num_threads; - int num_cores; - msr_t msr; - struct mp_params mp_params; - void *smm_save_area; - - msr = rdmsr(CORE_THREAD_COUNT_MSR); - num_threads = (msr.lo >> 0) & 0xffff; - num_cores = (msr.lo >> 16) & 0xffff; - printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n", - num_cores, num_threads); - - ht_disabled = num_threads == num_cores; - - /* Perform any necessary BSP initialization before APs are brought up. - * This call also allows the BSP to prepare for any secondary effects - * from calling cpu_initialize() such as smm_init(). */ - bsp_init_before_ap_bringup(cpu_bus); - - microcode_patch = intel_microcode_find(); - - /* Save default SMM area before relocation occurs. */ - smm_save_area = backup_default_smm_area(); - - mp_params.num_cpus = num_threads; - mp_params.parallel_microcode_load = 1; - if (ht_disabled) - mp_params.adjust_apic_id = adjust_apic_id_ht_disabled; - else - mp_params.adjust_apic_id = NULL; - mp_params.flight_plan = &mp_steps[0]; - mp_params.num_records = ARRAY_SIZE(mp_steps); - mp_params.microcode_pointer = microcode_patch; - - /* Load relocation and permanent handlers. Then initiate relocation. */ - if (smm_initialize()) - printk(BIOS_CRIT, "SMM initialization failed...\n"); - - if (mp_init(cpu_bus, &mp_params)) { - printk(BIOS_ERR, "MP initialization failure.\n"); - } - - /* Set Max Ratio */ - set_max_ratio(); - - /* Restore the default SMM region. */ - restore_default_smm_area(smm_save_area); -} diff --git a/src/soc/intel/broadwell/include/soc/cpu.h b/src/soc/intel/broadwell/include/soc/cpu.h index 3610e64..9a9e0f4 100644 --- a/src/soc/intel/broadwell/include/soc/cpu.h +++ b/src/soc/intel/broadwell/include/soc/cpu.h @@ -55,12 +55,6 @@ void set_power_limits(u8 power_limit_1_time); int cpu_config_tdp_levels(void); -/* - * Determine if HyperThreading is disabled. - * The variable is not valid until setup_ap_init() has been called. - */ -extern int ht_disabled; - /* CPU identification */ u32 cpu_family_model(void); u32 cpu_stepping(void); diff --git a/src/soc/intel/broadwell/include/soc/smm.h b/src/soc/intel/broadwell/include/soc/smm.h index 7f5d6b7..a5247c4 100644 --- a/src/soc/intel/broadwell/include/soc/smm.h +++ b/src/soc/intel/broadwell/include/soc/smm.h @@ -53,7 +53,11 @@ static inline int smm_region_size(void) return CONFIG_SMM_TSEG_SIZE; } -int smm_initialize(void); +void smm_relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase); +void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size); +void smm_initialize(void); void smm_relocate(void); /* These helpers are for performing SMM relocation. 
*/ diff --git a/src/soc/intel/broadwell/smmrelocate.c b/src/soc/intel/broadwell/smmrelocate.c index 18119f8..0cc6399 100644 --- a/src/soc/intel/broadwell/smmrelocate.c +++ b/src/soc/intel/broadwell/smmrelocate.c @@ -60,9 +60,9 @@ static inline void write_uncore_emrr(struct smm_relocation_params *relo_params) wrmsr(UNCORE_EMRRphysMask_MSR, relo_params->uncore_emrr_mask); } -static void update_save_state(int cpu, - struct smm_relocation_params *relo_params, - const struct smm_runtime *runtime) +static void update_save_state(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase, + struct smm_relocation_params *relo_params) { u32 smbase; u32 iedbase; @@ -70,7 +70,7 @@ static void update_save_state(int cpu, /* The relocated handler runs with all CPUs concurrently. Therefore * stagger the entry points adjusting SMBASE downwards by save state * size * CPU num. */ - smbase = relo_params->smram_base - cpu * runtime->save_state_size; + smbase = staggered_smbase; iedbase = relo_params->ied_base; printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n", @@ -101,8 +101,8 @@ static void update_save_state(int cpu, } else { em64t101_smm_state_save_area_t *save_state; - save_state = (void *)(runtime->smbase + SMM_DEFAULT_SIZE - - runtime->save_state_size); + save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE - + sizeof(*save_state)); save_state->smbase = smbase; save_state->iedbase = iedbase; @@ -130,24 +130,11 @@ static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params) /* The relocation work is actually performed in SMM context, but the code * resides in the ramstage module. This occurs by trampolining from the default * SMRAM entry point to here. */ -static void asmlinkage cpu_smm_do_relocation(void *arg) +void smm_relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase) { msr_t mtrr_cap; - struct smm_relocation_params *relo_params; - const struct smm_module_params *p; - const struct smm_runtime *runtime; - int cpu; - - p = arg; - runtime = p->runtime; - relo_params = p->arg; - cpu = p->cpu; - - if (cpu >= CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Invalid CPU number assigned in SMM stub: %d\n", cpu); - return; - } + struct smm_relocation_params *relo_params = &smm_reloc_params; printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu); @@ -176,7 +163,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg) } /* Make appropriate changes to the save state map. */ - update_save_state(cpu, relo_params, runtime); + update_save_state(cpu, curr_smbase, staggered_smbase, relo_params); /* Write EMRR and SMRR MSRs based on indicated support. */ mtrr_cap = rdmsr(MTRR_CAP_MSR); @@ -259,49 +246,6 @@ static void fill_in_relocation_params(device_t dev, params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1; } -static void adjust_apic_id_map(struct smm_loader_params *smm_params) -{ - struct smm_runtime *runtime; - int i; - - /* Adjust the APIC id map if HT is disabled. */ - if (!ht_disabled) - return; - - runtime = smm_params->runtime; - - /* The APIC ids increment by 2 when HT is disabled. */ - for (i = 0; i < CONFIG_MAX_CPUS; i++) - runtime->apic_id_to_cpu[i] = runtime->apic_id_to_cpu[i] * 2; -} - -static int install_relocation_handler(int num_cpus, - struct smm_relocation_params *relo_params) -{ - /* The default SMM entry can happen in parallel or serially. If the - * default SMM entry is done in parallel the BSP has already setup - * the saving state to each CPU's MSRs. 
At least one save state size - * is required for the initial SMM entry for the BSP to determine if - * parallel SMM relocation is even feasible. Set the stack size to - * the save state size, and call into the do_relocation handler. */ - int save_state_size = sizeof(em64t101_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = 1, - .handler = (smm_handler_t)&cpu_smm_do_relocation, - .handler_arg = (void *)relo_params, - }; - - if (smm_setup_relocation_handler(&smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); - - return 0; -} - static void setup_ied_area(struct smm_relocation_params *params) { char *ied_base; @@ -321,35 +265,10 @@ static void setup_ied_area(struct smm_relocation_params *params) memset(ied_base + (1 << 20), 0, (32 << 10)); } -static int install_permanent_handler(int num_cpus, - struct smm_relocation_params *relo_params) -{ - /* There are num_cpus concurrent stacks and num_cpus concurrent save - * state areas. Lastly, set the stack size to the save state size. */ - int save_state_size = sizeof(em64t101_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = num_cpus, - }; - - printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n", - relo_params->smram_base); - if (smm_load_module((void *)relo_params->smram_base, - relo_params->smram_size, &smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); - - return 0; -} - -static int cpu_smm_setup(void) +void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size) { device_t dev = SA_DEV_ROOT; - int num_cpus; - msr_t msr; printk(BIOS_DEBUG, "Setting up SMI for CPU\n"); @@ -357,50 +276,32 @@ static int cpu_smm_setup(void) setup_ied_area(&smm_reloc_params); - msr = rdmsr(CORE_THREAD_COUNT_MSR); - num_cpus = msr.lo & 0xffff; - if (num_cpus > CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Error: Hardware CPUs (%d) > MAX_CPUS (%d)\n", - num_cpus, CONFIG_MAX_CPUS); - } - - if (install_relocation_handler(num_cpus, &smm_reloc_params)) { - printk(BIOS_CRIT, "SMM Relocation handler install failed.\n"); - return -1; - } - - if (install_permanent_handler(num_cpus, &smm_reloc_params)) { - printk(BIOS_CRIT, "SMM Permanent handler install failed.\n"); - return -1; - } - - /* Ensure the SMM handlers hit DRAM before performing first SMI. */ - /* TODO(adurbin): Is this really needed? */ - wbinvd(); - - return 0; + *perm_smbase = smm_reloc_params.smram_base; + *perm_smsize = smm_reloc_params.smram_size; + *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t); } -int smm_initialize(void) +void smm_initialize(void) { - /* Return early if CPU SMM setup failed. */ - if (cpu_smm_setup()) - return -1; - /* Clear the SMM state in the southbridge. */ southbridge_smm_clear_state(); - /* Run the relocation handler. */ + /* + * Run the relocation handler for on the BSP to check and set up + * parallel SMM relocation. + */ smm_initiate_relocation(); if (smm_reloc_params.smm_save_state_in_msrs) { printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n"); } - - return 0; } +/* The default SMM entry can happen in parallel or serially. If the + * default SMM entry is done in parallel the BSP has already setup + * the saving state to each CPU's MSRs. 
At least one save state size + * is required for the initial SMM entry for the BSP to determine if + * parallel SMM relocation is even feasible. */ void smm_relocate(void) { /*
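The smm_info() callback introduced here shrinks what the SoC must report to the common SMM loader down to three values: the permanent handler's base, the size of that region, and the per-CPU save-state size. Below is a hedged sketch of that contract for a hypothetical SoC; the region struct, addresses, and save-state size are placeholders of my own, whereas the real broadwell callback fills the out-parameters from smm_reloc_params and sizeof(em64t101_smm_state_save_area_t) as shown in the diff.

#include <stddef.h>
#include <stdint.h>

/* Placeholder region description standing in for smm_reloc_params. */
struct example_smm_region {
        uintptr_t base; /* TSEG base where the permanent handler is loaded */
        size_t size;    /* usable TSEG size */
};

static const struct example_smm_region example_tseg = {
        .base = 0xad000000,        /* hypothetical 8 MiB TSEG base */
        .size = 8 * 1024 * 1024,
};

/* Same shape as the get_smm_info hook in the patch: report the permanent
 * SMM region and the per-CPU save-state footprint to the common loader. */
static void example_get_smm_info(uintptr_t *perm_smbase,
                                 size_t *perm_smsize,
                                 size_t *smm_save_state_size)
{
        *perm_smbase = example_tseg.base;
        *perm_smsize = example_tseg.size;
        *smm_save_state_size = 1024;    /* placeholder save-state size */
}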
New patch to review for coreboot: soc/intel/apollolake: convert to using common MP init
by Aaron Durbin
04 May '16
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14594
-gerrit commit 1bec32406e6bd8f1d02f0594dec033ffde13b710 Author: Aaron Durbin <adurbin(a)chromium.org> Date: Tue May 3 15:56:24 2016 -0500 soc/intel/apollolake: convert to using common MP init In order to reduce duplication of code use the common MP initialization flow. Change-Id: I8cfb5ba6f6a31fecde2ce3bf997f87c4486ab3ab Signed-off-by: Aaron Durbin <adurbin(a)chromium.org> --- src/soc/intel/apollolake/cpu.c | 43 ++++++++++++++++++------------------------ 1 file changed, 18 insertions(+), 25 deletions(-) diff --git a/src/soc/intel/apollolake/cpu.c b/src/soc/intel/apollolake/cpu.c index 21cd33a..99cb5ad 100644 --- a/src/soc/intel/apollolake/cpu.c +++ b/src/soc/intel/apollolake/cpu.c @@ -56,12 +56,25 @@ static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt) * creates the MTRR solution that the APs will use. Otherwise APs will try to * apply the incomplete solution as the BSP is calculating it. */ -static void bsp_pre_mp_setup(void) +static void pre_mp_init(void) { x86_setup_mtrrs_with_detect(); x86_mtrr_check(); } +/* Find CPU topology */ +static int get_cpu_count(void) +{ + unsigned int num_virt_cores, num_phys_cores; + + read_cpu_topology(&num_phys_cores, &num_virt_cores); + + printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n", + num_phys_cores, num_virt_cores); + + return num_virt_cores; +} + /* * CPU initialization recipe * @@ -69,34 +82,14 @@ static void bsp_pre_mp_setup(void) * the microcode on all cores before releasing them from reset. That means that * the BSP and all APs will come up with the same microcode revision. */ -static struct mp_flight_record flight_plan[] = { - /* NOTE: MTRR solution must be calculated before firing up the APs */ - MP_FR_NOBLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), +static const struct mp_ops mp_ops = { + .pre_mp_init = pre_mp_init, + .get_cpu_count = get_cpu_count, }; void apollolake_init_cpus(device_t dev) { - unsigned int num_virt_cores, num_phys_cores; - - /* Pre-flight check */ - bsp_pre_mp_setup(); - - /* Find CPU topology */ - read_cpu_topology(&num_phys_cores, &num_virt_cores); - printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n", - num_phys_cores, num_virt_cores); - - /* Systems check */ - struct mp_params flight_data_recorder = { - .num_cpus = num_virt_cores, - .parallel_microcode_load = 0, - .microcode_pointer = NULL, - .adjust_apic_id = NULL, - .flight_plan = flight_plan, - .num_records = ARRAY_SIZE(flight_plan), - }; - /* Clear for take-off */ - if (mp_init(dev->link_list, &flight_data_recorder) < 0) + if (mp_init_with_smm(dev->link_list, &mp_ops) < 0) printk(BIOS_ERR, "MP initialization failure.\n"); }
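This apollolake conversion is the smallest possible adopter of the common driver: only pre_mp_init and get_cpu_count are filled in, every other mp_ops hook is left NULL, and the return value of mp_init_with_smm() is checked for failure. The sketch below restates that floor for a hypothetical SoC; the example_* names and the CPU count are invented, and the include paths assume the coreboot <cpu/x86/mp.h>, <cpu/x86/mtrr.h>, <device/device.h> and <console/console.h> interfaces referenced by the patches in this series.

#include <console/console.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
#include <device/device.h>

static void example_pre_mp_init(void)
{
        /* BSP-only work before the APs are released: build the MTRR
         * solution the APs will copy. */
        x86_setup_mtrrs_with_detect();
        x86_mtrr_check();
}

static int example_get_cpu_count(void)
{
        /* Placeholder: a real port derives this from CPUID or an MSR. */
        return 4;
}

/* Hooks not listed stay NULL and are skipped, as in the apollolake patch. */
static const struct mp_ops example_mp_ops = {
        .pre_mp_init = example_pre_mp_init,
        .get_cpu_count = example_get_cpu_count,
};

void example_init_cpus(device_t dev)
{
        if (mp_init_with_smm(dev->link_list, &example_mp_ops) < 0)
                printk(BIOS_ERR, "MP initialization failure.\n");
}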
New patch to review for coreboot: soc/intel/braswell: convert to using common MP and SMM init
by Aaron Durbin
04 May '16
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14593
-gerrit commit 88dac2168a57ae3286647cb10925def56b488811 Author: Aaron Durbin <adurbin(a)chromium.org> Date: Tue May 3 15:47:48 2016 -0500 soc/intel/braswell: convert to using common MP and SMM init In order to reduce duplication of code use the common MP and SMM initialization flow. Change-Id: I65beefec53a29b2861433bc42679f3fa571d5b6a Signed-off-by: Aaron Durbin <adurbin(a)chromium.org> --- src/soc/intel/braswell/cpu.c | 281 ++++++++++++++----------------------------- 1 file changed, 90 insertions(+), 191 deletions(-) diff --git a/src/soc/intel/braswell/cpu.c b/src/soc/intel/braswell/cpu.c index 3d682b7..aae553c 100644 --- a/src/soc/intel/braswell/cpu.c +++ b/src/soc/intel/braswell/cpu.c @@ -33,40 +33,8 @@ #include <soc/smm.h> #include <stdlib.h> -static void smm_relocate(void); -static void enable_smis(void); -static void pre_smm_relocation(void); - -static struct mp_flight_record mp_steps[] = { - MP_FR_BLOCK_APS(pre_smm_relocation, pre_smm_relocation), - MP_FR_BLOCK_APS(smm_relocate, smm_relocate), - MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), - /* Wait for APs to finish initialization before proceeding. */ - MP_FR_BLOCK_APS(NULL, enable_smis), -}; - -/* The APIC id space is sparse. Each id is separated by 2. */ -static int adjust_apic_id(int index, int apic_id) -{ - return 2 * index; -} - -/* Package level MSRs */ -const struct reg_script package_msr_script[] = { - /* Set Package TDP to ~7W */ - REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa), - REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0), - REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702), - REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b), - REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0), - REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305), - REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d), - REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27), - REG_SCRIPT_END -}; - /* Core level MSRs */ -const struct reg_script core_msr_script[] = { +static const struct reg_script core_msr_script[] = { /* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */ REG_MSR_RMW(MSR_PMG_CST_CONFIG_CONTROL, ~0x3f080f, 0xe0008), REG_MSR_RMW(MSR_POWER_MISC, @@ -77,50 +45,6 @@ const struct reg_script core_msr_script[] = { REG_SCRIPT_END }; -void soc_init_cpus(device_t dev) -{ - struct bus *cpu_bus = dev->link_list; - const struct pattrs *pattrs = pattrs_get(); - struct mp_params mp_params; - void *default_smm_area; - uint32_t bsmrwac; - - printk(BIOS_SPEW, "%s/%s ( %s )\n", - __FILE__, __func__, dev_name(dev)); - - /* Set up MTRRs based on physical address size. */ - x86_setup_mtrrs_with_detect(); - x86_mtrr_check(); - - mp_params.num_cpus = pattrs->num_cpus, - mp_params.parallel_microcode_load = 1, - mp_params.adjust_apic_id = adjust_apic_id; - mp_params.flight_plan = &mp_steps[0]; - mp_params.num_records = ARRAY_SIZE(mp_steps); - mp_params.microcode_pointer = pattrs->microcode_patch; - - default_smm_area = backup_default_smm_area(); - - /* - * Configure the BUNIT to allow dirty cache line evictions in non-SMM - * mode for the lines that were dirtied while in SMM mode. Otherwise - * the writes would be silently dropped. - */ - bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED; - iosf_bunit_write(BUNIT_SMRWAC, bsmrwac); - - /* Set package MSRs */ - reg_script_run(package_msr_script); - - /* Enable Turbo Mode on BSP and siblings of the BSP's building block. 
*/ - enable_turbo(); - - if (mp_init(cpu_bus, &mp_params)) - printk(BIOS_ERR, "MP initialization failure.\n"); - - restore_default_smm_area(default_smm_area); -} - static void soc_core_init(device_t cpu) { printk(BIOS_SPEW, "%s/%s ( %s )\n", @@ -161,7 +85,7 @@ static const struct cpu_driver driver __cpu_driver = { /* - * SMM loading and initialization. + * MP and SMM loading initialization. */ struct smm_relocation_attrs { @@ -172,108 +96,58 @@ struct smm_relocation_attrs { static struct smm_relocation_attrs relo_attrs; -static void adjust_apic_id_map(struct smm_loader_params *smm_params) -{ - int i; - struct smm_runtime *runtime = smm_params->runtime; - - for (i = 0; i < CONFIG_MAX_CPUS; i++) - runtime->apic_id_to_cpu[i] = mp_get_apic_id(i); -} +/* Package level MSRs */ +static const struct reg_script package_msr_script[] = { + /* Set Package TDP to ~7W */ + REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa), + REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0), + REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702), + REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b), + REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0), + REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305), + REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d), + REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27), + REG_SCRIPT_END +}; -static void asmlinkage cpu_smm_do_relocation(void *arg) +static void pre_mp_init(void) { - msr_t smrr; - em64t100_smm_state_save_area_t *smm_state; - const struct smm_module_params *p; - const struct smm_runtime *runtime; - int cpu; - - p = arg; - runtime = p->runtime; - cpu = p->cpu; - - if (cpu >= CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Invalid CPU number assigned in SMM stub: %d\n", cpu); - return; - } + uint32_t bsmrwac; - /* Set up SMRR. */ - smrr.lo = relo_attrs.smrr_base; - smrr.hi = 0; - wrmsr(SMRR_PHYS_BASE, smrr); - smrr.lo = relo_attrs.smrr_mask; - smrr.hi = 0; - wrmsr(SMRR_PHYS_MASK, smrr); + /* Set up MTRRs based on physical address size. */ + x86_setup_mtrrs_with_detect(); + x86_mtrr_check(); /* - * The relocated handler runs with all CPUs concurrently. Therefore - * stagger the entry points adjusting SMBASE downwards by save state - * size * CPU num. + * Configure the BUNIT to allow dirty cache line evictions in non-SMM + * mode for the lines that were dirtied while in SMM mode. Otherwise + * the writes would be silently dropped. */ - smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + runtime->smbase); - smm_state->smbase = relo_attrs.smbase - cpu * runtime->save_state_size; - printk(BIOS_DEBUG, "New SMBASE 0x%08x\n", smm_state->smbase); -} - -static int install_relocation_handler(int num_cpus) -{ - const int save_state_size = sizeof(em64t100_smm_state_save_area_t); + bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED; + iosf_bunit_write(BUNIT_SMRWAC, bsmrwac); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = 1, - .handler = (smm_handler_t)&cpu_smm_do_relocation, - }; + /* Set package MSRs */ + reg_script_run(package_msr_script); - if (smm_setup_relocation_handler(&smm_params)) - return -1; + /* Enable Turbo Mode on BSP and siblings of the BSP's building block. 
*/ + enable_turbo(); +} - adjust_apic_id_map(&smm_params); +static int get_cpu_count(void) +{ + const struct pattrs *pattrs = pattrs_get(); - return 0; + return pattrs->num_cpus; } -static int install_permanent_handler(int num_cpus) +static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size) { - /* - * There are num_cpus concurrent stacks and num_cpus concurrent save - * state areas. Lastly, set the stack size to the save state size. - */ - int save_state_size = sizeof(em64t100_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = num_cpus, - }; void *smm_base; size_t smm_size; - int tseg_size; - - printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n", - relo_attrs.smbase); - - smm_region(&smm_base, &smm_size); - tseg_size = smm_size - CONFIG_SMM_RESERVED_SIZE; - if (smm_load_module((void *)relo_attrs.smbase, tseg_size, &smm_params)) - return -1; - adjust_apic_id_map(&smm_params); - - return 0; -} - -static int smm_load_handlers(void) -{ /* All range registers are aligned to 4KiB */ const uint32_t rmask = ~((1 << 12) - 1); - const struct pattrs *pattrs = pattrs_get(); - void *smm_base; - size_t smm_size; /* Initialize global tracking state. */ smm_region(&smm_base, &smm_size); @@ -282,24 +156,26 @@ static int smm_load_handlers(void) relo_attrs.smrr_mask = ~(smm_size - 1) & rmask; relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID; - /* Install handlers. */ - if (install_relocation_handler(pattrs->num_cpus) < 0) { - printk(BIOS_ERR, "Unable to install SMM relocation handler.\n"); - return -1; - } + *perm_smbase = relo_attrs.smbase; + *perm_smsize = smm_size - CONFIG_SMM_RESERVED_SIZE; + *smm_save_state_size = sizeof(em64t100_smm_state_save_area_t); +} - if (install_permanent_handler(pattrs->num_cpus) < 0) { - printk(BIOS_ERR, "Unable to install SMM permanent handler.\n"); - return -1; - } +/* The APIC id space on Bay Trail is sparse. Each id is separated by 2. */ +static int adjust_apic_id(int index, int apic_id) +{ + return 2 * index; +} - /* Ensure the SMM handlers hit DRAM before performing first SMI. */ - wbinvd(); +static void get_microcode_info(const void **microcode, int *parallel) +{ + const struct pattrs *pattrs = pattrs_get(); - return 0; + *microcode = pattrs->microcode_patch; + *parallel = 1; } -static void pre_smm_relocation(void) +static void per_cpu_smm_trigger(void) { const struct pattrs *pattrs = pattrs_get(); msr_t msr_value; @@ -308,20 +184,6 @@ static void pre_smm_relocation(void) msr_value = rdmsr(MSR_IA32_BIOS_SIGN_ID); if (msr_value.hi == 0) intel_microcode_load_unlocked(pattrs->microcode_patch); -} - -static void smm_relocate(void) -{ - const struct pattrs *pattrs = pattrs_get(); - - /* Load relocation and permanent handler. */ - if (boot_cpu()) { - if (smm_load_handlers() < 0) { - printk(BIOS_ERR, "Error loading SMM handlers.\n"); - return; - } - southcluster_smm_clear_state(); - } /* Relocate SMM space. */ smm_initiate_relocation(); @@ -330,7 +192,44 @@ static void smm_relocate(void) intel_microcode_load_unlocked(pattrs->microcode_patch); } -static void enable_smis(void) +static void relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase) { - southcluster_smm_enable_smi(); + msr_t smrr; + em64t100_smm_state_save_area_t *smm_state; + + /* Set up SMRR. 
*/ + smrr.lo = relo_attrs.smrr_base; + smrr.hi = 0; + wrmsr(SMRR_PHYS_BASE, smrr); + smrr.lo = relo_attrs.smrr_mask; + smrr.hi = 0; + wrmsr(SMRR_PHYS_MASK, smrr); + + smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase); + smm_state->smbase = staggered_smbase; +} + +static const struct mp_ops mp_ops = { + .pre_mp_init = pre_mp_init, + .get_cpu_count = get_cpu_count, + .get_smm_info = get_smm_info, + .get_microcode_info = get_microcode_info, + .adjust_cpu_apic_entry = adjust_apic_id, + .pre_mp_smm_init = southcluster_smm_clear_state, + .per_cpu_smm_trigger = per_cpu_smm_trigger, + .relocation_handler = relocation_handler, + .post_mp_init = southcluster_smm_enable_smi, +}; + +void soc_init_cpus(device_t dev) +{ + struct bus *cpu_bus = dev->link_list; + + printk(BIOS_SPEW, "%s/%s ( %s )\n", + __FILE__, __func__, dev_name(dev)); + + if (mp_init_with_smm(cpu_bus, &mp_ops)) { + printk(BIOS_ERR, "MP initialization failure.\n"); + } }
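The mp_ops table that closes the diff above is an instance of a plain callback-table pattern: the SoC fills in only the hooks it needs, and the common driver walks the table, skipping NULL entries. A minimal standalone sketch of that idea follows; the names (demo_mp_ops, run_mp_flow, the hook bodies) are made up for illustration and are not the real coreboot mp_ops/mp_init_with_smm API.

#include <stdio.h>

/* Illustrative stand-in for a SoC-provided hook table. */
struct demo_mp_ops {
    void (*pre_mp_init)(void);      /* platform setup before CPUs start */
    int  (*get_cpu_count)(void);    /* required: how many CPUs to bring up */
    void (*post_mp_init)(void);     /* optional: e.g. enable SMIs */
};

static void my_pre_mp_init(void)   { printf("pre_mp_init: MTRR/LAPIC setup\n"); }
static int  my_get_cpu_count(void) { return 4; }

/* Common driver: call each hook that the SoC chose to provide. */
static int run_mp_flow(const struct demo_mp_ops *ops)
{
    if (ops->pre_mp_init)
        ops->pre_mp_init();
    if (!ops->get_cpu_count)
        return -1;
    printf("bringing up %d CPUs\n", ops->get_cpu_count());
    if (ops->post_mp_init)
        ops->post_mp_init();
    return 0;
}

int main(void)
{
    const struct demo_mp_ops ops = {
        .pre_mp_init   = my_pre_mp_init,
        .get_cpu_count = my_get_cpu_count,
        /* post_mp_init left NULL, as a SoC without SMM might do. */
    };
    return run_mp_flow(&ops) ? 1 : 0;
}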
New patch to review for coreboot: soc/intel/fsp_broadwell_de: convert to using common MP init
by Aaron Durbin
04 May '16
04 May '16
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14592
-gerrit commit 95963ceb3180046334a4e08e12fea93a25a94f73 Author: Aaron Durbin <adurbin(a)chromium.org> Date: Tue May 3 15:26:29 2016 -0500 soc/intel/fsp_broadwell_de: convert to using common MP init In order to reduce duplication of code use the common MP initialization flow. Change-Id: I2a7c628cfae7cf6af6e89fa8fc274f59127ff7c7 Signed-off-by: Aaron Durbin <adurbin(a)chromium.org> --- src/soc/intel/fsp_broadwell_de/cpu.c | 60 +++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/src/soc/intel/fsp_broadwell_de/cpu.c b/src/soc/intel/fsp_broadwell_de/cpu.c index 2f6487a..ca08204 100644 --- a/src/soc/intel/fsp_broadwell_de/cpu.c +++ b/src/soc/intel/fsp_broadwell_de/cpu.c @@ -27,15 +27,42 @@ #include <soc/pattrs.h> #include <soc/ramstage.h> -static void configure_mca(void); +static void pre_mp_init(void) +{ + x86_mtrr_check(); + + /* Enable the local cpu apics */ + setup_lapic(); +} + +static int get_cpu_count(void) +{ + const struct pattrs *pattrs = pattrs_get(); -static struct mp_flight_record mp_steps[] = { - MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), + return pattrs->num_cpus; +} + +static void get_microcode_info(const void **microcode, int *parallel) +{ + const struct pattrs *pattrs = pattrs_get(); + + *microcode = pattrs->microcode_patch; + *parallel = 1; +} + +static const struct mp_ops mp_ops = { + .pre_mp_init = pre_mp_init, + .get_cpu_count = get_cpu_count, + .get_microcode_info = get_microcode_info, }; -static int adjust_apic_id(int index, int apic_id) +void broadwell_de_init_cpus(device_t dev) { - return index; + struct bus *cpu_bus = dev->link_list; + + if (mp_init_with_smm(cpu_bus, &mp_ops)) { + printk(BIOS_ERR, "MP initialization failure.\n"); + } } static void configure_mca(void) @@ -63,29 +90,6 @@ static void configure_mca(void) wrmsr(MSR_IA32_MC0_STATUS + (i * 4), msr); } -void broadwell_de_init_cpus(device_t dev) -{ - struct bus *cpu_bus = dev->link_list; - const struct pattrs *pattrs = pattrs_get(); - struct mp_params mp_params; - - x86_mtrr_check(); - - /* Enable the local cpu apics */ - setup_lapic(); - - mp_params.num_cpus = pattrs->num_cpus, - mp_params.parallel_microcode_load = 1, - mp_params.adjust_apic_id = adjust_apic_id; - mp_params.flight_plan = &mp_steps[0]; - mp_params.num_records = ARRAY_SIZE(mp_steps); - mp_params.microcode_pointer = pattrs->microcode_patch; - - if (mp_init(cpu_bus, &mp_params)) { - printk(BIOS_ERR, "MP initialization failure.\n"); - } -} - static void broadwell_de_core_init(device_t cpu) { printk(BIOS_DEBUG, "Init Broadwell-DE core.\n");
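The broadwell_de diff above also deletes the SoC's local mp_flight_record array (the "flight plan") in favor of the shared flow. As a rough illustration of what such a flight plan is, a standalone sketch under assumed names (demo_flight_record, run_flight_plan) follows; it is not the real coreboot structure, and in the real driver the AP half of each step runs on the application processors rather than in a loop.

#include <stddef.h>
#include <stdio.h>

struct demo_flight_record {
    void (*ap_call)(void);   /* what every AP runs at this step */
    void (*bsp_call)(void);  /* what the BSP runs at this step  */
};

static void init_cpu(void)    { printf("per-CPU init\n"); }
static void enable_smis(void) { printf("BSP enables SMIs\n"); }

static void run_flight_plan(const struct demo_flight_record *plan, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if (plan[i].ap_call)
            plan[i].ap_call();
        if (plan[i].bsp_call)
            plan[i].bsp_call();
    }
}

int main(void)
{
    const struct demo_flight_record plan[] = {
        { init_cpu, init_cpu },
        { NULL, enable_smis },
    };
    run_flight_plan(plan, sizeof(plan) / sizeof(plan[0]));
    return 0;
}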
Patch set updated for coreboot: soc/intel/baytrail: convert to using common MP and SMM init
by Aaron Durbin
04 May '16
04 May '16
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14580
-gerrit commit 4bcb1c3650155943d259c945c1f5a34e59049512 Author: Aaron Durbin <adurbin(a)chromium.org> Date: Tue May 3 11:12:52 2016 -0500 soc/intel/baytrail: convert to using common MP and SMM init In order to reduce duplication of code use the common MP and SMM initialization flow. Change-Id: I5c5d678d7adb4c489752cca80b20f785ec8749d4 Signed-off-by: Aaron Durbin <adurbin(a)chromium.org> --- src/soc/intel/baytrail/cpu.c | 262 +++++++++++++-------------------------- src/soc/intel/fsp_baytrail/cpu.c | 2 +- 2 files changed, 88 insertions(+), 176 deletions(-) diff --git a/src/soc/intel/baytrail/cpu.c b/src/soc/intel/baytrail/cpu.c index fd9c7fb..94c3f2b 100644 --- a/src/soc/intel/baytrail/cpu.c +++ b/src/soc/intel/baytrail/cpu.c @@ -32,36 +32,6 @@ #include <soc/ramstage.h> #include <soc/smm.h> -static void smm_relocate(void); -static void enable_smis(void); - -static struct mp_flight_record mp_steps[] = { - MP_FR_BLOCK_APS(smm_relocate, smm_relocate), - MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), - /* Wait for APs to finish initialization before proceeding. */ - MP_FR_BLOCK_APS(NULL, enable_smis), -}; - -/* The APIC id space on Bay Trail is sparse. Each id is separated by 2. */ -static int adjust_apic_id(int index, int apic_id) -{ - return 2 * index; -} - -/* Package level MSRs */ -const struct reg_script package_msr_script[] = { - /* Set Package TDP to ~7W */ - REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa), - REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0), - REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702), - REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b), - REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0), - REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305), - REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d), - REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27), - REG_SCRIPT_END -}; - /* Core level MSRs */ const struct reg_script core_msr_script[] = { /* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */ @@ -74,48 +44,6 @@ const struct reg_script core_msr_script[] = { REG_SCRIPT_END }; -void baytrail_init_cpus(device_t dev) -{ - struct bus *cpu_bus = dev->link_list; - const struct pattrs *pattrs = pattrs_get(); - struct mp_params mp_params; - uint32_t bsmrwac; - void *default_smm_area; - - /* Set up MTRRs based on physical address size. */ - x86_setup_mtrrs_with_detect(); - x86_mtrr_check(); - - mp_params.num_cpus = pattrs->num_cpus, - mp_params.parallel_microcode_load = 1, - mp_params.adjust_apic_id = adjust_apic_id; - mp_params.flight_plan = &mp_steps[0]; - mp_params.num_records = ARRAY_SIZE(mp_steps); - mp_params.microcode_pointer = pattrs->microcode_patch; - - default_smm_area = backup_default_smm_area(); - - /* - * Configure the BUNIT to allow dirty cache line evictions in non-SMM - * mode for the lines that were dirtied while in SMM mode. Otherwise - * the writes would be silently dropped. - */ - bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED; - iosf_bunit_write(BUNIT_SMRWAC, bsmrwac); - - /* Set package MSRs */ - reg_script_run(package_msr_script); - - /* Enable Turbo Mode on BSP and siblings of the BSP's building block. */ - enable_turbo(); - - if (mp_init(cpu_bus, &mp_params)) { - printk(BIOS_ERR, "MP initialization failure.\n"); - } - - restore_default_smm_area(default_smm_area); -} - static void baytrail_core_init(device_t cpu) { printk(BIOS_DEBUG, "Init BayTrail core.\n"); @@ -151,7 +79,7 @@ static const struct cpu_driver driver __cpu_driver = { /* - * SMM loading and initialization. + * MP and SMM loading initialization. 
*/ struct smm_relocation_attrs { @@ -162,98 +90,55 @@ struct smm_relocation_attrs { static struct smm_relocation_attrs relo_attrs; -static void adjust_apic_id_map(struct smm_loader_params *smm_params) -{ - int i; - struct smm_runtime *runtime = smm_params->runtime; - - for (i = 0; i < CONFIG_MAX_CPUS; i++) - runtime->apic_id_to_cpu[i] = mp_get_apic_id(i); -} - -static void asmlinkage cpu_smm_do_relocation(void *arg) -{ - msr_t smrr; - em64t100_smm_state_save_area_t *smm_state; - const struct smm_module_params *p; - const struct smm_runtime *runtime; - int cpu; - - p = arg; - runtime = p->runtime; - cpu = p->cpu; - - if (cpu >= CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Invalid CPU number assigned in SMM stub: %d\n", cpu); - return; - } - - /* Set up SMRR. */ - smrr.lo = relo_attrs.smrr_base; - smrr.hi = 0; - wrmsr(SMRR_PHYS_BASE, smrr); - smrr.lo = relo_attrs.smrr_mask; - smrr.hi = 0; - wrmsr(SMRR_PHYS_MASK, smrr); - - /* The relocated handler runs with all CPUs concurrently. Therefore - * stagger the entry points adjusting SMBASE downwards by save state - * size * CPU num. */ - smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + runtime->smbase); - smm_state->smbase = relo_attrs.smbase - cpu * runtime->save_state_size; - printk(BIOS_DEBUG, "New SMBASE 0x%08x\n", smm_state->smbase); -} +/* Package level MSRs */ +static const struct reg_script package_msr_script[] = { + /* Set Package TDP to ~7W */ + REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa), + REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0), + REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702), + REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b), + REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0), + REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305), + REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d), + REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27), + REG_SCRIPT_END +}; -static int install_relocation_handler(int num_cpus) +static void pre_mp_init(void) { - const int save_state_size = sizeof(em64t100_smm_state_save_area_t); + uint32_t bsmrwac; - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = 1, - .handler = (smm_handler_t)&cpu_smm_do_relocation, - }; + /* Set up MTRRs based on physical address size. */ + x86_setup_mtrrs_with_detect(); + x86_mtrr_check(); - if (smm_setup_relocation_handler(&smm_params)) - return -1; + /* + * Configure the BUNIT to allow dirty cache line evictions in non-SMM + * mode for the lines that were dirtied while in SMM mode. Otherwise + * the writes would be silently dropped. + */ + bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED; + iosf_bunit_write(BUNIT_SMRWAC, bsmrwac); - adjust_apic_id_map(&smm_params); + /* Set package MSRs */ + reg_script_run(package_msr_script); - return 0; + /* Enable Turbo Mode on BSP and siblings of the BSP's building block. */ + enable_turbo(); } -static int install_permanent_handler(int num_cpus) +static int get_cpu_count(void) { - /* There are num_cpus concurrent stacks and num_cpus concurrent save - * state areas. Lastly, set the stack size to the save state size. 
*/ - int save_state_size = sizeof(em64t100_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = num_cpus, - }; - const int tseg_size = smm_region_size() - CONFIG_SMM_RESERVED_SIZE; - - printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n", - relo_attrs.smbase); - - if (smm_load_module((void *)relo_attrs.smbase, tseg_size, &smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); - - return 0; + const struct pattrs *pattrs = pattrs_get(); + + return pattrs->num_cpus; } -static int smm_load_handlers(void) +static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size) { /* All range registers are aligned to 4KiB */ const uint32_t rmask = ~((1 << 12) - 1); - const struct pattrs *pattrs = pattrs_get(); /* Initialize global tracking state. */ relo_attrs.smbase = (uint32_t)smm_region_start(); @@ -261,36 +146,29 @@ static int smm_load_handlers(void) relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask; relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID; - /* Install handlers. */ - if (install_relocation_handler(pattrs->num_cpus) < 0) { - printk(BIOS_ERR, "Unable to install SMM relocation handler.\n"); - return -1; - } + *perm_smbase = relo_attrs.smbase; + *perm_smsize = smm_region_size() - CONFIG_SMM_RESERVED_SIZE; + *smm_save_state_size = sizeof(em64t100_smm_state_save_area_t); +} - if (install_permanent_handler(pattrs->num_cpus) < 0) { - printk(BIOS_ERR, "Unable to install SMM permanent handler.\n"); - return -1; - } +/* The APIC id space on Bay Trail is sparse. Each id is separated by 2. */ +static int adjust_apic_id(int index, int apic_id) +{ + return 2 * index; +} - /* Ensure the SMM handlers hit DRAM before performing first SMI. */ - wbinvd(); +static void get_microcode_info(const void **microcode, int *parallel) +{ + const struct pattrs *pattrs = pattrs_get(); - return 0; + *microcode = pattrs->microcode_patch; + *parallel = 1; } -static void smm_relocate(void) +static void per_cpu_smm_trigger(void) { const struct pattrs *pattrs = pattrs_get(); - /* Load relocation and permanent handler. */ - if (boot_cpu()) { - if (smm_load_handlers() < 0) { - printk(BIOS_ERR, "Error loading SMM handlers.\n"); - return; - } - southcluster_smm_clear_state(); - } - /* Relocate SMM space. */ smm_initiate_relocation(); @@ -298,7 +176,41 @@ static void smm_relocate(void) intel_microcode_load_unlocked(pattrs->microcode_patch); } -static void enable_smis(void) +static void relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase) { - southcluster_smm_enable_smi(); + msr_t smrr; + em64t100_smm_state_save_area_t *smm_state; + + /* Set up SMRR. 
*/ + smrr.lo = relo_attrs.smrr_base; + smrr.hi = 0; + wrmsr(SMRR_PHYS_BASE, smrr); + smrr.lo = relo_attrs.smrr_mask; + smrr.hi = 0; + wrmsr(SMRR_PHYS_MASK, smrr); + + smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase); + smm_state->smbase = staggered_smbase; +} + +static const struct mp_ops mp_ops = { + .pre_mp_init = pre_mp_init, + .get_cpu_count = get_cpu_count, + .get_smm_info = get_smm_info, + .get_microcode_info = get_microcode_info, + .adjust_cpu_apic_entry = adjust_apic_id, + .pre_mp_smm_init = southcluster_smm_clear_state, + .per_cpu_smm_trigger = per_cpu_smm_trigger, + .relocation_handler = relocation_handler, + .post_mp_init = southcluster_smm_enable_smi, +}; + +void baytrail_init_cpus(device_t dev) +{ + struct bus *cpu_bus = dev->link_list; + + if (mp_init_with_smm(cpu_bus, &mp_ops)) { + printk(BIOS_ERR, "MP initialization failure.\n"); + } } diff --git a/src/soc/intel/fsp_baytrail/cpu.c b/src/soc/intel/fsp_baytrail/cpu.c index 3856a06..2753f1c 100644 --- a/src/soc/intel/fsp_baytrail/cpu.c +++ b/src/soc/intel/fsp_baytrail/cpu.c @@ -55,7 +55,7 @@ static int adjust_apic_id(int index, int apic_id) } /* Core level MSRs */ -const struct reg_script core_msr_script[] = { +static const struct reg_script core_msr_script[] = { /* Dynamic L2 shrink enable and threshold */ REG_MSR_RMW(MSR_PMG_CST_CONFIG_CONTROL, ~0x3f000f, 0xe0008), /* Disable C1E */
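The get_smm_info()/relocation_handler() pair above reduces to a little address arithmetic: the SMRR mask is derived from the TSEG size (aligned to 4 KiB, with the MTRR valid bit set), and each CPU's SMBASE is staggered downward by one save-state area so the relocated handlers do not collide. A standalone worked example follows; the TSEG base/size, save-state size, and valid-bit constant are assumed values for illustration only.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MTRR_PHYS_MASK_VALID (1u << 11)  /* mirrors MTRR_PHYS_MASK_VALID */

int main(void)
{
    const uint32_t rmask = ~((1u << 12) - 1);  /* 4 KiB alignment mask */
    uint32_t smm_base = 0x7f800000;            /* assumed TSEG base    */
    uint32_t smm_size = 8u << 20;              /* assumed TSEG size    */

    uint32_t smrr_base = smm_base & rmask;
    uint32_t smrr_mask = (~(smm_size - 1) & rmask) | DEMO_MTRR_PHYS_MASK_VALID;

    printf("SMRR_PHYS_BASE = 0x%08x\n", smrr_base);
    printf("SMRR_PHYS_MASK = 0x%08x\n", smrr_mask);

    /* Staggered SMBASE: each CPU's entry moves down by one save-state area. */
    const uint32_t save_state_size = 0x400;    /* illustrative size only */
    for (int cpu = 0; cpu < 4; cpu++)
        printf("cpu %d SMBASE = 0x%08x\n", cpu,
               smm_base - cpu * save_state_size);
    return 0;
}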
Patch set updated for coreboot: soc/intel/fsp_baytrail: convert to using common MP and SMM init
by Aaron Durbin
04 May '16
04 May '16
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14581
-gerrit commit 2bdef97dbe4f516ff64b5e8a8c55c4e16a8e1b9b Author: Aaron Durbin <adurbin(a)chromium.org> Date: Tue May 3 11:31:32 2016 -0500 soc/intel/fsp_baytrail: convert to using common MP and SMM init In order to reduce duplication of code use the common MP and SMM initialization flow. Change-Id: I709ea938b720f26b351a1f950593efe077edb997 Signed-off-by: Aaron Durbin <adurbin(a)chromium.org> --- src/soc/intel/fsp_baytrail/cpu.c | 212 +++++++++++---------------------------- 1 file changed, 59 insertions(+), 153 deletions(-) diff --git a/src/soc/intel/fsp_baytrail/cpu.c b/src/soc/intel/fsp_baytrail/cpu.c index 2753f1c..3fba0b7 100644 --- a/src/soc/intel/fsp_baytrail/cpu.c +++ b/src/soc/intel/fsp_baytrail/cpu.c @@ -30,30 +30,8 @@ #include <soc/msr.h> #include <soc/pattrs.h> #include <soc/ramstage.h> -#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) #include <soc/smm.h> -static void smm_relocate(void); -static void enable_smis(void); - -static struct mp_flight_record mp_steps[] = { - MP_FR_BLOCK_APS(smm_relocate, smm_relocate), - MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), - /* Wait for APs to finish initialization before proceeding. */ - MP_FR_BLOCK_APS(NULL, enable_smis), -}; -#else /* CONFIG_HAVE_SMI_HANDLER */ -static struct mp_flight_record mp_steps[] = { - MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu), -}; -#endif - -/* The APIC id space on Bay Trail is sparse. Each id is separated by 2. */ -static int adjust_apic_id(int index, int apic_id) -{ - return 2 * index; -} - /* Core level MSRs */ static const struct reg_script core_msr_script[] = { /* Dynamic L2 shrink enable and threshold */ @@ -64,29 +42,6 @@ static const struct reg_script core_msr_script[] = { REG_SCRIPT_END }; -void baytrail_init_cpus(device_t dev) -{ - struct bus *cpu_bus = dev->link_list; - const struct pattrs *pattrs = pattrs_get(); - struct mp_params mp_params; - - x86_mtrr_check(); - - /* Enable the local cpu apics */ - setup_lapic(); - - mp_params.num_cpus = pattrs->num_cpus, - mp_params.parallel_microcode_load = 1, - mp_params.adjust_apic_id = adjust_apic_id; - mp_params.flight_plan = &mp_steps[0]; - mp_params.num_records = ARRAY_SIZE(mp_steps); - mp_params.microcode_pointer = pattrs->microcode_patch; - - if (mp_init(cpu_bus, &mp_params)) { - printk(BIOS_ERR, "MP initialization failure.\n"); - } -} - static void baytrail_core_init(device_t cpu) { printk(BIOS_DEBUG, "Init BayTrail core.\n"); @@ -123,9 +78,8 @@ static const struct cpu_driver driver __cpu_driver = { .id_table = cpu_table, }; -#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) /* - * SMM loading and initialization. + * MP and SMM loading initialization. */ struct smm_relocation_attrs { @@ -136,98 +90,26 @@ struct smm_relocation_attrs { static struct smm_relocation_attrs relo_attrs; -static void adjust_apic_id_map(struct smm_loader_params *smm_params) +static void pre_mp_init(void) { - int i; - struct smm_runtime *runtime = smm_params->runtime; - - for (i = 0; i < CONFIG_MAX_CPUS; i++) - runtime->apic_id_to_cpu[i] = mp_get_apic_id(i); -} - -static void asmlinkage cpu_smm_do_relocation(void *arg) -{ - msr_t smrr; - em64t100_smm_state_save_area_t *smm_state; - const struct smm_module_params *p; - const struct smm_runtime *runtime; - int cpu; - - p = arg; - runtime = p->runtime; - cpu = p->cpu; - - if (cpu >= CONFIG_MAX_CPUS) { - printk(BIOS_CRIT, - "Invalid CPU number assigned in SMM stub: %d\n", cpu); - return; - } - - /* Set up SMRR. 
*/ - smrr.lo = relo_attrs.smrr_base; - smrr.hi = 0; - wrmsr(SMRR_PHYS_BASE, smrr); - smrr.lo = relo_attrs.smrr_mask; - smrr.hi = 0; - wrmsr(SMRR_PHYS_MASK, smrr); - - /* The relocated handler runs with all CPUs concurrently. Therefore - * stagger the entry points adjusting SMBASE downwards by save state - * size * CPU num. */ - smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + runtime->smbase); - smm_state->smbase = relo_attrs.smbase - cpu * runtime->save_state_size; - printk(BIOS_DEBUG, "New SMBASE 0x%08x\n", smm_state->smbase); -} - -static int install_relocation_handler(int num_cpus) -{ - const int save_state_size = sizeof(em64t100_smm_state_save_area_t); - - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = 1, - .handler = (smm_handler_t)&cpu_smm_do_relocation, - }; - - if (smm_setup_relocation_handler(&smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); + x86_mtrr_check(); - return 0; + /* Enable the local cpu apics */ + setup_lapic(); } -static int install_permanent_handler(int num_cpus) +static int get_cpu_count(void) { - /* There are num_cpus concurrent stacks and num_cpus concurrent save - * state areas. Lastly, set the stack size to the save state size. */ - int save_state_size = sizeof(em64t100_smm_state_save_area_t); - struct smm_loader_params smm_params = { - .per_cpu_stack_size = save_state_size, - .num_concurrent_stacks = num_cpus, - .per_cpu_save_state_size = save_state_size, - .num_concurrent_save_states = num_cpus, - }; - const int tseg_size = smm_region_size() - CONFIG_SMM_RESERVED_SIZE; - - printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n", - relo_attrs.smbase); - - if (smm_load_module((void *)relo_attrs.smbase, tseg_size, &smm_params)) - return -1; - - adjust_apic_id_map(&smm_params); + const struct pattrs *pattrs = pattrs_get(); - return 0; + return pattrs->num_cpus; } -static int smm_load_handlers(void) +static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, + size_t *smm_save_state_size) { /* All range registers are aligned to 4KiB */ const uint32_t rmask = ~((1 << 12) - 1); - const struct pattrs *pattrs = pattrs_get(); /* Initialize global tracking state. */ relo_attrs.smbase = (uint32_t)smm_region_start(); @@ -235,41 +117,65 @@ static int smm_load_handlers(void) relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask; relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID; - /* Install handlers. */ - if (install_relocation_handler(pattrs->num_cpus) < 0) { - printk(BIOS_ERR, "Unable to install SMM relocation handler.\n"); - return -1; - } + *perm_smbase = relo_attrs.smbase; + *perm_smsize = smm_region_size() - CONFIG_SMM_RESERVED_SIZE; + *smm_save_state_size = sizeof(em64t100_smm_state_save_area_t); +} - if (install_permanent_handler(pattrs->num_cpus) < 0) { - printk(BIOS_ERR, "Unable to install SMM permanent handler.\n"); - return -1; - } +/* The APIC id space on Bay Trail is sparse. Each id is separated by 2. */ +static int adjust_apic_id(int index, int apic_id) +{ + return 2 * index; +} - /* Ensure the SMM handlers hit DRAM before performing first SMI. 
*/ - wbinvd(); +static void get_microcode_info(const void **microcode, int *parallel) +{ + const struct pattrs *pattrs = pattrs_get(); - return 0; + *microcode = pattrs->microcode_patch; + *parallel = 1; } -static void smm_relocate(void) +static void relocation_handler(int cpu, uintptr_t curr_smbase, + uintptr_t staggered_smbase) { + msr_t smrr; + em64t100_smm_state_save_area_t *smm_state; - /* Load relocation and permanent handler. */ - if (boot_cpu()) { - if (smm_load_handlers() < 0) { - printk(BIOS_ERR, "Error loading SMM handlers.\n"); - return; - } - southcluster_smm_clear_state(); - } + /* Set up SMRR. */ + smrr.lo = relo_attrs.smrr_base; + smrr.hi = 0; + wrmsr(SMRR_PHYS_BASE, smrr); + smrr.lo = relo_attrs.smrr_mask; + smrr.hi = 0; + wrmsr(SMRR_PHYS_MASK, smrr); - /* Relocate SMM space. */ - smm_initiate_relocation(); + smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase); + smm_state->smbase = staggered_smbase; } static void enable_smis(void) { - southcluster_smm_enable_smi(); + if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) + southcluster_smm_enable_smi(); +} + +static const struct mp_ops mp_ops = { + .pre_mp_init = pre_mp_init, + .get_cpu_count = get_cpu_count, + .get_smm_info = get_smm_info, + .get_microcode_info = get_microcode_info, + .adjust_cpu_apic_entry = adjust_apic_id, + .pre_mp_smm_init = southcluster_smm_clear_state, + .relocation_handler = relocation_handler, + .post_mp_init = enable_smis, +}; + +void baytrail_init_cpus(device_t dev) +{ + struct bus *cpu_bus = dev->link_list; + + if (mp_init_with_smm(cpu_bus, &mp_ops)) { + printk(BIOS_ERR, "MP initialization failure.\n"); + } } -#endif
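One detail specific to the fsp_baytrail version above: the old #if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) preprocessor blocks become an ordinary C conditional inside enable_smis(), which keeps both configurations compiling while the compiler still drops the disabled branch. A minimal sketch of that pattern follows, using a stand-in macro (DEMO_CONFIG_HAVE_SMI_HANDLER) and a stub function rather than the real Kconfig symbol or the southcluster call.

#include <stdio.h>

#define DEMO_CONFIG_HAVE_SMI_HANDLER 1  /* flip to 0: the call still compiles but is elided */

static void demo_enable_smi(void)
{
    printf("SMIs enabled\n");
}

static void demo_post_mp_init(void)
{
    /* The condition is a compile-time constant, so the dead branch is
       optimized out, yet it is still parsed and type-checked. */
    if (DEMO_CONFIG_HAVE_SMI_HANDLER)
        demo_enable_smi();
}

int main(void)
{
    demo_post_mp_init();
    return 0;
}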
Patch set updated for coreboot: soc/intel/quark: Add temperature sensor support
by Leroy P Leahy
04 May '16
04 May '16
Leroy P Leahy (leroy.p.leahy(a)intel.com) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14565
-gerrit commit 5b2360cef65088233f545b5509f744f59d9ffecc Author: Lee Leahy <leroy.p.leahy(a)intel.com> Date: Sat Apr 30 08:48:52 2016 -0700 soc/intel/quark: Add temperature sensor support Migrate the temperature sensor support from QuarkFspPkg into coreboot. TEST=Build and run on Galileo Gen2 Change-Id: I6dc68c735375c9d1777693264674521f67397556 Signed-off-by: Lee Leahy <leroy.p.leahy(a)intel.com> --- src/soc/intel/quark/chip.c | 93 ++++++++++++++++++++++++++++ src/soc/intel/quark/include/soc/reg_access.h | 48 ++++++++++++++ src/soc/intel/quark/reg_access.c | 54 ++++++++++++++++ 3 files changed, 195 insertions(+) diff --git a/src/soc/intel/quark/chip.c b/src/soc/intel/quark/chip.c index 7ea8a06..aecc15a 100644 --- a/src/soc/intel/quark/chip.c +++ b/src/soc/intel/quark/chip.c @@ -17,9 +17,102 @@ #include <console/console.h> #include <device/device.h> #include <soc/ramstage.h> +#include <soc/reg_access.h> + +/* Cat Trip Clear value must be less than Cat Trip Set value */ +#define PLATFORM_CATASTROPHIC_TRIP_CELSIUS 105 +#define PLATFORM_CATASTROPHIC_CLEAR_CELSIUS 65 + +static const struct reg_script init_script[] = { + + /* Setup RMU Thermal sensor registers for Ratiometric mode. */ + REG_SOC_UNIT_RMW(QUARK_SCSS_SOC_UNIT_TSCGF1_CONFIG, + ~(B_TSCGF1_CONFIG_ISNSCURRENTSEL_MASK + | B_TSCGF1_CONFIG_ISNSCHOPSEL_MASK + | B_TSCGF1_CONFIG_ISNSINTERNALVREFEN + | B_TSCGF1_CONFIG_IBGEN + | B_TSCGF1_CONFIG_IBGCHOPEN), + ((V_TSCGF1_CONFIG_ISNSCURRENTSEL_RATIO_MODE + << B_TSCGF1_CONFIG_ISNSCURRENTSEL_BP) + | (V_TSCGF1_CONFIG_ISNSCHOPSEL_RATIO_MODE + << B_TSCGF1_CONFIG_ISNSCHOPSEL_BP) + | (V_TSCGF1_CONFIG_ISNSINTERNALVREFEN_RATIO_MODE + << B_TSCGF1_CONFIG_ISNSINTERNALVREFEN_BP) + | (V_TSCGF1_CONFIG_IBGEN_RATIO_MODE + << B_TSCGF1_CONFIG_IBGEN_BP) + | (V_TSCGF1_CONFIG_IBGCHOPEN_RATIO_MODE + << B_TSCGF1_CONFIG_IBGCHOPEN_BP))), + + REG_SOC_UNIT_RMW(QUARK_SCSS_SOC_UNIT_TSCGF2_CONFIG2, + ~(B_TSCGF2_CONFIG2_ICALCONFIGSEL_MASK + | B_TSCGF2_CONFIG2_ISPARECTRL_MASK + | B_TSCGF2_CONFIG2_ICALCOARSETUNE_MASK), + ((V_TSCGF2_CONFIG2_ICALCONFIGSEL_RATIO_MODE + << B_TSCGF2_CONFIG2_ICALCONFIGSEL_BP) + | (V_TSCGF2_CONFIG2_ISPARECTRL_RATIO_MODE + << B_TSCGF2_CONFIG2_ISPARECTRL_BP) + | (V_TSCGF2_CONFIG2_ICALCOARSETUNE_RATIO_MODE + << B_TSCGF2_CONFIG2_ICALCOARSETUNE_BP))), + + REG_SOC_UNIT_RMW(QUARK_SCSS_SOC_UNIT_TSCGF2_CONFIG, + ~(B_TSCGF2_CONFIG_IDSCONTROL_MASK + | B_TSCGF2_CONFIG_IDSTIMING_MASK), + ((V_TSCGF2_CONFIG_IDSCONTROL_RATIO_MODE + << B_TSCGF2_CONFIG_IDSCONTROL_BP) + | (V_TSCGF2_CONFIG_IDSTIMING_RATIO_MODE + << B_TSCGF2_CONFIG_IDSTIMING_BP))), + + REG_SOC_UNIT_RMW(QUARK_SCSS_SOC_UNIT_TSCGF3_CONFIG, + ~B_TSCGF3_CONFIG_ITSGAMMACOEFF_MASK, + V_TSCGF3_CONFIG_ITSGAMMACOEFF_RATIO_MODE + << B_TSCGF3_CONFIG_ITSGAMMACOEFF_BP), + + /* Enable RMU Thermal sensor with a Catastrophic Trip point. */ + + /* Setup Catastrophic Trip point. + * + * Trip Register fields are 8-bit temperature values of granularity 1 + * degree C where 0x00 corresponds to -50 degrees C and 0xFF corresponds + * to 205 degrees C. + * + * Add 50 to values to Celsius values to get values for register fields. + */ + REG_RMU_TEMP_RMW(QUARK_NC_RMU_REG_TS_TRIP, + ~(TS_CAT_TRIP_SET_THOLD_MASK | TS_CAT_TRIP_CLEAR_THOLD_MASK), + (((PLATFORM_CATASTROPHIC_TRIP_CELSIUS + 50) + << TS_CAT_TRIP_SET_THOLD_BP) + | ((PLATFORM_CATASTROPHIC_CLEAR_CELSIUS + 50) + << TS_CAT_TRIP_CLEAR_THOLD_BP))), + + /* To enable the TS do the following: + * 1) Take the TS out of reset by setting itsrst to 0x0. + * 2) Enable the TS using RMU Thermal sensor mode register. 
+ */ + REG_SOC_UNIT_AND(QUARK_SCSS_SOC_UNIT_TSCGF3_CONFIG, + ~B_TSCGF3_CONFIG_ITSRST), + REG_RMU_TEMP_OR(QUARK_NC_RMU_REG_TS_MODE, TS_ENABLE), + + /* Lock all RMU Thermal sensor control & trip point registers. */ + REG_RMU_TEMP_OR(QUARK_NC_RMU_REG_CONFIG, TS_LOCK_THRM_CTRL_REGS_ENABLE + | TS_LOCK_AUX_TRIP_PT_REGS_ENABLE), + REG_SCRIPT_END +}; static void chip_init(void *chip_info) { + /* Validate the temperature settings */ + ASSERT(PLATFORM_CATASTROPHIC_TRIP_CELSIUS <= 255); + ASSERT(PLATFORM_CATASTROPHIC_TRIP_CELSIUS + > PLATFORM_CATASTROPHIC_CLEAR_CELSIUS); + + /* Set the temperature settings */ + reg_script_run(init_script); + ASSERT((reg_rmu_temp_read(QUARK_NC_RMU_REG_CONFIG) + & (TS_LOCK_THRM_CTRL_REGS_ENABLE + | TS_LOCK_AUX_TRIP_PT_REGS_ENABLE)) + == (TS_LOCK_THRM_CTRL_REGS_ENABLE + | TS_LOCK_AUX_TRIP_PT_REGS_ENABLE)); + /* Perform silicon specific init. */ if (IS_ENABLED(CONFIG_RELOCATE_FSP_INTO_DRAM)) intel_silicon_init(); diff --git a/src/soc/intel/quark/include/soc/reg_access.h b/src/soc/intel/quark/include/soc/reg_access.h index 934c75d..ddca668 100644 --- a/src/soc/intel/quark/include/soc/reg_access.h +++ b/src/soc/intel/quark/include/soc/reg_access.h @@ -18,10 +18,13 @@ #include <fsp/util.h> #include <reg_script.h> +#include <soc/IntelQNCConfig.h> #include <soc/QuarkNcSocId.h> enum { USB_PHY_REGS = 1, + SOC_UNIT_REGS, + RMU_TEMP_REGS, }; enum { @@ -32,6 +35,50 @@ enum { #define SOC_ACCESS(cmd_, reg_, size_, mask_, value_, timeout_, reg_set_) \ _REG_SCRIPT_ENCODE_RAW(REG_SCRIPT_COMMAND_##cmd_, SOC_TYPE, \ size_, reg_, mask_, value_, timeout_, reg_set_) + +/* RMU temperature register access macros */ +#define REG_RMU_TEMP_ACCESS(cmd_, reg_, mask_, value_, timeout_) \ + SOC_ACCESS(cmd_, reg_, REG_SCRIPT_SIZE_32, mask_, value_, timeout_, \ + RMU_TEMP_REGS) +#define REG_RMU_TEMP_READ(reg_) \ + REG_RMU_TEMP_ACCESS(READ, reg_, 0, 0, 0) +#define REG_RMU_TEMP_WRITE(reg_, value_) \ + REG_RMU_TEMP_ACCESS(WRITE, reg_, 0, value_, 0) +#define REG_RMU_TEMP_AND(reg_, value_) \ + REG_RMU_TEMP_RMW(reg_, value_, 0) +#define REG_RMU_TEMP_RMW(reg_, mask_, value_) \ + REG_RMU_TEMP_ACCESS(RMW, reg_, mask_, value_, 0) +#define REG_RMU_TEMP_RXW(reg_, mask_, value_) \ + REG_RMU_TEMP_ACCESS(RXW, reg_, mask_, value_, 0) +#define REG_RMU_TEMP_OR(reg_, value_) \ + REG_RMU_TEMP_RMW(reg_, 0xffffffff, value_) +#define REG_RMU_TEMP_POLL(reg_, mask_, value_, timeout_) \ + REG_RMU_TEMP_ACCESS(POLL, reg_, mask_, value_, timeout_) +#define REG_RMU_TEMP_XOR(reg_, value_) \ + REG_RMU_TEMP_RXW(reg_, 0xffffffff, value_) + +/* Temperature sensor access macros */ +#define REG_SOC_UNIT_ACCESS(cmd_, reg_, mask_, value_, timeout_) \ + SOC_ACCESS(cmd_, reg_, REG_SCRIPT_SIZE_32, mask_, value_, timeout_, \ + SOC_UNIT_REGS) +#define REG_SOC_UNIT_READ(reg_) \ + REG_SOC_UNIT_ACCESS(READ, reg_, 0, 0, 0) +#define REG_SOC_UNIT_WRITE(reg_, value_) \ + REG_SOC_UNIT_ACCESS(WRITE, reg_, 0, value_, 0) +#define REG_SOC_UNIT_AND(reg_, value_) \ + REG_SOC_UNIT_RMW(reg_, value_, 0) +#define REG_SOC_UNIT_RMW(reg_, mask_, value_) \ + REG_SOC_UNIT_ACCESS(RMW, reg_, mask_, value_, 0) +#define REG_SOC_UNIT_RXW(reg_, mask_, value_) \ + REG_SOC_UNIT_ACCESS(RXW, reg_, mask_, value_, 0) +#define REG_SOC_UNIT_OR(reg_, value_) \ + REG_SOC_UNIT_RMW(reg_, 0xffffffff, value_) +#define REG_SOC_UNIT_POLL(reg_, mask_, value_, timeout_) \ + REG_SOC_UNIT_ACCESS(POLL, reg_, mask_, value_, timeout_) +#define REG_SOC_UNIT_XOR(reg_, value_) \ + REG_SOC_UNIT_RXW(reg_, 0xffffffff, value_) + +/* USB register access macros */ #define REG_USB_ACCESS(cmd_, reg_, 
mask_, value_, timeout_) \ SOC_ACCESS(cmd_, reg_, REG_SCRIPT_SIZE_32, mask_, value_, timeout_, \ USB_PHY_REGS) @@ -56,5 +103,6 @@ void mcr_write(uint8_t opcode, uint8_t port, uint32_t reg_address); uint32_t mdr_read(void); void mdr_write(uint32_t value); void mea_write(uint32_t reg_address); +uint32_t reg_rmu_temp_read(uint32_t reg_address); #endif /* _QUARK_REG_ACCESS_H_ */ diff --git a/src/soc/intel/quark/reg_access.c b/src/soc/intel/quark/reg_access.c index 278fc7e..fb80f3d 100644 --- a/src/soc/intel/quark/reg_access.c +++ b/src/soc/intel/quark/reg_access.c @@ -45,6 +45,40 @@ void mea_write(uint32_t reg_address) & QNC_MEA_MASK); } +uint32_t reg_rmu_temp_read(uint32_t reg_address) +{ + /* Read the RMU temperature register */ + mea_write(reg_address); + mcr_write(QUARK_OPCODE_READ, QUARK_NC_RMU_SB_PORT_ID, reg_address); + return mdr_read(); +} + +static void reg_rmu_temp_write(uint32_t reg_address, uint32_t value) +{ + /* Write the RMU temperature register */ + mea_write(reg_address); + mdr_write(value); + mcr_write(QUARK_OPCODE_WRITE, QUARK_NC_RMU_SB_PORT_ID, reg_address); +} + +static uint32_t reg_soc_unit_read(uint32_t reg_address) +{ + /* Read the temperature sensor register */ + mea_write(reg_address); + mcr_write(QUARK_ALT_OPCODE_READ, QUARK_SCSS_SOC_UNIT_SB_PORT_ID, + reg_address); + return mdr_read(); +} + +static void reg_soc_unit_write(uint32_t reg_address, uint32_t value) +{ + /* Write the temperature sensor register */ + mea_write(reg_address); + mdr_write(value); + mcr_write(QUARK_ALT_OPCODE_WRITE, QUARK_SCSS_SOC_UNIT_SB_PORT_ID, + reg_address); +} + static uint32_t reg_usb_read(uint32_t reg_address) { /* Read the USB register */ @@ -76,6 +110,16 @@ static uint64_t reg_read(struct reg_script_context *ctx) ctx->display_features = REG_SCRIPT_DISPLAY_NOTHING; return 0; + case RMU_TEMP_REGS: + ctx->display_prefix = "RMU TEMP"; + value = reg_rmu_temp_read(step->reg); + break; + + case SOC_UNIT_REGS: + ctx->display_prefix = "SOC Unit"; + value = reg_soc_unit_read(step->reg); + break; + case USB_PHY_REGS: ctx->display_prefix = "USB PHY"; value = reg_usb_read(step->reg); @@ -96,6 +140,16 @@ static void reg_write(struct reg_script_context *ctx) ctx->display_features = REG_SCRIPT_DISPLAY_NOTHING; return; + case RMU_TEMP_REGS: + ctx->display_prefix = "RMU TEMP"; + reg_rmu_temp_write(step->reg, (uint32_t)step->value); + break; + + case SOC_UNIT_REGS: + ctx->display_prefix = "SOC Unit"; + reg_soc_unit_write(step->reg, (uint32_t)step->value); + break; + case USB_PHY_REGS: ctx->display_prefix = "USB PHY"; reg_usb_write(step->reg, (uint32_t)step->value);
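The trip-point comment in the patch above spells out the encoding: the 8-bit fields cover -50 C to 205 C at 1 C granularity, so the register value is simply the Celsius value plus 50 (105 C set and 65 C clear become 155 and 115). A small standalone check of that arithmetic follows; the bit positions used for packing are placeholders, not the real TS_CAT_TRIP_*_THOLD_BP values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TRIP_SET_CELSIUS   105
#define TRIP_CLEAR_CELSIUS  65

int main(void)
{
    /* Clear must sit below set, and both encoded values must fit in 8 bits. */
    assert(TRIP_CLEAR_CELSIUS < TRIP_SET_CELSIUS);
    assert(TRIP_SET_CELSIUS + 50 <= 255);

    uint8_t set_field   = TRIP_SET_CELSIUS + 50;    /* 105 C -> 155 (0x9b) */
    uint8_t clear_field = TRIP_CLEAR_CELSIUS + 50;  /*  65 C -> 115 (0x73) */

    /* Assumed layout for illustration: set field in bits 15:8, clear in 7:0. */
    uint32_t ts_trip = ((uint32_t)set_field << 8) | clear_field;
    printf("set=0x%02x clear=0x%02x TS_TRIP=0x%08x\n",
           set_field, clear_field, ts_trip);
    return 0;
}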
Patch set updated for coreboot: soc/intel/quark: Add USB PHY initialization
by Leroy P Leahy
04 May '16
04 May '16
Leroy P Leahy (leroy.p.leahy(a)intel.com) just uploaded a new patch set to gerrit, which you can find at
https://review.coreboot.org/14496
-gerrit commit fd7850e3200b2d410d6cbe55bd6d93f11e0b6244 Author: Lee Leahy <leroy.p.leahy(a)intel.com> Date: Mon May 2 14:31:02 2016 -0700 soc/intel/quark: Add USB PHY initialization Add register access support using register scripts. Initialize the USB PHY using register scripts. TEST=Build and run on Galileo Gen2 Change-Id: I34a8e78eab3c7314ca34343eccc8aeef0622798a Signed-off-by: Lee Leahy <leroy.p.leahy(a)intel.com> --- src/soc/intel/quark/Kconfig | 1 + src/soc/intel/quark/Makefile.inc | 3 + src/soc/intel/quark/include/soc/pci_devs.h | 6 +- src/soc/intel/quark/include/soc/reg_access.h | 60 +++++++++++++++ src/soc/intel/quark/include/soc/romstage.h | 7 +- src/soc/intel/quark/reg_access.c | 110 +++++++++++++++++++++++++++ src/soc/intel/quark/romstage/mtrr.c | 25 ------ src/soc/intel/quark/usb.c | 92 ++++++++++++++++++++++ 8 files changed, 271 insertions(+), 33 deletions(-) diff --git a/src/soc/intel/quark/Kconfig b/src/soc/intel/quark/Kconfig index aab509a..8485aa3 100644 --- a/src/soc/intel/quark/Kconfig +++ b/src/soc/intel/quark/Kconfig @@ -26,6 +26,7 @@ config CPU_SPECIFIC_OPTIONS select ARCH_RAMSTAGE_X86_32 select ARCH_ROMSTAGE_X86_32 select ARCH_VERSTAGE_X86_32 + select REG_SCRIPT select SOC_INTEL_COMMON select SOC_SETS_MTRRS select TSC_CONSTANT_RATE diff --git a/src/soc/intel/quark/Makefile.inc b/src/soc/intel/quark/Makefile.inc index d8650fa..3a865b8 100644 --- a/src/soc/intel/quark/Makefile.inc +++ b/src/soc/intel/quark/Makefile.inc @@ -19,6 +19,7 @@ subdirs-y += romstage subdirs-y += ../../../cpu/x86/tsc romstage-y += memmap.c +romstage-y += reg_access.c romstage-y += tsc_freq.c romstage-$(CONFIG_ENABLE_BUILTIN_HSUART1) += uart_common.c @@ -27,9 +28,11 @@ ramstage-y += chip.c ramstage-y += memmap.c ramstage-y += northcluster.c ramstage-y += pmc.c +ramstage-y += reg_access.c ramstage-y += tsc_freq.c ramstage-$(CONFIG_ENABLE_BUILTIN_HSUART1) += uart_common.c ramstage-$(CONFIG_ENABLE_BUILTIN_HSUART1) += uart.c +ramstage-y += usb.c CPPFLAGS_common += -I$(src)/soc/intel/quark CPPFLAGS_common += -I$(src)/soc/intel/quark/include diff --git a/src/soc/intel/quark/include/soc/pci_devs.h b/src/soc/intel/quark/include/soc/pci_devs.h index 4f577ce..a912c4c 100644 --- a/src/soc/intel/quark/include/soc/pci_devs.h +++ b/src/soc/intel/quark/include/soc/pci_devs.h @@ -25,9 +25,11 @@ /* DEVICE 0 (Memroy Controller Hub) */ #define MC_BDF PCI_DEV(PCI_BUS_NUMBER_QNC, MC_DEV, MC_FUN) -/* IO Fabric 1 */ -#define HSUART_DEVID 0x0936 +/* Device IDs */ +#define HSUART_DEVID 0x0936 +#define EHCI_DEVID 0x0939 +/* IO Fabric 1 */ #define SIO1_DEV 0x14 # define HSUART1_DEV SIO1_DEV # define HSUART1_FUNC 5 diff --git a/src/soc/intel/quark/include/soc/reg_access.h b/src/soc/intel/quark/include/soc/reg_access.h new file mode 100644 index 0000000..934c75d --- /dev/null +++ b/src/soc/intel/quark/include/soc/reg_access.h @@ -0,0 +1,60 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2016 Intel Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _QUARK_REG_ACCESS_H_ +#define _QUARK_REG_ACCESS_H_ + +#include <fsp/util.h> +#include <reg_script.h> +#include <soc/QuarkNcSocId.h> + +enum { + USB_PHY_REGS = 1, +}; + +enum { + SOC_TYPE = REG_SCRIPT_TYPE_SOC_BASE, + /* Add additional SOC access types here*/ +}; + +#define SOC_ACCESS(cmd_, reg_, size_, mask_, value_, timeout_, reg_set_) \ + _REG_SCRIPT_ENCODE_RAW(REG_SCRIPT_COMMAND_##cmd_, SOC_TYPE, \ + size_, reg_, mask_, value_, timeout_, reg_set_) +#define REG_USB_ACCESS(cmd_, reg_, mask_, value_, timeout_) \ + SOC_ACCESS(cmd_, reg_, REG_SCRIPT_SIZE_32, mask_, value_, timeout_, \ + USB_PHY_REGS) +#define REG_USB_READ(reg_) \ + REG_USB_ACCESS(READ, reg_, 0, 0, 0) +#define REG_USB_WRITE(reg_, value_) \ + REG_USB_ACCESS(WRITE, reg_, 0, value_, 0) +#define REG_USB_AND(reg_, value_) \ + REG_USB_RMW(reg_, value_, 0) +#define REG_USB_RMW(reg_, mask_, value_) \ + REG_USB_ACCESS(RMW, reg_, mask_, value_, 0) +#define REG_USB_RXW(reg_, mask_, value_) \ + REG_USB_ACCESS(RXW, reg_, mask_, value_, 0) +#define REG_USB_OR(reg_, value_) \ + REG_USB_RMW(reg_, 0xffffffff, value_) +#define REG_USB_POLL(reg_, mask_, value_, timeout_) \ + REG_USB_ACCESS(POLL, reg_, mask_, value_, timeout_) +#define REG_USB_XOR(reg_, value_) \ + REG_USB_RXW(reg_, 0xffffffff, value_) + +void mcr_write(uint8_t opcode, uint8_t port, uint32_t reg_address); +uint32_t mdr_read(void); +void mdr_write(uint32_t value); +void mea_write(uint32_t reg_address); + +#endif /* _QUARK_REG_ACCESS_H_ */ diff --git a/src/soc/intel/quark/include/soc/romstage.h b/src/soc/intel/quark/include/soc/romstage.h index c2c7e9c..c344ada 100644 --- a/src/soc/intel/quark/include/soc/romstage.h +++ b/src/soc/intel/quark/include/soc/romstage.h @@ -23,13 +23,8 @@ #endif #include <fsp/romstage.h> -#include <fsp/util.h> -#include <soc/QuarkNcSocId.h> +#include <soc/reg_access.h> -void mcr_write(uint8_t opcode, uint8_t port, uint32_t reg_address); -uint32_t mdr_read(void); -void mdr_write(uint32_t value); -void mea_write(uint32_t reg_address); uint32_t port_reg_read(uint8_t port, uint32_t offset); void port_reg_write(uint8_t port, uint32_t offset, uint32_t value); void report_platform_info(void); diff --git a/src/soc/intel/quark/reg_access.c b/src/soc/intel/quark/reg_access.c new file mode 100644 index 0000000..278fc7e --- /dev/null +++ b/src/soc/intel/quark/reg_access.c @@ -0,0 +1,110 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2016 Intel Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied wacbmem_entryanty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define __SIMPLE_DEVICE__ + +#include <arch/io.h> +#include <console/console.h> +#include <soc/pci_devs.h> +#include <soc/reg_access.h> + +void mcr_write(uint8_t opcode, uint8_t port, uint32_t reg_address) +{ + pci_write_config32(MC_BDF, QNC_ACCESS_PORT_MCR, + (opcode << QNC_MCR_OP_OFFSET) + | ((uint32_t)port << QNC_MCR_PORT_OFFSET) + | ((reg_address & QNC_MCR_MASK) << QNC_MCR_REG_OFFSET) + | QNC_MCR_BYTE_ENABLES); +} + +uint32_t mdr_read(void) +{ + return pci_read_config32(MC_BDF, QNC_ACCESS_PORT_MDR); +} + +void mdr_write(uint32_t value) +{ + pci_write_config32(MC_BDF, QNC_ACCESS_PORT_MDR, value); +} + +void mea_write(uint32_t reg_address) +{ + pci_write_config32(MC_BDF, QNC_ACCESS_PORT_MEA, reg_address + & QNC_MEA_MASK); +} + +static uint32_t reg_usb_read(uint32_t reg_address) +{ + /* Read the USB register */ + mea_write(reg_address); + mcr_write(QUARK_ALT_OPCODE_READ, QUARK_SC_USB_AFE_SB_PORT_ID, + reg_address); + return mdr_read(); +} + +static void reg_usb_write(uint32_t reg_address, uint32_t value) +{ + /* Write the USB register */ + mea_write(reg_address); + mdr_write(value); + mcr_write(QUARK_ALT_OPCODE_WRITE, QUARK_SC_USB_AFE_SB_PORT_ID, + reg_address); +} + +static uint64_t reg_read(struct reg_script_context *ctx) +{ + const struct reg_script *step = ctx->step; + uint64_t value = 0; + + switch (step->id) { + default: + printk(BIOS_ERR, + "ERROR - Unknown register set (0x%08x)!\n", + step->id); + ctx->display_features = REG_SCRIPT_DISPLAY_NOTHING; + return 0; + + case USB_PHY_REGS: + ctx->display_prefix = "USB PHY"; + value = reg_usb_read(step->reg); + break; + } + return value; +} + +static void reg_write(struct reg_script_context *ctx) +{ + const struct reg_script *step = ctx->step; + + switch (step->id) { + default: + printk(BIOS_ERR, + "ERROR - Unknown register set (0x%08x)!\n", + step->id); + ctx->display_features = REG_SCRIPT_DISPLAY_NOTHING; + return; + + case USB_PHY_REGS: + ctx->display_prefix = "USB PHY"; + reg_usb_write(step->reg, (uint32_t)step->value); + break; + } +} + +const struct reg_script_bus_entry soc_reg_script_bus_table = { + SOC_TYPE, reg_read, reg_write +}; + +REG_SCRIPT_BUS_ENTRY(soc_reg_script_bus_table); diff --git a/src/soc/intel/quark/romstage/mtrr.c b/src/soc/intel/quark/romstage/mtrr.c index 8b237a3..f03be1d 100644 --- a/src/soc/intel/quark/romstage/mtrr.c +++ b/src/soc/intel/quark/romstage/mtrr.c @@ -21,31 +21,6 @@ #include <soc/pci_devs.h> #include <soc/romstage.h> -void mcr_write(uint8_t opcode, uint8_t port, uint32_t reg_address) -{ - pci_write_config32(MC_BDF, QNC_ACCESS_PORT_MCR, - (opcode << QNC_MCR_OP_OFFSET) - | ((uint32_t)port << QNC_MCR_PORT_OFFSET) - | ((reg_address & QNC_MCR_MASK) << QNC_MCR_REG_OFFSET) - | QNC_MCR_BYTE_ENABLES); -} - -uint32_t mdr_read(void) -{ - return pci_read_config32(MC_BDF, QNC_ACCESS_PORT_MDR); -} - -void mdr_write(uint32_t value) -{ - pci_write_config32(MC_BDF, QNC_ACCESS_PORT_MDR, value); -} - -void mea_write(uint32_t reg_address) -{ - pci_write_config32(MC_BDF, QNC_ACCESS_PORT_MEA, reg_address - & QNC_MEA_MASK); -} - static uint32_t mtrr_index_to_host_bridge_register_offset(unsigned long index) { uint32_t offset; diff --git a/src/soc/intel/quark/usb.c b/src/soc/intel/quark/usb.c new file mode 100644 index 0000000..460ef23 --- /dev/null +++ b/src/soc/intel/quark/usb.c @@ -0,0 +1,92 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2016 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <console/console.h> +#include <device/pci_ids.h> +#include <soc/pci_devs.h> +#include <soc/reg_access.h> + +/* USB Phy Registers */ +#define USB2_GLOBAL_PORT 0x4001 +#define USB2_PLL1 0x7F02 +#define USB2_PLL2 0x7F03 +#define USB2_COMPBG 0x7F04 + +/* In order to configure the USB PHY to use clk120 (ickusbcoreclk) as PLL + * reference clock and Port2 as a USB device port, the following sequence must + * be followed + */ +static const struct reg_script init_script[] = { + + /* Sighting #4930631 PDNRESCFG [8:7] of USB2_GLOBAL_PORT = 11b. + * For port 0 & 1 as host and port 2 as device. + */ + REG_USB_RXW(USB2_GLOBAL_PORT, ~(BIT8 | BIT7 | BIT1), (BIT8 | BIT7)), + + /* + * Sighting #4930653 Required BIOS change on Disconnect vref to change + * to 600mV. + */ + REG_USB_RXW(USB2_COMPBG, ~(BIT10 | BIT9 | BIT8 | BIT7), + (BIT10 | BIT7)), + + /* Sideband register write to USB AFE (Phy) + * (pllbypass) to bypass/Disable PLL before switch + */ + REG_USB_OR(USB2_PLL2, BIT29), + + /* Sideband register write to USB AFE (Phy) + * (coreclksel) to select 120MHz (ickusbcoreclk) clk source. + * (Default 0 to select 96MHz (ickusbclk96_npad/ppad)) + */ + REG_USB_OR(USB2_PLL1, BIT1), + + /* Sideband register write to USB AFE (Phy) + * (divide by 8) to achieve internal 480MHz clock + * for 120MHz input refclk. (Default: 4'b1000 (divide by 10) for 96MHz) + */ + REG_USB_RXW(USB2_PLL1, ~(BIT6 | BIT5 | BIT4 | BIT3), BIT6), + + /* Sideband register write to USB AFE (Phy) + * Clear (pllbypass) + */ + REG_USB_AND(USB2_PLL2, ~BIT29), + + /* Sideband register write to USB AFE (Phy) + * Set (startlock) to force the PLL FSM to restart the lock + * sequence due to input clock/freq switch. + */ + REG_USB_OR(USB2_PLL2, BIT24), + REG_SCRIPT_END +}; + +static void init(device_t dev) +{ + printk(BIOS_INFO, "Initializing USB PLLs\n"); + reg_script_run_on_dev(dev, init_script); +} + +static struct device_operations device_ops = { + .read_resources = pci_dev_read_resources, + .set_resources = pci_dev_set_resources, + .enable_resources = pci_dev_enable_resources, + .init = init, +}; + +static const struct pci_driver driver __pci_driver = { + .ops = &device_ops, + .vendor = PCI_VENDOR_ID_INTEL, + .device = EHCI_DEVID, +};
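The reg_access.c helpers in the two Quark patches all follow the same message-port handshake: write the extended address to MEA, for writes place the data in MDR, post an opcode/port command to MCR, and for reads fetch the result from MDR afterwards. A standalone sketch of that read sequence follows; the PCI config accessors are stubs and the opcode, port, and MCR field layout are placeholder values, not the QNC_MCR_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for 32-bit PCI config accesses to the host bridge. */
static void pci_cfg_write32(const char *reg, uint32_t val)
{
    printf("cfg write %-3s = 0x%08x\n", reg, val);
}

static uint32_t pci_cfg_read32(const char *reg)
{
    printf("cfg read  %-3s\n", reg);
    return 0x00001234;  /* canned value standing in for the hardware response */
}

static uint32_t sideband_read(uint8_t opcode, uint8_t port, uint32_t addr)
{
    pci_cfg_write32("MEA", addr & ~0xffu);               /* extended address */
    pci_cfg_write32("MCR", ((uint32_t)opcode << 24) |    /* post the command */
                           ((uint32_t)port << 16) |
                           ((addr & 0xff) << 8) | 0xf0); /* byte enables     */
    return pci_cfg_read32("MDR");                        /* fetch the result */
}

int main(void)
{
    /* Opcode, port and register offset here are illustrative only. */
    uint32_t v = sideband_read(0x06, 0x14, 0x7f02);
    printf("register value = 0x%08x\n", v);
    return 0;
}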