Ravishankar Sarawadi (ravishankar.sarawadi@intel.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/14536
-gerrit
commit b1830bfa65f336ae0a5baadac9de04f013b962b2
Author: Ravi Sarawadi <ravishankar.sarawadi@intel.com>
Date:   Wed Apr 27 15:25:27 2016 -0700
soc/intel/apollolake: Add SMM support
Add the SMM support components for Apollo Lake: an SMI handler, SMM relocation of the BSP and APs into TSEG, SMRAM region/subregion mapping helpers, and the related Kconfig and Makefile plumbing.
Change-Id: I7c8e453980f5789171db038fa9509f7fa6f27a31
Signed-off-by: Ravi Sarawadi <ravishankar.sarawadi@intel.com>
---
 src/soc/intel/apollolake/Kconfig           |  43 +++
 src/soc/intel/apollolake/Makefile.inc      |   8 +-
 src/soc/intel/apollolake/cpu.c             |  80 +++++-
 src/soc/intel/apollolake/include/soc/cpu.h |  37 ++-
 src/soc/intel/apollolake/include/soc/nvs.h |   6 +
 src/soc/intel/apollolake/include/soc/pm.h  |   1 +
 src/soc/intel/apollolake/include/soc/smm.h | 111 ++++++++
 src/soc/intel/apollolake/memmap.c          | 111 +++++++-
 src/soc/intel/apollolake/smi.c             |  91 ++++++
 src/soc/intel/apollolake/smihandler.c      | 407 +++++++++++++++++++++++++++
 src/soc/intel/apollolake/smmrelocate.c     | 432 +++++++++++++++++++++++++++++
 11 files changed, 1310 insertions(+), 17 deletions(-)
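The new smihandler.c routes sleep and APM command events through the standard mainboard hooks (mainboard_smi_sleep() and mainboard_smi_apmc()). A minimal sketch of what a board port might provide is below; the prototypes are assumed to match the declarations in <cpu/x86/smm.h>, and the bodies are purely illustrative, not part of this patch:

/*
 * Illustrative mainboard hook implementations for a board using this SoC
 * SMI handler. smihandler.c in this change calls mainboard_smi_sleep()
 * and mainboard_smi_apmc(); prototypes assumed from <cpu/x86/smm.h>.
 */
#include <console/console.h>
#include <cpu/x86/smm.h>
#include <types.h>

void mainboard_smi_sleep(u8 slp_typ)
{
	/* Board-specific work (GPIOs, rails) before S3/S5 entry. */
	printk(BIOS_DEBUG, "mainboard: preparing for sleep type S%d\n",
	       slp_typ);
}

int mainboard_smi_apmc(u8 apmc)
{
	/* React to APM commands (e.g. ACPI enable/disable) if needed. */
	printk(BIOS_DEBUG, "mainboard: APMC 0x%02x\n", apmc);
	return 0;
}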
diff --git a/src/soc/intel/apollolake/Kconfig b/src/soc/intel/apollolake/Kconfig index 7cd548b..2524f4a 100644 --- a/src/soc/intel/apollolake/Kconfig +++ b/src/soc/intel/apollolake/Kconfig @@ -11,6 +11,7 @@ config CPU_SPECIFIC_OPTIONS select ARCH_RAMSTAGE_X86_32 select ARCH_ROMSTAGE_X86_32 select ARCH_VERSTAGE_X86_32 + select BACKUP_DEFAULT_SMM_REGION # CPU specific options select CPU_INTEL_FIRMWARE_INTERFACE_TABLE select IOAPIC @@ -21,6 +22,7 @@ config CPU_SPECIFIC_OPTIONS select C_ENVIRONMENT_BOOTBLOCK select COLLECT_TIMESTAMPS select HAVE_INTEL_FIRMWARE + select HAVE_SMI_HANDLER select MMCONF_SUPPORT select MMCONF_SUPPORT_DEFAULT select PARALLEL_MP @@ -40,6 +42,8 @@ config CPU_SPECIFIC_OPTIONS select PLATFORM_USES_FSP2_0 select HAVE_HARD_RESET select SOC_INTEL_COMMON + select SMM_MODULES + select SMM_TSEG
config SOC_INTEL_COMMON_RESET bool @@ -110,4 +114,43 @@ config CACHE_MRC_SETTINGS bool default y
+config FSP_M_FILE + string "Intel FSP-M (memory init) binary path and filename" + depends on ADD_FSP_BINARIES + help + The path and filename of the Intel FSP-M binary for this platform. + +config FSP_S_FILE + string "Intel FSP-S (silicon init) binary path and filename" + depends on ADD_FSP_BINARIES + help + The path and filename of the Intel FSP-S binary for this platform. + +config VBT_FILE + string "VBT binary path and filename" + depends on ADD_VBT_DATA_FILE + help + The path and filename of the VBT binary for this platform. + +config ADD_FSP_BINARIES + bool "Add Intel FSP 2.0 binaries to CBFS" + help + Add the FSP-M and FSP-S binaries to CBFS. Note that coreboot does not + use the FSP-T binary, so that will not be included. + +config ADD_VBT_DATA_FILE + bool "Add a Video Bios Table (VBT) binary to CBFS" + help + Add a VBT file data file to CBFS. The VBT describes the integrated + GPU and connections, and is needed by FSP in order to initialize the + display. + +config SMM_RESERVED_SIZE + hex + default 0x200000 + +config SMM_TSEG_SIZE + hex + default 0x800000 + endif diff --git a/src/soc/intel/apollolake/Makefile.inc b/src/soc/intel/apollolake/Makefile.inc index f0a33f7..583be2a 100644 --- a/src/soc/intel/apollolake/Makefile.inc +++ b/src/soc/intel/apollolake/Makefile.inc @@ -28,8 +28,8 @@ romstage-y += mmap_boot.c romstage-y += tsc_freq.c romstage-y += pmutil.c
-smm-y += placeholders.c smm-y += pmutil.c +smm-y += smihandler.c
ramstage-$(CONFIG_HAVE_ACPI_TABLES) += acpi.c ramstage-y += cpu.c @@ -45,6 +45,8 @@ ramstage-y += northbridge.c ramstage-y += spi.c ramstage-y += tsc_freq.c ramstage-y += pmutil.c +ramstage-y += smi.c +ramstage-y += smmrelocate.c
postcar-y += exit_car.S postcar-y += memmap.c @@ -52,6 +54,10 @@ postcar-y += mmap_boot.c postcar-$(CONFIG_SOC_UART_DEBUG) += uart_early.c postcar-y += tsc_freq.c
+smm-y += pmutil.c +smm-y += uart_early.c +smm-y += tsc_freq.c + CPPFLAGS_common += -I$(src)/soc/intel/apollolake/include
endif diff --git a/src/soc/intel/apollolake/cpu.c b/src/soc/intel/apollolake/cpu.c index 03fae0e..9e08614 100644 --- a/src/soc/intel/apollolake/cpu.c +++ b/src/soc/intel/apollolake/cpu.c @@ -21,13 +21,38 @@ #include <cpu/x86/cache.h> #include <cpu/x86/mp.h> #include <cpu/x86/msr.h> +#include <cpu/x86/lapic.h> #include <cpu/x86/mtrr.h> +#include <cpu/x86/smm.h> #include <device/device.h> #include <device/pci.h> #include <soc/cpu.h> +#include <soc/smm.h> + +static void enable_lapic_tpr(void) +{ + msr_t msr; + + msr = rdmsr(MSR_PIC_MSG_CONTROL); + msr.lo &= ~(1 << 10); /* Enable APIC TPR updates */ + wrmsr(MSR_PIC_MSG_CONTROL, msr); +} + +static void cpu_core_init(device_t cpu) +{ + /* Turn on cache */ + x86_enable_cache(); + /* Set up Memory Type Range Registers */ + x86_setup_mtrrs(); + x86_mtrr_check(); + + /* Enable the local cpu apics */ + enable_lapic_tpr(); + setup_lapic(); +}
static struct device_operations cpu_dev_ops = { - .init = DEVICE_NOOP, + .init = cpu_core_init, };
static struct cpu_device_id cpu_table[] = { @@ -62,6 +87,34 @@ static void bsp_pre_mp_setup(void) x86_mtrr_check(); }
+int ht_disabled; +static int adjust_apic_id_ht_disabled(int index, int apic_id) +{ + return 2 * index; +} + +static void relocate(void *unused) +{ + /* Relocate the SMM handler. */ + smm_relocate(); +} + +static void enable_smis(void *unused) +{ + /* + * Now that all APs have been relocated as well as the BSP let SMIs + * start flowing. + */ + southbridge_smm_enable_smi(); + + /* Lock down the SMRAM space. */ +#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) + smm_lock(); +#endif +} + + + /* * CPU initialization recipe * @@ -70,33 +123,50 @@ static void bsp_pre_mp_setup(void) * the BSP and all APs will come up with the same microcode revision. */ static struct mp_flight_record flight_plan[] = { + MP_FR_NOBLOCK_APS(relocate, NULL, + relocate, NULL), /* NOTE: MTRR solution must be calculated before firing up the APs */ - MP_FR_NOBLOCK_APS(mp_initialize_cpu, NULL, mp_initialize_cpu, NULL), + MP_FR_BLOCK_APS(mp_initialize_cpu, NULL, mp_initialize_cpu, NULL), + MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL), };
void apollolake_init_cpus(device_t dev) { unsigned int num_virt_cores, num_phys_cores; + void *smm_save_area;
/* Pre-flight check */ bsp_pre_mp_setup();
+/* Save default SMM area before relocation occurs. */ + if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) + smm_save_area = backup_default_smm_area(); + else + smm_save_area = NULL; + /* Find CPU topology */ read_cpu_topology(&num_phys_cores, &num_virt_cores); printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n", num_phys_cores, num_virt_cores);
+ ht_disabled = num_phys_cores == num_virt_cores; /* Systems check */ struct mp_params flight_data_recorder = { .num_cpus = num_virt_cores, - .parallel_microcode_load = 0, - .microcode_pointer = NULL, - .adjust_apic_id = NULL, + .adjust_apic_id = + ht_disabled ? adjust_apic_id_ht_disabled:NULL, .flight_plan = flight_plan, .num_records = ARRAY_SIZE(flight_plan), }; +/* Load relocation and permanent handlers. Then initiate relocation.*/ + if (smm_initialize()) + printk(BIOS_CRIT, "SMM Initialization failed...\n");
/* Clear for take-off */ if (mp_init(dev->link_list, &flight_data_recorder) < 0) printk(BIOS_ERR, "MP initialization failure.\n"); + + /* Restore the default SMM region. */ + if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) + restore_default_smm_area(smm_save_area); } diff --git a/src/soc/intel/apollolake/include/soc/cpu.h b/src/soc/intel/apollolake/include/soc/cpu.h index aaa2001..466b601 100644 --- a/src/soc/intel/apollolake/include/soc/cpu.h +++ b/src/soc/intel/apollolake/include/soc/cpu.h @@ -23,6 +23,12 @@ #include <device/device.h>
void apollolake_init_cpus(struct device *dev); + +/* + * Indicates whether HyperThreading is disabled. + * The variable is not valid until apollolake_init_cpus() has been called. + */ +extern int ht_disabled; #endif
#define CPUID_APOLLOLAKE_A0 0x506c8 @@ -33,6 +39,35 @@ void apollolake_init_cpus(struct device *dev); #define MSR_CORE_THREAD_COUNT 0x35 #define MSR_EVICT_CTL 0x2e0
-#define BASE_CLOCK_MHZ 100 +#define MSR_PIC_MSG_CONTROL 0x2e + +#define MSR_SMM_MCA_CAP 0x17d +#define SMM_CPU_SVRSTR_BIT 57 +#define SMM_CPU_SVRSTR_MASK (1 << (SMM_CPU_SVRSTR_BIT - 32)) +#define MSR_UNCORE_PRMRR_PHYS_BASE 0x2f4 +#define MSR_UNCORE_PRMRR_PHYS_MASK 0x2f5 +#define MSR_SMM_FEATURE_CONTROL 0x4e0 +#define SMM_CPU_SAVE_EN (1 << 1) +/* SMM save state MSRs */ +#define MSR_SMBASE 0xc20 +#define MSR_IEDBASE 0xc22 + +/* MTRR_CAP_MSR bits */ +#define SMRR_SUPPORTED (1<<11) + +#define BASE_CLOCK_MHZ 100 + +/* System Management RAM Control */ +#define SMRAM 0x88 +#define D_OPEN (1 << 6) +#define D_CLS (1 << 5) +#define D_LCK (1 << 4) +#define G_SMRAME (1 << 3) +#define C_BASE_SEG ((0 << 2) | (1 << 1) | (0 << 0)) + +/* Base GTT Stolen Memory */ +#define BGSM 0xb4 +/* TSEG base */ +#define TSEG 0xb8
#endif /* _SOC_APOLLOLAKE_CPU_H_ */ diff --git a/src/soc/intel/apollolake/include/soc/nvs.h b/src/soc/intel/apollolake/include/soc/nvs.h index 8b3a3af..0c1acd5 100644 --- a/src/soc/intel/apollolake/include/soc/nvs.h +++ b/src/soc/intel/apollolake/include/soc/nvs.h @@ -34,4 +34,10 @@ struct global_nvs_t { chromeos_acpi_t chromeos; } __attribute__((packed));
+#if ENV_SMM +/* Used in SMM to find the ACPI GNVS address */ +struct global_nvs_t *smm_get_gnvs(void); +#endif + + #endif /* _SOC_APOLLOLAKE_NVS_H_ */ diff --git a/src/soc/intel/apollolake/include/soc/pm.h b/src/soc/intel/apollolake/include/soc/pm.h index fe7a423..ee554e1 100644 --- a/src/soc/intel/apollolake/include/soc/pm.h +++ b/src/soc/intel/apollolake/include/soc/pm.h @@ -133,6 +133,7 @@ uint32_t clear_smi_status(void); uint16_t clear_pm1_status(void); uint32_t clear_tco_status(void); uint32_t clear_gpe_status(void); +uint32_t clear_alt_status(void); void clear_pmc_status(void); uint32_t get_smi_en(void); void enable_smi(uint32_t mask); diff --git a/src/soc/intel/apollolake/include/soc/smm.h b/src/soc/intel/apollolake/include/soc/smm.h new file mode 100644 index 0000000..66cc5c3 --- /dev/null +++ b/src/soc/intel/apollolake/include/soc/smm.h @@ -0,0 +1,111 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2014 Google Inc. + * Copyright (C) 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc. + */ + +#ifndef _SOC_SMM_H_ +#define _SOC_SMM_H_ + +#include <stdint.h> +#include <cpu/x86/msr.h> +#include <soc/gpio.h> + +struct ied_header { + char signature[10]; + u32 size; + u8 reserved[34]; +} __attribute__ ((packed)); + +struct smm_relocation_params { + u32 smram_base; + u32 smram_size; + u32 ied_base; + u32 ied_size; + msr_t smrr_base; + msr_t smrr_mask; + msr_t emrr_base; + msr_t emrr_mask; + msr_t uncore_emrr_base; + msr_t uncore_emrr_mask; + /* + * The smm_save_state_in_msrs field indicates if SMM save state + * locations live in MSRs. This indicates to the CPUs how to adjust + * the SMMBASE and IEDBASE + */ + int smm_save_state_in_msrs; +}; + +/* Mainboard handler for GPI SMIs*/ +//void mainboard_smi_gpi_handler(const struct gpi_status *sts); + + +#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) +int smm_initialize(void); +void smm_relocate(void); + +/* These helpers are for performing SMM relocation. */ +void southbridge_trigger_smi(void); +void southbridge_clear_smi_status(void); + +/* + * The initialization of the southbridge is split into 2 compoments. One is + * for clearing the state in the SMM registers. The other is for enabling + * SMIs. + */ +void southbridge_smm_clear_state(void); +void southbridge_smm_enable_smi(void); +#else /* CONFIG_HAVE_SMI_HANDLER */ +static inline int smm_initialize(void) +{ + return 0; +} + +static inline void smm_relocate(void) {} +static inline void southbridge_trigger_smi(void) {} +static inline void southbridge_clear_smi_status(void) {} +static inline void southbridge_smm_clear_state(void) {} +static inline void southbridge_smm_enable_smi(void) {} +#endif /* CONFIG_HAVE_SMI_HANDLER */ + +/* + * mmap_region_granularity must to return a size which is a positive non-zero + * integer multiple of the SMM size when SMM is in use. When not using SMM, + * this value should be set to 8 MiB. 
+ */ +size_t mmap_region_granularity(void); + +/* Fills in the arguments for the entire SMM region covered by chipset + * protections. e.g. TSEG. */ +void smm_region(void **start, size_t *size); + +enum { + /* SMM handler area. */ + SMM_SUBREGION_HANDLER, + /* SMM cache region. */ + SMM_SUBREGION_CACHE, + /* Chipset specific area. */ + SMM_SUBREGION_CHIPSET, + /* Total sub regions supported. */ + SMM_SUBREGION_NUM, +}; + +/* Fills in the start and size for the requested SMM subregion. Returns + * 0 on susccess, < 0 on failure. */ +int smm_subregion(int sub, void **start, size_t *size); + +#endif diff --git a/src/soc/intel/apollolake/memmap.c b/src/soc/intel/apollolake/memmap.c index bf172cb..c90e8a8 100644 --- a/src/soc/intel/apollolake/memmap.c +++ b/src/soc/intel/apollolake/memmap.c @@ -1,32 +1,123 @@ /* * This file is part of the coreboot project. * - * Copyright (C) 2015 Intel Corp. - * (Written by Andrey Petrov andrey.petrov@intel.com for Intel Corp.) + * Copyright (C) 2014 Google Inc. + * Copyright (C) 2015-2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc. */
#include <arch/io.h> #include <cbmem.h> +#include <console/console.h> +#include <device/device.h> #include <device/pci.h> -#include <soc/northbridge.h> -#include <soc/pci_devs.h> +#include <soc/romstage.h> +#include <soc/smm.h> +#include <soc/cpu.h> +#include "chip.h" +#include <stdlib.h>
-static uintptr_t smm_region_start(void) +size_t mmap_region_granularity(void) { - return ALIGN_DOWN(pci_read_config32(NB_DEV_ROOT, TSEG), 1*MiB); + if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) + /* Align to TSEG size when SMM is in use */ + if (CONFIG_SMM_TSEG_SIZE != 0) + return CONFIG_SMM_TSEG_SIZE; + + /* Make it 8MiB by default. */ + return 8*MiB; +} + +/* Returns base of requested region encoded in the system agent. */ +static inline uintptr_t system_agent_region_base(size_t reg) +{ + /* All regions here have 1 MiB alignment. */ +#if ENV_RAMSTAGE + return ALIGN_DOWN(read32((void *)(CONFIG_MMCONF_BASE_ADDRESS + reg)), + 1*MiB); +#else + return ALIGN_DOWN(pci_read_config32(PCI_DEV(0, 0, 0), reg), 1*MiB); +#endif +} + +static inline uintptr_t smm_region_start(void) +{ +#if ENV_RAMSTAGE + return read32((void *)(CONFIG_MMCONF_BASE_ADDRESS + TSEG)) & + 0xfff00000; +#else + return pci_read_config32(PCI_DEV(0, 0, 0), TSEG) & 0xfff00000; +#endif +} + +static inline size_t smm_region_size(void) +{ + return system_agent_region_base(BGSM) - smm_region_start(); +} + +void smm_region(void **start, size_t *size) +{ + *start = (void *)smm_region_start(); + *size = smm_region_size(); +} + +/* + * Subregions within SMM + * +-------------------------+ BGSM + * | IED | IED_REGION_SIZE + * +-------------------------+ + * | External Stage Cache | SMM_RESERVED_SIZE + * +-------------------------+ + * | code and data | + * | (TSEG) | + * +-------------------------+ TSEG + */ +int smm_subregion(int sub, void **start, size_t *size) +{ + uintptr_t sub_base; + size_t sub_size; + const size_t ied_size = CONFIG_IED_REGION_SIZE; + const size_t cache_size = CONFIG_SMM_RESERVED_SIZE; + + sub_base = smm_region_start(); + sub_size = smm_region_size(); + + switch (sub) { + case SMM_SUBREGION_HANDLER: + /* Handler starts at the base of TSEG. */ + sub_size -= ied_size; + sub_size -= cache_size; + break; + case SMM_SUBREGION_CACHE: + /* External cache is in the middle of TSEG. */ + sub_base += sub_size - (ied_size + cache_size); + sub_size = cache_size; + break; + case SMM_SUBREGION_CHIPSET: + /* IED is at the top. */ + sub_base += sub_size - ied_size; + sub_size = ied_size; + break; + default: + return -1; + } + + *start = (void *)sub_base; + *size = sub_size; + + return 0; }
void *cbmem_top(void) { return (void *)smm_region_start(); } + diff --git a/src/soc/intel/apollolake/smi.c b/src/soc/intel/apollolake/smi.c new file mode 100644 index 0000000..5a46396 --- /dev/null +++ b/src/soc/intel/apollolake/smi.c @@ -0,0 +1,91 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2008-2009 coresystems GmbH + * Copyright (C) 2014 Google Inc. + * Copyright (C) 2015-2016 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <device/device.h> +#include <device/pci.h> +#include <console/console.h> +#include <arch/io.h> +#include <cpu/cpu.h> +#include <cpu/x86/cache.h> +#include <cpu/x86/smm.h> +#include <string.h> +#include <soc/pm.h> +#include <soc/smm.h> + +void southbridge_smm_clear_state(void) +{ + printk(BIOS_DEBUG, "Initializing Southbridge SMI..."); + + if (get_smi_en() & APMC_EN) { + printk(BIOS_INFO, "SMI# handler already enabled?\n"); + return; + } + + printk(BIOS_DEBUG, "Done\n"); + + /* Dump and clear status registers */ + clear_smi_status(); + clear_pm1_status(); + clear_tco_status(); + clear_gpe_status(); +} + +void southbridge_smm_enable_smi(void) +{ + printk(BIOS_DEBUG, "Enabling SMIs.\n"); + /* Configure events */ + enable_pm1(PWRBTN_EN | GBL_EN); + disable_gpe(PME_B0_EN); + + /* Enable SMI generation */ + enable_smi(APMC_EN | SLP_SMI_EN | GBL_SMI_EN | EOS); +} + +void southbridge_trigger_smi(void) +{ + /* raise an SMI interrupt */ + printk(BIOS_SPEW, " ... raise SMI#\n"); + outb(0x00, APM_CNT); +} + +void southbridge_clear_smi_status(void) +{ + /* Clear SMI status */ + clear_smi_status(); + + /* Clear PM1 status */ + clear_pm1_status(); + + /* Set EOS bit so other SMIs can occur. */ + enable_smi(EOS); +} + +void smm_setup_structures(void *gnvs, void *tcg, void *smi1) +{ + /* + * Issue SMI to set the gnvs pointer in SMM. + * tcg and smi1 are unused. + * + * EAX = APM_CNT_GNVS_UPDATE + * EBX = gnvs pointer + * EDX = APM_CNT + */ + asm volatile ( + "outb %%al, %%dx\n\t" + : /* ignore result */ + : "a" (APM_CNT_GNVS_UPDATE), + "b" ((u32)gnvs), + "d" (APM_CNT) + ); +} + diff --git a/src/soc/intel/apollolake/smihandler.c b/src/soc/intel/apollolake/smihandler.c new file mode 100644 index 0000000..ec664c7 --- /dev/null +++ b/src/soc/intel/apollolake/smihandler.c @@ -0,0 +1,407 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2013 Google Inc. + * Copyright (C) 2015-2016 Intel Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <arch/hlt.h> +#include <arch/io.h> +#include <console/console.h> +#include <cpu/x86/cache.h> +#include <cpu/x86/smm.h> +#include <device/pci_def.h> +#include <elog.h> +#include <soc/nvs.h> +#include <soc/pm.h> +#include <soc/gpio.h> +#include <soc/iomap.h> +#include <spi-generic.h> +#include <stdint.h> +#include <stdlib.h> + +/* GNVS needs to be set by coreboot initiating a software SMI. 
*/ +static struct global_nvs_t *gnvs; +static int smm_initialized; + + +void southbridge_smi_set_eos(void) +{ + enable_smi(EOS); +} + +struct global_nvs_t *smm_get_gnvs(void) +{ + return gnvs; +} + +static void busmaster_disable_on_bus(int bus) +{ + int slot, func; + unsigned int val; + unsigned char hdr; + + for (slot = 0; slot < 0x20; slot++) { + for (func = 0; func < 8; func++) { + u32 reg32; + device_t dev = PCI_DEV(bus, slot, func); + + val = pci_read_config32(dev, PCI_VENDOR_ID); + + if (val == 0xffffffff || val == 0x00000000 || + val == 0x0000ffff || val == 0xffff0000) + continue; + + /* Disable Bus Mastering for this one device */ + reg32 = pci_read_config32(dev, PCI_COMMAND); + reg32 &= ~PCI_COMMAND_MASTER; + pci_write_config32(dev, PCI_COMMAND, reg32); + + /* If this is a bridge, then follow it. */ + hdr = pci_read_config8(dev, PCI_HEADER_TYPE); + hdr &= 0x7f; + if (hdr == PCI_HEADER_TYPE_BRIDGE || + hdr == PCI_HEADER_TYPE_CARDBUS) { + unsigned int buses; + buses = pci_read_config32(dev, PCI_PRIMARY_BUS); + busmaster_disable_on_bus((buses >> 8) & 0xff); + } + } + } +} + + +static void southbridge_smi_sleep(void) +{ + uint32_t reg32; + uint8_t slp_typ; + + /* First, disable further SMIs */ + disable_smi(SLP_SMI_EN); + + /* Figure out SLP_TYP */ + reg32 = inl(ACPI_PMIO_BASE + PM1_CNT); + printk(BIOS_SPEW, "SMI#: SLP = 0x%08x\n", reg32); + slp_typ = (reg32 >> 10) & 7; + + /* Do any mainboard sleep handling */ + mainboard_smi_sleep(slp_typ-2); + +#if IS_ENABLED(CONFIG_ELOG_GSMI) + /* Log S3, S4, and S5 entry */ + if (slp_typ >= 5) + elog_add_event_byte(ELOG_TYPE_ACPI_ENTER, slp_typ-2); +#endif + /* Clear pending GPE events */ + clear_gpe_status(); + + /* Next, do the deed. */ + + switch (slp_typ) { + case SLP_TYP_S0: + printk(BIOS_DEBUG, "SMI#: Entering S0 (On)\n"); + break; + case SLP_TYP_S3: + printk(BIOS_DEBUG, "SMI#: Entering S3 (Suspend-To-RAM)\n"); + + /* Invalidate the cache before going to S3 */ + wbinvd(); + break; + case SLP_TYP_S4: + printk(BIOS_DEBUG, "SMI#: Entering S4 (Suspend-To-Disk)\n"); + break; + case SLP_TYP_S5: + printk(BIOS_DEBUG, "SMI#: Entering S5 (Soft Power off)\n"); + + /* Disable all GPE */ + disable_all_gpe(); + + /* also iterates over all bridges on bus 0 */ + busmaster_disable_on_bus(0); + break; + default: + printk(BIOS_DEBUG, "SMI#: ERROR: SLP_TYP reserved\n"); + break; + } + /* Clear pending wake status bit to avoid immediate wake */ + + /* Tri-state specific GPIOS to avoid leakage during S3/S5 */ + + /* + * Write back to the SLP register to cause the originally intended + * event again. We need to set BIT13 (SLP_EN) though to make the + * sleep happen. + */ + enable_pm1_control(SLP_EN); + + /* Make sure to stop executing code here for S3/S4/S5 */ + if (slp_typ > 1) + hlt(); + + /* + * In most sleep states, the code flow of this function ends at + * the line above. However, if we entered sleep state S1 and wake + * up again, we will continue to execute code in this function. + */ + reg32 = inl(ACPI_PMIO_BASE + PM1_CNT); + if (reg32 & SCI_EN) { + /* The OS is not an ACPI OS, so we set the state to S0 */ + disable_pm1_control(SLP_EN | SLP_TYP); + } +} +/* + * Look for Synchronous IO SMI and use save state from that + * core in case we are not running on the same core that + * initiated the IO transaction. 
+ */ +static em64t100_smm_state_save_area_t *smi_apmc_find_state_save(uint8_t cmd) +{ + em64t100_smm_state_save_area_t *state; + int node; + + /* Check all nodes looking for the one that issued the IO */ + for (node = 0; node < CONFIG_MAX_CPUS; node++) { + state = smm_get_save_state(node); + + /* Check for Synchronous IO (bit0==1) */ + if (!(state->io_misc_info & (1 << 0))) + continue; + + /* Make sure it was a write (bit4==0) */ + if (state->io_misc_info & (1 << 4)) + continue; + + /* Check for APMC IO port */ + if (((state->io_misc_info >> 16) & 0xff) != APM_CNT) + continue; + + /* Check AX against the requested command */ + if ((state->rax & 0xff) != cmd) + continue; + + return state; + } + + return NULL; +} + +#if IS_ENABLED(CONFIG_ELOG_GSMI) +static void southbridge_smi_gsmi(void) +{ + u32 *ret, *param; + uint8_t sub_command; + em64t100_smm_state_save_area_t *io_smi = + smi_apmc_find_state_save(ELOG_GSMI_APM_CNT); + + if (!io_smi) + return; + + /* Command and return value in EAX */ + ret = (u32 *)&io_smi->rax; + sub_command = (uint8_t)(*ret >> 8); + + /* Parameter buffer in EBX */ + param = (u32 *)&io_smi->rbx; + + /* drivers/elog/gsmi.c */ + *ret = gsmi_exec(sub_command, param); +} +#endif + +static void finalize(void) +{ + static int finalize_done; + + if (finalize_done) { + printk(BIOS_DEBUG, "SMM already finalized.\n"); + return; + } + finalize_done = 1; + +#if IS_ENABLED(CONFIG_SPI_FLASH_SMM) + /* Re-init SPI driver to handle locked BAR */ +// spi_init(); TODO +#endif +} + +static void southbridge_smi_apmc(void) +{ + uint8_t reg8; + em64t100_smm_state_save_area_t *state; + + /* Emulate B2 register as the FADT / Linux expects it */ + + reg8 = inb(APM_CNT); + switch (reg8) { + case APM_CNT_CST_CONTROL: + /* + * Calling this function seems to cause + * some kind of race condition in Linux + * and causes a kernel oops + */ + printk(BIOS_DEBUG, "C-state control\n"); + break; + case APM_CNT_PST_CONTROL: + /* + * Calling this function seems to cause + * some kind of race condition in Linux + * and causes a kernel oops + */ + printk(BIOS_DEBUG, "P-state control\n"); + break; + case APM_CNT_ACPI_DISABLE: + disable_pm1_control(SCI_EN); + printk(BIOS_DEBUG, "SMI#: ACPI disabled.\n"); + break; + case APM_CNT_ACPI_ENABLE: + enable_pm1_control(SCI_EN); + printk(BIOS_DEBUG, "SMI#: ACPI enabled.\n"); + break; + case APM_CNT_GNVS_UPDATE: + if (smm_initialized) { + printk(BIOS_DEBUG, + "SMI#: SMM structures already initialized!\n"); + return; + } + state = smi_apmc_find_state_save(reg8); + if (state) { + /* EBX in the state save contains the GNVS pointer */ + gnvs = (struct global_nvs_t *)((uint32_t)state->rbx); + smm_initialized = 1; + printk(BIOS_DEBUG, "SMI#: Setting GNVS to %p\n", gnvs); + } + break; +#if IS_ENABLED(CONFIG_ELOG_GSMI) + case ELOG_GSMI_APM_CNT: + southbridge_smi_gsmi(); + break; +#endif + case APM_CNT_FINALIZE: + finalize(); + break; + } + + mainboard_smi_apmc(reg8); +} + +static void southbridge_smi_pm1(void) +{ + uint16_t pm1_sts = clear_pm1_status(); + + /* + * While OSPM is not active, poweroff immediately + * on a power button event. + */ + if (pm1_sts & PWRBTN_STS) { + /* power button pressed */ +#if IS_ENABLED(CONFIG_ELOG_GSMI) + elog_add_event(ELOG_TYPE_POWER_BUTTON); +#endif + disable_pm1_control(-1UL); + enable_pm1_control(SLP_EN | (SLP_TYP_S5 << SLP_TYP_SHIFT)); + } +} + +static void southbridge_smi_gpe0(void) +{ + clear_gpe_status(); +} + +static void southbridge_smi_tco(void) +{ + uint32_t tco_sts = clear_tco_status(); + + /* Any TCO event? 
*/ + if (!tco_sts) + return; + + if (tco_sts & TCO_TIMEOUT) { /* TIMEOUT */ + /* Handle TCO timeout */ + printk(BIOS_DEBUG, "TCO Timeout.\n"); + } +} + +static void southbridge_smi_periodic(void) +{ + uint32_t reg32; + + reg32 = inl(ACPI_PMIO_BASE + SMI_EN); + + /* Are periodic SMIs enabled? */ + if ((reg32 & PERIODIC_EN) == 0) + return; + printk(BIOS_DEBUG, "Periodic SMI.\n"); +} + +typedef void (*smi_handler_t)(void); + +static const smi_handler_t southbridge_smi[32] = { + NULL, /* [0] reserved */ + NULL, /* [1] reserved */ + NULL, /* [2] BIOS_STS */ + NULL, /* [3] LEGACY_USB_STS */ + southbridge_smi_sleep, /* [4] SLP_SMI_STS */ + southbridge_smi_apmc, /* [5] APM_STS */ + NULL, /* [6] SWSMI_TMR_STS */ + NULL, /* [7] reserved */ + southbridge_smi_pm1, /* [8] PM1_STS */ + southbridge_smi_gpe0, /* [9] GPE0_STS */ + NULL, /* [10] reserved */ + NULL, /* [11] reserved */ + NULL, /* [12] reserved */ + southbridge_smi_tco, /* [13] TCO_STS */ + southbridge_smi_periodic, /* [14] PERIODIC_STS */ + NULL, /* [15] SERIRQ_SMI_STS */ + NULL, /* [16] SMBUS_SMI_STS */ + NULL, /* [17] LEGACY_USB2_STS */ + NULL, /* [18] INTEL_USB2_STS */ + NULL, /* [19] reserved */ + NULL, /* [20] PCI_EXP_SMI_STS */ + NULL, /* [21] reserved */ + NULL, /* [22] reserved */ + NULL, /* [23] reserved */ + NULL, /* [24] reserved */ + NULL, /* [25] reserved */ + NULL, /* [26] SPI_STS */ + NULL, /* [27] reserved */ + NULL, /* [28] PUNIT */ + NULL, /* [29] GUNIT */ + NULL, /* [30] reserved */ + NULL /* [31] reserved */ +}; + +void southbridge_smi_handler(void) +{ + int i; + uint32_t smi_sts; + + /* + * We need to clear the SMI status registers, or we won't see what's + * happening in the following calls. + */ + smi_sts = clear_smi_status(); + + /* Call SMI sub handler for each of the status bits */ + for (i = 0; i < ARRAY_SIZE(southbridge_smi); i++) { + if (!(smi_sts & (1 << i))) + continue; + + if (southbridge_smi[i] != NULL) { + southbridge_smi[i](); + } else { + printk(BIOS_DEBUG, + "SMI_STS[%d] occurred, but no " + "handler available.\n", i); + } + } +} + diff --git a/src/soc/intel/apollolake/smmrelocate.c b/src/soc/intel/apollolake/smmrelocate.c new file mode 100644 index 0000000..c53106f --- /dev/null +++ b/src/soc/intel/apollolake/smmrelocate.c @@ -0,0 +1,432 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2014 Google Inc. + * Copyright (C) 2015-2016 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <types.h> +#include <string.h> +#include <device/device.h> +#include <device/pci.h> +#include <cpu/cpu.h> +#include <cpu/x86/cache.h> +#include <cpu/x86/lapic.h> +#include <cpu/x86/mp.h> +#include <cpu/x86/msr.h> +#include <cpu/x86/mtrr.h> +#include <cpu/x86/smm.h> +#include <console/console.h> +#include <soc/cpu.h> +#include <soc/smm.h> + +/* This gets filled in and used during relocation. */ +static struct smm_relocation_params smm_reloc_params; + +static inline void write_smrr(struct smm_relocation_params *relo_params) +{ + printk(BIOS_DEBUG, "Writing SMRR. 
base = 0x%08x, mask=0x%08x\n", + relo_params->smrr_base.lo, relo_params->smrr_mask.lo); + wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base); + wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask); +} + +static inline void write_uncore_emrr(struct smm_relocation_params *relo_params) +{ + printk(BIOS_DEBUG, + "Writing UNCORE_EMRR. base = 0x%08x, mask=0x%08x\n", + relo_params->uncore_emrr_base.lo, + relo_params->uncore_emrr_mask.lo); + wrmsr(MSR_UNCORE_PRMRR_PHYS_BASE, relo_params->uncore_emrr_base); + wrmsr(MSR_UNCORE_PRMRR_PHYS_MASK, relo_params->uncore_emrr_mask); +} + +static void update_save_state(int cpu, + struct smm_relocation_params *relo_params, + const struct smm_runtime *runtime) +{ + u32 smbase; + u32 iedbase; + + /* + * The relocated handler runs with all CPUs concurrently. Therefore + * stagger the entry points adjusting SMBASE downwards by save state + * size * CPU num. + */ + smbase = relo_params->smram_base - cpu * runtime->save_state_size; + iedbase = relo_params->ied_base; + + printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n", + smbase, iedbase); + + /* + * All threads need to set IEDBASE and SMBASE to the relocated + * handler region. However, the save state location depends on the + * smm_save_state_in_msrs field in the relocation parameters. If + * smm_save_state_in_msrs is non-zero then the CPUs are relocating + * the SMM handler in parallel, and each CPUs save state area is + * located in their respective MSR space. If smm_save_state_in_msrs + * is zero then the SMM relocation is happening serially so the + * save state is at the same default location for all CPUs. + */ + if (relo_params->smm_save_state_in_msrs) { + msr_t smbase_msr; + msr_t iedbase_msr; + + smbase_msr.lo = smbase; + smbase_msr.hi = 0; + + /* + * According the BWG the IEDBASE MSR is in bits 63:32. It's + * not clear why it differs from the SMBASE MSR. + */ + iedbase_msr.lo = 0; + iedbase_msr.hi = iedbase; + + wrmsr(MSR_SMBASE, smbase_msr); + wrmsr(MSR_IEDBASE, iedbase_msr); + } else { + em64t101_smm_state_save_area_t *save_state; + + save_state = (void *)(runtime->smbase + SMM_DEFAULT_SIZE - + runtime->save_state_size); + + save_state->smbase = smbase; + save_state->iedbase = iedbase; + } +} + +/* Returns 1 if SMM MSR save state was set. */ +static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params) +{ + msr_t smm_mca_cap; + + smm_mca_cap = rdmsr(MSR_SMM_MCA_CAP); + if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) { + msr_t smm_feature_control; + + smm_feature_control = rdmsr(MSR_SMM_FEATURE_CONTROL); + smm_feature_control.hi = 0; + smm_feature_control.lo |= SMM_CPU_SAVE_EN; + wrmsr(MSR_SMM_FEATURE_CONTROL, smm_feature_control); + relo_params->smm_save_state_in_msrs = 1; + } + return relo_params->smm_save_state_in_msrs; +} + +/* + * The relocation work is actually performed in SMM context, but the code + * resides in the ramstage module. This occurs by trampolining from the default + * SMRAM entry point to here. + */ +static void asmlinkage cpu_smm_do_relocation(void *arg) +{ + msr_t mtrr_cap; + struct smm_relocation_params *relo_params; + const struct smm_module_params *p; + const struct smm_runtime *runtime; + int cpu; + + p = arg; + runtime = p->runtime; + relo_params = p->arg; + cpu = p->cpu; + + if (cpu >= CONFIG_MAX_CPUS) { + printk(BIOS_CRIT, + "Invalid CPU number assigned in SMM stub: %d\n", cpu); + return; + } + + printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu); + + /* + * Determine if the processor supports saving state in MSRs. 
If so, + * enable it before the non-BSPs run so that SMM relocation can occur + * in parallel in the non-BSP CPUs. + */ + if (cpu == 0) { + /* + * If smm_save_state_in_msrs is 1 then that means this is the + * 2nd time through the relocation handler for the BSP. + * Parallel SMM handler relocation is taking place. However, + * it is desired to access other CPUs save state in the real + * SMM handler. Therefore, disable the SMM save state in MSRs + * feature. + */ + if (relo_params->smm_save_state_in_msrs) { + msr_t smm_feature_control; + + smm_feature_control = rdmsr(MSR_SMM_FEATURE_CONTROL); + smm_feature_control.lo &= ~SMM_CPU_SAVE_EN; + wrmsr(MSR_SMM_FEATURE_CONTROL, smm_feature_control); + } else if (bsp_setup_msr_save_state(relo_params)) + /* + * Just return from relocation handler if MSR save + * state is enabled. In that case the BSP will come + * back into the relocation handler to setup the new + * SMBASE as well disabling SMM save state in MSRs. + */ + return; + } + + /* Make appropriate changes to the save state map. */ + update_save_state(cpu, relo_params, runtime); + + /* Write EMRR and SMRR MSRs based on indicated support. */ + mtrr_cap = rdmsr(MTRR_CAP_MSR); + if (mtrr_cap.lo & SMRR_SUPPORTED) + write_smrr(relo_params); +} + +static void fill_in_relocation_params(struct smm_relocation_params *params) +{ + void *handler_base; + size_t handler_size; + void *ied_base; + size_t ied_size; + void *tseg_base; + size_t tseg_size; + u32 emrr_base; + u32 emrr_size; + int phys_bits; + /* All range registers are aligned to 4KiB */ + const u32 rmask = ~((1 << 12) - 1); + + /* + * Some of the range registers are dependent on the number of physical + * address bits supported. + */ + phys_bits = cpuid_eax(0x80000008) & 0xff; + + smm_region(&tseg_base, &tseg_size); + smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size); + smm_subregion(SMM_SUBREGION_CHIPSET, &ied_base, &ied_size); + + params->smram_size = handler_size; + params->smram_base = (uintptr_t)handler_base; + + params->ied_base = (uintptr_t)ied_base; + params->ied_size = ied_size; + + /* SMRR has 32-bits of valid address aligned to 4KiB. */ + params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK; + params->smrr_base.hi = 0; + params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID; + params->smrr_mask.hi = 0; + + /* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */ + emrr_base = (params->ied_base + (2 << 20)) & rmask; + emrr_size = params->ied_size - (2 << 20); + + /* + * EMRR has 46 bits of valid address aligned to 4KiB. It's dependent + * on the number of physical address bits supported. + */ + params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK; + params->emrr_base.hi = 0; + params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID; + params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1; + + /* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */ + params->uncore_emrr_base.lo = emrr_base; + params->uncore_emrr_base.hi = 0; + params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) | + MTRR_PHYS_MASK_VALID; + params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1; +} + +static void adjust_apic_id_map(struct smm_loader_params *smm_params) +{ + struct smm_runtime *runtime; + int i; + + /* Adjust the APIC id map if HT is disabled. */ + if (!ht_disabled) + return; + + runtime = smm_params->runtime; + + /* The APIC ids increment by 2 when HT is disabled. 
*/ + for (i = 0; i < CONFIG_MAX_CPUS; i++) + runtime->apic_id_to_cpu[i] = runtime->apic_id_to_cpu[i] * 2; +} + +static int install_relocation_handler(int num_cpus, + struct smm_relocation_params *relo_params) +{ + /* + * The default SMM entry can happen in parallel or serially. If the + * default SMM entry is done in parallel the BSP has already setup + * the saving state to each CPU's MSRs. At least one save state size + * is required for the initial SMM entry for the BSP to determine if + * parallel SMM relocation is even feasible. Set the stack size to + * the save state size, and call into the do_relocation handler. + */ + int save_state_size = sizeof(em64t101_smm_state_save_area_t); + struct smm_loader_params smm_params = { + .per_cpu_stack_size = save_state_size, + .num_concurrent_stacks = num_cpus, + .per_cpu_save_state_size = save_state_size, + .num_concurrent_save_states = 1, + .handler = (smm_handler_t)&cpu_smm_do_relocation, + .handler_arg = (void *)relo_params, + }; + + if (smm_setup_relocation_handler(&smm_params)) + return -1; + + adjust_apic_id_map(&smm_params); + + return 0; +} + +static void setup_ied_area(struct smm_relocation_params *params) +{ + char *ied_base; + + struct ied_header ied = { + .signature = "INTEL RSVD", + .size = params->ied_size, + .reserved = {0}, + }; + + ied_base = (void *)params->ied_base; + + printk(BIOS_DEBUG, "IED base = 0x%08x\n", params->ied_base); + printk(BIOS_DEBUG, "IED size = 0x%08x\n", params->ied_size); + + /* Place IED header at IEDBASE. */ + memcpy(ied_base, &ied, sizeof(ied)); + + /* Zero out 32KiB at IEDBASE + 1MiB */ + memset(ied_base + (1 << 20), 0, (32 << 10)); +} + +static int install_permanent_handler(int num_cpus, + struct smm_relocation_params *relo_params) +{ + /* + * There are num_cpus concurrent stacks and num_cpus concurrent save + * state areas. Lastly, set the stack size to the save state size. + */ + int save_state_size = sizeof(em64t101_smm_state_save_area_t); + struct smm_loader_params smm_params = { + .per_cpu_stack_size = save_state_size, + .num_concurrent_stacks = num_cpus, + .per_cpu_save_state_size = save_state_size, + .num_concurrent_save_states = num_cpus, + }; + + printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n", + relo_params->smram_base); + if (smm_load_module((void *)relo_params->smram_base, + relo_params->smram_size, &smm_params)) + return -1; + + adjust_apic_id_map(&smm_params); + + return 0; +} + +static int cpu_smm_setup(void) +{ + int num_cpus; + msr_t msr; + + printk(BIOS_DEBUG, "Setting up SMI for CPU\n"); + + fill_in_relocation_params(&smm_reloc_params); + + if (smm_reloc_params.ied_size) + setup_ied_area(&smm_reloc_params); + + msr = rdmsr(MSR_CORE_THREAD_COUNT); + num_cpus = msr.lo & 0xffff; + if (num_cpus > CONFIG_MAX_CPUS) { + printk(BIOS_CRIT, + "Error: Hardware CPUs (%d) > MAX_CPUS (%d)\n", + num_cpus, CONFIG_MAX_CPUS); + } + + if (install_relocation_handler(num_cpus, &smm_reloc_params)) { + printk(BIOS_CRIT, "SMM Relocation handler install failed.\n"); + return -1; + } + + if (install_permanent_handler(num_cpus, &smm_reloc_params)) { + printk(BIOS_CRIT, "SMM Permanent handler install failed.\n"); + return -1; + } + + /* Ensure the SMM handlers hit DRAM before performing first SMI. */ + wbinvd(); + + return 0; +} + +int smm_initialize(void) +{ + /* Return early if CPU SMM setup failed. */ + if (cpu_smm_setup()) + return -1; + + /* Clear the SMM state in the southbridge. */ + southbridge_smm_clear_state(); + + /* Run the relocation handler. 
*/ + smm_initiate_relocation(); + + if (smm_reloc_params.smm_save_state_in_msrs) + printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n"); + + return 0; +} + +void smm_relocate(void) +{ + /* + * If smm_save_state_in_msrs is non-zero then parallel SMM relocation + * shall take place. Run the relocation handler a second time on the + * BSP to do * the final move. For APs, a relocation handler always + * needs to be run. + */ + if (smm_reloc_params.smm_save_state_in_msrs) + smm_initiate_relocation_parallel(); + else if (!boot_cpu()) + smm_initiate_relocation(); +} + +void smm_init(void) +{ + /* + * smm_init() is normally called from initialize_cpus() in + * lapic_cpu_init.c. However, that path is no longer used. Don't reuse + * the function name because that would cause confusion. + * The smm_initialize() function above is used to setup SMM at the + * appropriate time. + */ +} + +void smm_lock(void) +{ + /* + * LOCK the SMM memory window and enable normal SMM. + * After running this function, only a full reset can + * make the SMM registers writable again. + */ + printk(BIOS_DEBUG, "Locking SMM.\n"); +#if ENV_RAMSTAGE + write32((void *)(CONFIG_MMCONF_BASE_ADDRESS + SMRAM), + D_LCK | G_SMRAME | C_BASE_SEG); +#else + pci_write_config8(PCI_DEV(0, 0, 0), SMRAM, D_LCK | G_SMRAME + | C_BASE_SEG); +#endif +} +
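For reference, the SMRR programming in fill_in_relocation_params() masks the TSEG base and size down to 4 KiB granularity before OR-ing in the memory type and valid bits. A small standalone sketch of that arithmetic, assuming an 8 MiB TSEG (the CONFIG_SMM_TSEG_SIZE default above) at the purely illustrative base 0x7b000000:

/*
 * Worked example of the SMRR base/mask computation; the TSEG base value
 * is an assumption for illustration, not taken from this patch.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t rmask = ~((1u << 12) - 1);  /* 4 KiB alignment mask */
	uint32_t tseg_base = 0x7b000000;           /* assumed example base */
	uint32_t tseg_size = 0x800000;             /* 8 MiB TSEG (Kconfig default) */
	uint32_t mtrr_type_wrback = 6;             /* MTRR_TYPE_WRBACK */
	uint32_t phys_mask_valid = 1u << 11;       /* MTRR_PHYS_MASK_VALID */

	uint32_t smrr_base = (tseg_base & rmask) | mtrr_type_wrback;
	uint32_t smrr_mask = (~(tseg_size - 1) & rmask) | phys_mask_valid;

	/* Expect SMRR_PHYS_BASE = 0x7b000006, SMRR_PHYS_MASK = 0xff800800 */
	printf("SMRR base=0x%08x mask=0x%08x\n", smrr_base, smrr_mask);
	return 0;
}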