frank vibrans has uploaded this change for review. ( https://review.coreboot.org/20916
Change subject: Initial Kahlee SMM code. ......................................................................
Initial Kahlee SMM code.
This code implements SMM handling using ASEG. There is no real functional capability in place yet: SMIs are simply cleared and SMM is exited.
Change-Id: Ifeca4323626af6089ce23892e79b0e560d92c100 Signed-off-by: frank vibrans frank.vibrans@scarletltd.com --- M src/cpu/x86/smm/smmhandler.S M src/include/cpu/x86/smm.h M src/soc/amd/stoneyridge/Kconfig M src/soc/amd/stoneyridge/include/soc/smi.h M src/soc/amd/stoneyridge/model_15_init.c M src/soc/amd/stoneyridge/smi.c M src/soc/amd/stoneyridge/smihandler.c 7 files changed, 135 insertions(+), 3 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/16/20916/1
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S index dd8a0c0..0617a0d 100644 --- a/src/cpu/x86/smm/smmhandler.S +++ b/src/cpu/x86/smm/smmhandler.S @@ -142,7 +142,8 @@ * without relying on the LAPIC ID. */ #if IS_ENABLED(CONFIG_CPU_AMD_AGESA_FAMILY15_TN) \ - || IS_ENABLED(CONFIG_CPU_AMD_AGESA_FAMILY15_RL) + || IS_ENABLED(CONFIG_CPU_AMD_AGESA_FAMILY15_RL) \ + || IS_ENABLED(CONFIG_CPU_AMD_AGESA_BINARY_PI) /* LAPIC IDs start from 0x10; map that to the proper core index */ subl $0x10, %ecx #endif diff --git a/src/include/cpu/x86/smm.h b/src/include/cpu/x86/smm.h index bd0e356..aea4100 100644 --- a/src/include/cpu/x86/smm.h +++ b/src/include/cpu/x86/smm.h @@ -37,6 +37,8 @@ * starts @ 0x7e00 */ #define SMM_AMD64_ARCH_OFFSET 0x7e00 +#define SMM_AMD64_SAVE_STATE_OFFSET \ + SMM_SAVE_STATE_BEGIN(SMM_AMD64_ARCH_OFFSET)
typedef struct { u16 es_selector; diff --git a/src/soc/amd/stoneyridge/Kconfig b/src/soc/amd/stoneyridge/Kconfig index 0425beb..19aa1b2 100644 --- a/src/soc/amd/stoneyridge/Kconfig +++ b/src/soc/amd/stoneyridge/Kconfig @@ -281,6 +281,10 @@ to FEDC_6FFFh. UART controller 1 registers range from FEDC_8000h to FEDC_8FFFh.
+config HAVE_SMI_HANDLER + bool "Enable SMM and SMI handling" + default y + config SMM_TSEG_SIZE hex default 0x800000 if HAVE_SMI_HANDLER diff --git a/src/soc/amd/stoneyridge/include/soc/smi.h b/src/soc/amd/stoneyridge/include/soc/smi.h index 7a3c804..016ea4a 100644 --- a/src/soc/amd/stoneyridge/include/soc/smi.h +++ b/src/soc/amd/stoneyridge/include/soc/smi.h @@ -20,7 +20,24 @@ #define SMITRG0_EOS (1 << 28) #define SMITRG0_SMIENB (1 << 31)
+#define SMI_REG_GPESTAT 0x00 /* GPE event status */ + +#define SMI_REG_SMISTAT0 0x80 +#define SMI_REG_SMISTAT1 0x84 +#define SMI_REG_SMISTAT2 0x88 +#define SMI_REG_SMISTAT3 0x8C +#define SMI_REG_SMISTAT4 0x90 + #define SMI_REG_CONTROL0 0xa0 + +#define SMI_REG_SMICTRL2 0xA8 /* Routes FakeSMIs reported in SMIx84 */ +#define SMICTRL2_FAKES_EN 0x00000054 /* Enable FakeSMIs reported in SMIx84 */ + +#define SMI_REG_SMICTRL8 0xC0 /* Routes FakeSMIs reported in SMIx90 */ +#define SMICTRL8_FAKES_EN 0x01500000 /* Enable FakeSMIs reported in SMIx90 */ + +#define SMI_REG_SMICTRL9 0xC4 /* Routes trap events reported in SMIx90 */ +#define SMICTRL9_MTRAP_EN 0x00010000 /* Enable memory trap SMI reported in SMIx90 */
enum smi_mode { SMI_MODE_DISABLE = 0, @@ -54,6 +71,9 @@ write16((void *)(SMI_BASE + offset), value); }
+void smm_southbridge_enable(void); +void smm_southbridge_clear_state(void); + void hudson_configure_gevent_smi(uint8_t gevent, uint8_t mode, uint8_t level); void hudson_disable_gevent_smi(uint8_t gevent); void hudson_enable_acpi_cmd_smi(void); diff --git a/src/soc/amd/stoneyridge/model_15_init.c b/src/soc/amd/stoneyridge/model_15_init.c index a46f322..6003bef 100644 --- a/src/soc/amd/stoneyridge/model_15_init.c +++ b/src/soc/amd/stoneyridge/model_15_init.c @@ -27,6 +27,7 @@ #include <cpu/cpu.h> #include <cpu/x86/cache.h> #include <cpu/x86/mtrr.h> +#include <cpu/x86/smm.h> #include <cpu/amd/amdfam15.h> #include <arch/acpi.h>
@@ -54,6 +55,9 @@ u8 i; msr_t msr; int msrno; +#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) + unsigned int cpu_idx; +#endif #if IS_ENABLED(CONFIG_LOGICAL_CPUS) u32 siblings; #endif @@ -114,6 +118,21 @@ wrmsr(NB_CFG_MSR, msr);
+ if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) { + cpu_idx = cpu_info()->index; + printk(BIOS_INFO, "Initializing SMM for CPU %u\n", cpu_idx); + + /* Set SMM base address for this CPU */ + msr = rdmsr(MSR_SMM_BASE); + msr.lo = SMM_BASE - (cpu_idx * 0x400); + wrmsr(MSR_SMM_BASE, msr); + + /* Enable the SMM memory window */ + msr = rdmsr(MSR_SMM_MASK); + msr.lo |= ((1 << 0) | (6 << 8)); /* Enable ASEG SMRAM Range as WB */ + wrmsr(MSR_SMM_MASK, msr); + } + /* Write protect SMM space with SMMLOCK. */ msr = rdmsr(HWCR_MSR); msr.lo |= (1 << 0); diff --git a/src/soc/amd/stoneyridge/smi.c b/src/soc/amd/stoneyridge/smi.c index c92697c..e8c1180 100644 --- a/src/soc/amd/stoneyridge/smi.c +++ b/src/soc/amd/stoneyridge/smi.c @@ -8,12 +8,22 @@
#include <console/console.h> +#include <arch/io.h> #include <cpu/cpu.h> +#include <cpu/x86/lapic.h> +#include <cpu/x86/msr.h> +#include <cpu/x86/mtrr.h> +#include <cpu/amd/mtrr.h> +#include <cpu/amd/msr.h> +#include <cpu/x86/cache.h> +#include <cpu/x86/smm.h> #include <soc/smi.h> +#include <string.h> +
void smm_setup_structures(void *gnvs, void *tcg, void *smi1) { - printk(BIOS_DEBUG, "smm_setup_structures STUB!!!\n"); + printk(BIOS_DEBUG, "smm_setup_structures - STUB.\n"); }
/** Set the EOS bit and enable SMI generation from southbridge */ @@ -24,3 +34,57 @@ reg |= SMITRG0_EOS; /* Set EOS bit */ smi_write32(SMI_REG_SMITRIG0, reg); } + +/* Sets up ASEG MTRR and copies the SMM code to the ASEG. */ +void smm_init(void) +{ + + msr_t msr, syscfg_orig, mtrr_aseg_orig; + + printk(BIOS_DEBUG, "SMM_mem_init\n"); + + /* Back up MSRs for later restore */ + syscfg_orig = rdmsr(SYSCFG_MSR); + mtrr_aseg_orig = rdmsr(MTRR_FIX_16K_A0000); + + /* MTRR changes don't like an enabled cache */ + disable_cache(); + + msr = syscfg_orig; + + /* Allow changes to MTRR extended attributes */ + msr.lo |= SYSCFG_MSR_MtrrFixDramModEn; + /* turn the extended attributes off until we fix + * them so A0000 is routed to memory + */ + msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn; + wrmsr(SYSCFG_MSR, msr); + + /* set DRAM access to 0xa0000-0xbffff to read, write, UC */ + msr.hi = msr.lo = 0x18181818; + wrmsr(MTRR_FIX_16K_A0000, msr); + + /* enable the extended features */ + msr = syscfg_orig; + msr.lo |= SYSCFG_MSR_MtrrFixDramModEn; + msr.lo |= SYSCFG_MSR_MtrrFixDramEn; + wrmsr(SYSCFG_MSR, msr); + + enable_cache(); + + /* copy the real SMM handler */ + memcpy((void *)SMM_BASE, _binary_smm_start, + _binary_smm_end - _binary_smm_start); + wbinvd(); + + disable_cache(); + + /* Restore SYSCFG and MTRR */ + wrmsr(SYSCFG_MSR, syscfg_orig); + wrmsr(MTRR_FIX_16K_A0000, mtrr_aseg_orig); + + enable_cache(); + + /* CPU MSR are set in CPU init */ +} + diff --git a/src/soc/amd/stoneyridge/smihandler.c b/src/soc/amd/stoneyridge/smihandler.c index 5a646a6..cb67801 100644 --- a/src/soc/amd/stoneyridge/smihandler.c +++ b/src/soc/amd/stoneyridge/smihandler.c @@ -110,8 +110,30 @@ smi_write32(0x90, status); }
+void smm_southbridge_enable(void) +{ +} + +void smm_southbridge_clear_state(void) +{ + uint32_t reg = smi_read32(SMI_REG_GPESTAT); + smi_write32(SMI_REG_GPESTAT, reg); + + reg = smi_read32(SMI_REG_SMISTAT0); + smi_write32(SMI_REG_SMISTAT0, reg); + + reg = smi_read32(SMI_REG_SMISTAT1); + smi_write32(SMI_REG_SMISTAT1, reg); + + reg = smi_read32(SMI_REG_SMISTAT2); + smi_write32(SMI_REG_SMISTAT2, reg); + + reg = smi_read32(SMI_REG_SMISTAT4); + smi_write32(SMI_REG_SMISTAT4, reg); +} + void southbridge_smi_handler(unsigned int node, - smm_state_save_area_t *state_save) + smm_state_save_area_t *state_save) { const uint16_t smi_src = smi_read16(0x94);