Rudolf Marek (r.marek@assembler.cz) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/82
-gerrit
commit 77439f041fc6348cdaaa682d5cbe8c41f3f96241
Author: Rudolf Marek <r.marek@assembler.cz>
Date:   Sat Jul 2 16:36:17 2011 +0200
Make AMD SMM SMP aware
Move the SMM MSR init to code that runs on each CPU. Introduce a global SMM_BASE define; later, all uses of 0xa0000 can be changed to it. Remove the unnecessary guard that kept the SMM handler copy from running more than once (smm_init() is called by the BSP only), and drop the early return taken when the lock bit is already set, because this bit seems to be cleared by INIT. Add the defines for fam10h and famfh to their respective files, as we do not have a shared AMD MSR header file.

Tested on an M2V-MX SE with a dual-core CPU.
Change-Id: I1b2bf157d1cc79c566c9089689a9bfd9310f5683
Signed-off-by: Rudolf Marek <r.marek@assembler.cz>
---
 src/cpu/amd/model_10xxx/model_10xxx_init.c |  13 +++-
 src/cpu/amd/model_fxx/model_fxx_init.c     |  16 ++++
 src/cpu/amd/smm/smm_init.c                 | 124 ++++++++++------------
 src/include/cpu/amd/model_10xxx_msr.h      |   4 +
 src/include/cpu/amd/model_fxx_msr.h        |   4 +
 src/include/cpu/x86/smm.h                  |   3 +
 6 files changed, 82 insertions(+), 82 deletions(-)
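The core of the change is the per-CPU SMBASE staggering visible in the hunks below: each core's SMM base is moved down by 0x400 per LAPIC ID so that the cores' save-state areas do not collide. As a rough illustration (a standalone host-side sketch, not code from the patch; it assumes LAPIC IDs are consecutive from 0 and uses the architectural offsets SMBASE + 0x8000 for the SMI entry point and SMBASE + 0xfe00 for the save state):

#include <stdio.h>

#define SMM_BASE 0xa0000

int main(void)
{
	unsigned int lapicid;

	for (lapicid = 0; lapicid < 4; lapicid++) {
		unsigned int smbase = SMM_BASE - lapicid * 0x400;

		/* On SMI, a core starts executing at SMBASE + 0x8000 and
		 * saves its state in the 0x200 bytes at SMBASE + 0xfe00,
		 * so a 0x400 stagger keeps the per-core save-state areas
		 * disjoint while all entry points stay inside ASEG. */
		printf("lapicid %u: SMBASE=0x%05x entry=0x%05x save=0x%05x\n",
		       lapicid, smbase, smbase + 0x8000, smbase + 0xfe00);
	}
	return 0;
}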
diff --git a/src/cpu/amd/model_10xxx/model_10xxx_init.c b/src/cpu/amd/model_10xxx/model_10xxx_init.c
index a92852f..2e8bbfe 100644
--- a/src/cpu/amd/model_10xxx/model_10xxx_init.c
+++ b/src/cpu/amd/model_10xxx/model_10xxx_init.c
@@ -24,6 +24,7 @@
 #include <device/pci.h>
 #include <string.h>
 #include <cpu/x86/msr.h>
+#include <cpu/x86/smm.h>
 #include <cpu/x86/pae.h>
 #include <pc80/mc146818rtc.h>
 #include <cpu/x86/lapic.h>
@@ -118,7 +119,17 @@ static void model_10xxx_init(device_t dev)
 	msr.hi &= ~(1 << (35-32));
 	wrmsr(BU_CFG2_MSR, msr);
 
-	/* Write protect SMM space with SMMLOCK. */
+	/* Set SMM base address for this CPU */
+	msr = rdmsr(SMM_BASE_MSR);
+	msr.lo = SMM_BASE - (lapicid() * 0x400);
+	wrmsr(SMM_BASE_MSR, msr);
+
+	/* Enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0); /* Enable ASEG SMRAM Range */
+	wrmsr(SMM_MASK_MSR, msr);
+
+	/* Set SMMLOCK to avoid exploits messing with SMM */
 	msr = rdmsr(HWCR_MSR);
 	msr.lo |= (1 << 0);
 	wrmsr(HWCR_MSR, msr);
diff --git a/src/cpu/amd/model_fxx/model_fxx_init.c b/src/cpu/amd/model_fxx/model_fxx_init.c
index 0608d0a..ce5c810 100644
--- a/src/cpu/amd/model_fxx/model_fxx_init.c
+++ b/src/cpu/amd/model_fxx/model_fxx_init.c
@@ -24,6 +24,7 @@
 #include <cpu/cpu.h>
 #include <cpu/x86/cache.h>
 #include <cpu/x86/mtrr.h>
+#include <cpu/x86/smm.h>
 #include <cpu/amd/multicore.h>
 #include <cpu/amd/model_fxx_msr.h>
 
@@ -547,6 +548,21 @@ static void model_fxx_init(device_t dev)
 	 */
 	if (id.coreid == 0)
 		init_ecc_memory(id.nodeid); // only do it for core 0
+
+	/* Set SMM base address for this CPU */
+	msr = rdmsr(SMM_BASE_MSR);
+	msr.lo = SMM_BASE - (lapicid() * 0x400);
+	wrmsr(SMM_BASE_MSR, msr);
+
+	/* Enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0); /* Enable ASEG SMRAM Range */
+	wrmsr(SMM_MASK_MSR, msr);
+
+	/* Set SMMLOCK to avoid exploits messing with SMM */
+	msr = rdmsr(HWCR_MSR);
+	msr.lo |= (1 << 0);
+	wrmsr(HWCR_MSR, msr);
 }
 
 static struct device_operations cpu_dev_ops = {
diff --git a/src/cpu/amd/smm/smm_init.c b/src/cpu/amd/smm/smm_init.c
index ad1c112..6398688 100644
--- a/src/cpu/amd/smm/smm_init.c
+++ b/src/cpu/amd/smm/smm_init.c
@@ -30,94 +30,56 @@
 #include <cpu/x86/smm.h>
 #include <string.h>
 
-#define SMM_BASE_MSR 0xc0010111
-#define SMM_ADDR_MSR 0xc0010112
-#define SMM_MASK_MSR 0xc0010113
-#define SMM_BASE 0xa0000
-
 extern unsigned char _binary_smm_start;
 extern unsigned char _binary_smm_size;
 
-static int smm_handler_copied = 0;
-
 void smm_init(void)
 {
-	msr_t msr;
-
-	msr = rdmsr(HWCR_MSR);
-	if (msr.lo & (1 << 0)) {
-		// This sounds like a bug... ?
-		printk(BIOS_DEBUG, "SMM is still locked from last boot, using old handler.\n");
-		return;
-	}
-
-	/* Only copy SMM handler once, not once per CPU */
-	if (!smm_handler_copied) {
-		msr_t syscfg_orig, mtrr_aseg_orig;
-
-		smm_handler_copied = 1;
-
-		/* Back up MSRs for later restore */
-		syscfg_orig = rdmsr(SYSCFG_MSR);
-		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
-
-		/* MTRR changes don't like an enabled cache */
-		disable_cache();
-
-		msr = syscfg_orig;
-		/* Allow changes to MTRR extended attributes */
-		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-		/* turn the extended attributes off until we fix
-		 * them so A0000 is routed to memory
-		 */
-		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
-		wrmsr(SYSCFG_MSR, msr);
-
-		/* set DRAM access to 0xa0000 */
-		/* A0000 is memory */
-		msr.lo = 0x18181818;
-		msr.hi = 0x18181818;
-		wrmsr(MTRRfix16K_A0000_MSR, msr);
-
-		/* enable the extended features */
-		msr = syscfg_orig;
-		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
-		wrmsr(SYSCFG_MSR, msr);
-
-		enable_cache();
-		/* copy the real SMM handler */
-		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
-		wbinvd();
-
-		/* Restore MTRR */
-		disable_cache();
-
-		/* Restore SYSCFG */
-		wrmsr(SYSCFG_MSR, syscfg_orig);
-
-		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
-		enable_cache();
-	}
-
-
-	/* But set SMM base address on all CPUs/cores */
-	msr = rdmsr(SMM_BASE_MSR);
-	msr.lo = SMM_BASE - (lapicid() * 0x400);
-	wrmsr(SMM_BASE_MSR, msr);
-
-	/* enable the SMM memory window */
-	msr = rdmsr(SMM_MASK_MSR);
-	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
-	wrmsr(SMM_MASK_MSR, msr);
-
-	/* Set SMMLOCK to avoid exploits messing with SMM */
-	msr = rdmsr(HWCR_MSR);
-	msr.lo |= (1 << 0);
-	wrmsr(HWCR_MSR, msr);
+	msr_t msr, syscfg_orig, mtrr_aseg_orig;
+
+	/* Back up MSRs for later restore */
+	syscfg_orig = rdmsr(SYSCFG_MSR);
+	mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
+
+	/* MTRR changes don't like an enabled cache */
+	disable_cache();
+
+	msr = syscfg_orig;
+
+	/* Allow changes to MTRR extended attributes */
+	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
+	/* turn the extended attributes off until we fix
+	 * them so A0000 is routed to memory
+	 */
+	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
+	wrmsr(SYSCFG_MSR, msr);
+
+	/* set DRAM access to 0xa0000 */
+	msr.lo = 0x18181818;
+	msr.hi = 0x18181818;
+	wrmsr(MTRRfix16K_A0000_MSR, msr);
+
+	/* enable the extended features */
+	msr = syscfg_orig;
+	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
+	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
+	wrmsr(SYSCFG_MSR, msr);
+
+	enable_cache();
+	/* copy the real SMM handler */
+	memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
+	wbinvd();
+	disable_cache();
+
+	/* Restore SYSCFG and MTRR */
+	wrmsr(SYSCFG_MSR, syscfg_orig);
+	wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
+	enable_cache();
+
+	/* CPU MSR are set in CPU init */
 }
 
 void smm_lock(void)
 {
-	/* We lock SMM per CPU core */
+	/* We lock SMM in CPU init */
 }
diff --git a/src/include/cpu/amd/model_10xxx_msr.h b/src/include/cpu/amd/model_10xxx_msr.h
index f497eb3..b34a281 100644
--- a/src/include/cpu/amd/model_10xxx_msr.h
+++ b/src/include/cpu/amd/model_10xxx_msr.h
@@ -22,6 +22,10 @@
 
 #include <cpu/x86/msr.h>
 
+#define SMM_BASE_MSR 0xC0010111
+#define SMM_ADDR_MSR 0xC0010112
+#define SMM_MASK_MSR 0xC0010113
+
 #define HWCR_MSR 0xC0010015
 #define NB_CFG_MSR 0xC001001f
 #define LS_CFG_MSR 0xC0011020
diff --git a/src/include/cpu/amd/model_fxx_msr.h b/src/include/cpu/amd/model_fxx_msr.h
index b4795cb..2ac2d4e 100644
--- a/src/include/cpu/amd/model_fxx_msr.h
+++ b/src/include/cpu/amd/model_fxx_msr.h
@@ -1,6 +1,10 @@
 #ifndef CPU_AMD_MODEL_FXX_MSR_H
 #define CPU_AMD_MODEL_FXX_MSR_H
 
+#define SMM_BASE_MSR 0xc0010111
+#define SMM_ADDR_MSR 0xc0010112
+#define SMM_MASK_MSR 0xc0010113
+
 #define HWCR_MSR 0xC0010015
 #define NB_CFG_MSR 0xC001001f
 #define LS_CFG_MSR 0xC0011020
diff --git a/src/include/cpu/x86/smm.h b/src/include/cpu/x86/smm.h
index 49ee2be..c314c39 100644
--- a/src/include/cpu/x86/smm.h
+++ b/src/include/cpu/x86/smm.h
@@ -24,6 +24,9 @@
 #ifndef CPU_X86_SMM_H
 #define CPU_X86_SMM_H
 
+/* used only by C programs so far */
+#define SMM_BASE 0xa0000
+
 #include <types.h>
 typedef struct {
 	u16 es_selector;
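One non-obvious value in the smm_init() hunk above is the 0x18181818 written to MTRRfix16K_A0000. Per AMD's BKDG, that register covers 0xa0000-0xbffff as eight 16 KiB chunks with one attribute byte each, and while SYSCFG.MtrrFixDramModEn is set, each byte carries extended RdMem (bit 4) and WrMem (bit 3) bits that route reads and writes to DRAM rather than MMIO. A standalone sketch (not code from the patch) that reconstructs the value:

#include <stdint.h>
#include <stdio.h>

#define WRMEM (1 << 3)	/* writes to this 16 KiB chunk go to DRAM */
#define RDMEM (1 << 4)	/* reads from this 16 KiB chunk come from DRAM */

int main(void)
{
	uint64_t fix16k_a0000 = 0;
	int chunk;

	/* Eight attribute bytes: the low 32 bits cover 0xa0000-0xaffff,
	 * the high 32 bits cover 0xb0000-0xbffff. */
	for (chunk = 0; chunk < 8; chunk++)
		fix16k_a0000 |= (uint64_t)(RDMEM | WRMEM) << (8 * chunk);

	/* Prints 0x1818181818181818, i.e. msr.lo = msr.hi = 0x18181818 */
	printf("MTRRfix16K_A0000 = 0x%016llx\n",
	       (unsigned long long)fix16k_a0000);
	return 0;
}

With all eight bytes set to RdMem|WrMem = 0x18, the whole A segment is routed to DRAM just long enough for the memcpy() to place the handler, after which the original SYSCFG and MTRR values are restored.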