Arthur Heymans has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/68895 )
Change subject: cpu/x86/smm: Don't save EFER
......................................................................
cpu/x86/smm: Don't save EFER
The EFER MSR is part of the SMM save state, and RSM properly restores it on
exit from SMM, so there is no need to back it up and restore it by hand
around the long mode switch.
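For reference: per AMD's APM vol. 2, the AMD64 SMM save map keeps EFER at
SMBASE + 0xfed0, and Intel's Intel 64 save map has an equivalent IA32_EFER
slot; RSM reloads the MSR from there. A minimal C sketch of the idea
(illustrative only; the struct and helper below are assumptions, not
coreboot's actual definitions from src/include/cpu/x86/smm.h):

#include <stdint.h>

/*
 * Illustrative sketch: on the AMD64 save map, EFER is stored at
 * SMBASE + 0xfed0 when an SMI is taken, and RSM reloads the MSR
 * from this slot, so the handler never has to preserve it by hand.
 */
struct amd64_smm_save_efer_slot {
	uint64_t efer;	/* at SMBASE + 0xfed0, reloaded by RSM */
};

static inline uint64_t saved_efer(uintptr_t smbase)
{
	const volatile struct amd64_smm_save_efer_slot *slot =
		(const volatile void *)(smbase + 0xfed0);
	return slot->efer;
}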
Change-Id: Ie0e9584afd1f08f51ca57da5c4350042699f130d
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
---
M src/cpu/x86/smm/smm_stub.S
M src/cpu/x86/smm/smmhandler.S
2 files changed, 18 insertions(+), 62 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/95/68895/1
diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S
index b35cf7f..25ede76 100644
--- a/src/cpu/x86/smm/smm_stub.S
+++ b/src/cpu/x86/smm/smm_stub.S
@@ -225,26 +225,18 @@
 	/* Align stack to 16 bytes. Another 32 bytes are pushed below. */
 	andl	$0xfffffff0, %esp

-#if ENV_X86_64
-	mov	%ecx, %edi
-	/* Backup IA32_EFER. Preserves ebx. */
-	movl	$(IA32_EFER), %ecx
-	rdmsr
-	movl	%eax, -0x8(%ebp)
-	movl	%edx, -0xc(%ebp)
-
-	/* entry64.inc preserves ebx, esi, edi */
-#include <cpu/x86/64bit/entry64.inc>
-	mov	%edi, %ecx
-
-#endif
-
 	/* Call into the c-based SMM relocation function with the platform
 	 * parameters. Equivalent to:
 	 *   struct arg = { cpu_num, canary };
 	 *   c_handler(&arg)
 	 */
 #if ENV_X86_64
+	mov	%ecx, %edi
+	/* entry64.inc preserves ebx, esi, edi */
+#include <cpu/x86/64bit/entry64.inc>
+	mov	%edi, %ecx
+
 	push	%rbx	/* uintptr_t *canary */
 	push	%rcx	/* size_t cpu */
@@ -253,23 +245,6 @@
 	movabs	c_handler, %eax
 	call	*%rax

-	/*
-	 * The only reason to go back to protected mode is that RSM doesn't restore
-	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
-	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
-	 */
-
-	/* Disable long mode. */
-	#include <cpu/x86/64bit/exit32.inc>
-
-	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
-	movl	$(IA32_EFER), %ecx
-	rdmsr
-	movl	-0x8(%ebp), %eax
-	movl	-0xc(%ebp), %edx
-
-	wrmsr
-
 #else
 	push	$0x0	/* Padding */
 	push	%ebx	/* uintptr_t *canary */
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index 19793a0..3d76812 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -43,14 +43,6 @@

 #define SMM_HANDLER_OFFSET 0x0000

-#if ENV_X86_64
-.bss
-ia32efer_backup_eax:
-.long 0
-ia32efer_backup_edx:
-.long 0
-#endif
-
 /* initially SMM is some sort of real mode. Let gcc know
  * how to treat the SMM handler stub
  */
@@ -168,12 +160,6 @@
 	movl	%ebx, %esp

 #if ENV_X86_64
-	/* Backup IA32_EFER. Preserves ebx. */
-	movl	$(IA32_EFER), %ecx
-	rdmsr
-	movl	%eax, ia32efer_backup_eax
-	movl	%edx, ia32efer_backup_edx
-
 	/* Enable long mode. Preserves ebx. */
 #include <cpu/x86/64bit/entry64.inc>
@@ -181,23 +167,6 @@
 	/* Call C handler */
 	call	smi_handler

-#if ENV_X86_64
-	/*
-	 * The only reason to go back to protected mode is that RSM doesn't restore
-	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
-	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
-	 */
-
-	/* Disable long mode. */
-	#include <cpu/x86/64bit/exit32.inc>
-
-	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
-	movl	$(IA32_EFER), %ecx
-	movl	ia32efer_backup_eax, %eax
-	movl	ia32efer_backup_edx, %edx
-	wrmsr
-#endif
-
 	/* To return, just do rsm. It will "clean up" protected mode */
 	rsm
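As a side note, the assumption behind this change can be spot-checked from
C. A hypothetical debug helper follows (not part of this patch; the helper
name and the SMI trigger are made up, while msr_t, rdmsr() and IA32_EFER
come from coreboot's existing <cpu/x86/msr.h>):

#include <console/console.h>
#include <cpu/x86/msr.h>

/*
 * Hypothetical check: read IA32_EFER before and after a software SMI
 * and report if the SMM handler failed to leave it intact.
 */
static void check_efer_unchanged_by_smi(void)
{
	msr_t before = rdmsr(IA32_EFER);

	/* ... trigger a software SMI here, e.g. by writing APM_CNT ... */

	msr_t after = rdmsr(IA32_EFER);
	if (before.lo != after.lo || before.hi != after.hi)
		printk(BIOS_ERR, "SMM clobbered IA32_EFER\n");
}

With RSM reloading IA32_EFER from the save state, such a check should never
fire, which is what makes the manual backup/restore removed here dead code.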