Patrick Rudolph has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/35681 )
Change subject: cpu/x86/smm: Add support for long mode
......................................................................
cpu/x86/smm: Add support for long mode
Enable long mode in the SMM handler. x86_32 is not affected by this change.
Since the rsm instruction used to leave SMM does not restore MSRs, drop back to protected mode after running smi_handler and restore the IA32_EFER MSR to its previous value.
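For clarity, a minimal C sketch of the intended flow, assuming coreboot's rdmsr()/wrmsr() helpers and the IA32_EFER/EFER_LME definitions from <cpu/x86/msr.h>; the function names are illustrative only, the patch performs these steps in assembly and the actual long-mode switch is done by entry64.inc:

#include <cpu/x86/msr.h>

/* Sketch only: the real backup lives in a .long inside smmhandler.S. */
static msr_t ia32efer_backup;

static void smm_enter_long_mode(void)
{
	/* Back up IA32_EFER before LME is set and paging is enabled. */
	ia32efer_backup = rdmsr(IA32_EFER);

	msr_t efer = ia32efer_backup;
	efer.lo |= EFER_LME;	/* activation also requires PAE + paging */
	wrmsr(IA32_EFER, efer);
}

static void smm_leave_long_mode(void)
{
	/* rsm does not restore MSRs, so IA32_EFER must be restored by hand. */
	wrmsr(IA32_EFER, ia32efer_backup);
}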
Tested on QEMU Q35. Needs testing on real hardware.
Change-Id: I8bba4af4688c723fc079ae905dac95f57ea956f8
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
---
M Documentation/arch/x86/index.md
M src/cpu/x86/smm/smmhandler.S
2 files changed, 95 insertions(+), 1 deletion(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/81/35681/1
diff --git a/Documentation/arch/x86/index.md b/Documentation/arch/x86/index.md
index 462e7e6..b02b879 100644
--- a/Documentation/arch/x86/index.md
+++ b/Documentation/arch/x86/index.md
@@ -45,6 +45,7 @@
 * Add x86_64 exception handlers - *TODO*
 * Setup page tables for long mode - *DONE*
 * Add assembly code for long mode - *DONE*
+* Add assembly code for SMM - *DONE*
 * Add assembly code for postcar stage - *TODO*
 * Add assembly code to return to protected mode - *TODO*
 * Implement reference code for mainboard `emulation/qemu-q35` - *TODO*
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index a2be7f2..fd8af79 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -20,6 +20,7 @@
  */
 
 #include <cpu/x86/lapic_def.h>
+#include <cpu/x86/msr.h>
 
 /*
  * +--------------------------------+ 0xaffff
@@ -171,15 +172,104 @@
 	/* Get SMM revision */
 	movl $0xa8000 + 0x7efc, %ebx	/* core 0 address */
 	subl %ebp, %ebx			/* subtract core X offset */
+
+#if defined(__x86_64__)
+	/* Backup IA32_EFER. Preserves ebx. */
+	movl $(IA32_EFER), %ecx
+	rdmsr
+	movl %eax, ia32efer_backup
+
+	/* Enable long mode if required. Preserves ebx. */
+#include <cpu/x86/64bit/entry64.inc>
+
+	mov (%ebx), %rdi
+
+#else
 	movl (%ebx), %eax
 	pushl %eax
+#endif
 
-	/* Call 32bit C handler */
+	/* Call C handler */
 	call smi_handler
+#if defined(__x86_64__)
+	/*
+	 * The only reason to go back to protected mode is that rsm doesn't restore
+	 * MSR registers and IA32_EFER was modified.
+	 * To restore IA32_EFER go back to protected mode.
+	 */
+
+	/* Ensure cache is clean. */
+	invd
+
+	/* Set 32-bit code segment and ss */
+	mov $0x08, %rcx
+	call SetCodeSelector32
+
+.code32
+	/* Running in 32-bit compatibility mode */
+
+	/* Use flat data segment */
+	movl $0x10, %eax
+	movl %eax, %ds
+	movl %eax, %es
+	movl %eax, %ss
+	movl %eax, %fs
+	movl %eax, %gs
+
+	/* Disable paging */
+	movl %cr0, %eax
+	andl $0x7FFFFFFF, %eax
+	movl %eax, %cr0
+
+	/* Disable long mode */
+	movl $(IA32_EFER), %ecx
+	rdmsr
+	andl $(~EFER_LME), %eax
+	wrmsr
+
+	/* Disable PAE */
+	movl %cr4, %eax
+	andl $(~0x20), %eax
+	movl %eax, %cr4
+
+	/* Restore IA32_EFER */
+	movl $(IA32_EFER), %ecx
+	rdmsr
+	movl ia32efer_backup, %eax
+	wrmsr
+#endif
+
 	/* To return, just do rsm. It will "clean up" protected mode */
 	rsm
+#if defined(__x86_64__)
+ia32efer_backup:
+.long 0
+
+.align 8
+.code64
+SetCodeSelector32:
+	# pop the return address from stack
+	pop %rbx
+
+	# save rsp because we need to push it after ss
+	mov %rsp, %rdx
+
+	# use iret to jump to a 32-bit offset in a new code segment
+	# iret will pop cs:rip, flags, then ss:rsp
+	mov %ss, %ax		# need to push ss..
+	push %rax		# push ss instruction not valid in x64 mode,
+				# so use ax
+	push %rdx		# the rsp to load
+	pushfq			# push rflags
+	push %rcx		# cx is code segment selector from caller
+	push %rbx		# push the IP for the next instruction
+
+	# the iretq will behave like ret, with the new cs/ss value loaded
+	iretq
+#endif
+
 .code16
 
 .align 4, 0xff
@@ -202,6 +292,9 @@
 	.word 0xffff, 0x0000
 	.byte 0x00, 0x93, 0xcf, 0x00
 
+	/* gdt selector 0x18, flat code segment (64-bit) */
+	.word 0xffff, 0x0000
+	.byte 0x00, 0x9b, 0xaf, 0x00
 smm_gdt_end:
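
Aside, not part of the change: a standalone C sketch (gdt_entry() is a hypothetical helper) that reproduces the byte layout of the new selector 0x18 descriptor, showing that access byte 0x9b (present, DPL 0, code, execute/read, accessed) plus flags/limit byte 0xaf (G=1, D=0, L=1, limit 0xfffff) encode a flat 64-bit code segment:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: pack a descriptor the way smm_gdt lays it out. */
static uint64_t gdt_entry(uint32_t base, uint32_t limit, uint8_t access,
			  uint8_t flags)
{
	return  (uint64_t)(limit & 0xffff) |
		((uint64_t)(base & 0xffffff) << 16) |
		((uint64_t)access << 40) |
		((uint64_t)((flags & 0xf0) | ((limit >> 16) & 0xf)) << 48) |
		((uint64_t)(base >> 24) << 56);
}

int main(void)
{
	/* base 0, limit 0xfffff (G=1), access 0x9b, flags 0xa0 (L bit set) */
	uint64_t desc = gdt_entry(0, 0xfffff, 0x9b, 0xa0);

	/* Prints 0x00af9b000000ffff, i.e. .word 0xffff,0x0000 .byte 0x00,0x9b,0xaf,0x00 */
	printf("0x%016llx\n", (unsigned long long)desc);
	return 0;
}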