Patrick Georgi submitted this change.

Approvals:
  build bot (Jenkins): Verified
  Raul Rangel: Looks good to me, approved
  Angel Pons: Looks good to me, but someone else must approve

cpu/x86/smm: Add support for long mode

Enable long mode in the SMM handler.
x86_32 isn't affected by this change.

As the rsm instruction used to leave SMM doesn't restore MSRs, drop
back to protected mode after running smi_handler and restore the
IA32_EFER MSR (which enables long mode support) to its previous value.

NOTE: This commit does NOT introduce a new security model. It uses the
same page tables as the rest of the firmware.
This can be a security risk if someone is able to manipulate the
page tables stored in ROM at runtime. USE FOR TESTING ONLY!

Tested on QEMU Q35.

Change-Id: I8bba4af4688c723fc079ae905dac95f57ea956f8
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/35681
Reviewed-by: Raul Rangel <rrangel@chromium.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
---
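The x86_64 flow around the handler call, rendered as C for orientation.
This is a sketch only: smm_run_handler is an invented name and the real
implementation is the assembly below, but msr_t, rdmsr(), wrmsr() and
IA32_EFER are coreboot's <cpu/x86/msr.h> API; smi_handler's signature is
abbreviated here.

#include <cpu/x86/msr.h>

void smi_handler(unsigned int smm_revision); /* real handler, in C */

static msr_t efer_backup;

static void smm_run_handler(unsigned int smm_revision)
{
	/* rsm restores the CPU from the SMM save state area, but MSRs
	 * are not part of it, so IA32_EFER must be saved by hand. */
	efer_backup = rdmsr(IA32_EFER);

	/* entry64.inc: enable PAE, load CR3, set EFER.LME, enable paging */
	smi_handler(smm_revision);
	/* exit32.inc: drop to protected mode, clearing CR0.PG and EFER.LME */

	/* Put IA32_EFER back exactly as the interrupted context left it. */
	wrmsr(IA32_EFER, efer_backup);
	/* rsm then resumes the interrupted context. */
}
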
M Documentation/arch/x86/index.md
A src/cpu/x86/64bit/exit32.inc
M src/cpu/x86/smm/smmhandler.S
3 files changed, 130 insertions(+), 1 deletion(-)

diff --git a/Documentation/arch/x86/index.md b/Documentation/arch/x86/index.md
index 30dcc10..f5546d1 100644
--- a/Documentation/arch/x86/index.md
+++ b/Documentation/arch/x86/index.md
@@ -45,6 +45,7 @@
* Add x86_64 exception handlers - *DONE*
* Setup page tables for long mode - *DONE*
* Add assembly code for long mode - *DONE*
+* Add assembly code for SMM - *DONE*
* Add assembly code for postcar stage - *TODO*
* Add assembly code to return to protected mode - *TODO*
* Implement reference code for mainboard `emulation/qemu-q35` - *TODO*
diff --git a/src/cpu/x86/64bit/exit32.inc b/src/cpu/x86/64bit/exit32.inc
new file mode 100644
index 0000000..48837d9
--- /dev/null
+++ b/src/cpu/x86/64bit/exit32.inc
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * For dropping from long mode to protected mode.
+ *
+ * For reference see "AMD64 Architecture Programmer's Manual Volume 2",
+ * Document 24593-Rev. 3.31-July 2019 Chapter 5.3
+ *
+ * Clobbers: rax, rbx, rcx, rdx
+ */
+.code64
+
+#include <cpu/x86/msr.h>
+#include <cpu/x86/cr.h>
+#include <arch/rom_segs.h>
+
+drop_longmode:
+ /* Ensure cache is clean. */
+ wbinvd
+
+ /* Set 32-bit code segment and ss */
+ mov $ROM_CODE_SEG, %rcx
+ /* SetCodeSelector32 will drop us to 32-bit compatibility mode on return */
+ call SetCodeSelector32
+
+ /* Skip SetCodeSelector32 */
+.code32
+ jmp __longmode_compatibility
+
+.align 8
+.code64
+SetCodeSelector32:
+ # pop the return address from stack
+ pop %rbx
+
+ # save rsp because we need to push it after ss
+ mov %rsp, %rdx
+
+ # use iret to jump to a 32-bit offset in a new code segment
+ # iret will pop cs:rip, flags, then ss:rsp
+ mov %ss, %ax # need to push ss, but push ss instruction
+ push %rax # not valid in x64 mode, so use ax
+ push %rdx # the rsp to load
+ pushfq # push rflags
+ push %rcx # cx is code segment selector from caller
+ push %rbx # push the IP for the next instruction
+
+ # the iretq will behave like ret, with the new cs/ss value loaded
+ iretq
+
+.align 4
+.code32
+__longmode_compatibility:
+ /* Running in 32-bit compatibility mode */
+
+ /* Use flat data segment */
+ movl $ROM_DATA_SEG, %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %ss
+ movl %eax, %fs
+ movl %eax, %gs
+
+ /* Disable paging. */
+ movl %cr0, %eax
+ andl $(~CR0_PG), %eax
+ movl %eax, %cr0
+
+ /* Disable long mode. */
+ movl $(IA32_EFER), %ecx
+ rdmsr
+ andl $(~EFER_LME), %eax
+ wrmsr
+
+ /* Disable PAE. */
+ movl %cr4, %eax
+ andl $(~CR4_PAE), %eax
+ movl %eax, %cr4
+
+ /* Clear page table register */
+ xor %eax, %eax
+ movl %eax, %cr3
+
+__longmode_exit:
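
A note on SetCodeSelector32 above: long mode has no single instruction that
far-returns while also reloading %ss, so the helper hand-builds an interrupt
return frame and executes iretq. Per the AMD manual chapter cited in the file
header, this CS reload has to happen first; only once CS.L=0 (compatibility
mode) may paging and then EFER.LME be cleared. The frame, expressed as an
illustrative C struct (field names invented; lowest stack address first, the
order iretq pops):

#include <stdint.h>

struct iretq_frame {
	uint64_t rip;    /* return address, previously popped into %rbx */
	uint64_t cs;     /* ROM_CODE_SEG, the 32-bit code selector */
	uint64_t rflags; /* saved by pushfq */
	uint64_t rsp;    /* stack pointer to continue with */
	uint64_t ss;     /* current %ss, re-pushed via %rax */
};
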
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index 036bc83..340840f 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -8,6 +8,7 @@
*/

#include <cpu/x86/lapic_def.h>
+#include <cpu/x86/msr.h>

/*
* +--------------------------------+ 0xaffff
@@ -42,6 +43,14 @@

#define SMM_HANDLER_OFFSET 0x0000

+#if defined(__x86_64__)
+.bss
+ia32efer_backup_eax:
+.long 0
+ia32efer_backup_edx:
+.long 0
+#endif
+
/* initially SMM is some sort of real mode. Let gcc know
* how to treat the SMM handler stub
*/
@@ -159,12 +168,44 @@
/* Get SMM revision */
movl $0xa8000 + 0x7efc, %ebx /* core 0 address */
subl %ebp, %ebx /* subtract core X offset */
+
+#if defined(__x86_64__)
+ /* Backup IA32_EFER. Preserves ebx. */
+ movl $(IA32_EFER), %ecx
+ rdmsr
+ movl %eax, ia32efer_backup_eax
+ movl %edx, ia32efer_backup_edx
+
+ /* Enable long mode. Preserves ebx. */
+#include <cpu/x86/64bit/entry64.inc>
+
+ mov (%ebx), %rdi
+
+#else
movl (%ebx), %eax
pushl %eax
+#endif

- /* Call 32bit C handler */
+ /* Call C handler */
call smi_handler

+#if defined(__x86_64__)
+ /*
+ * The only reason to go back to protected mode is that RSM doesn't restore
+ * MSRs, and IA32_EFER was modified by entering long mode.
+ * Drop to protected mode to safely operate on the IA32_EFER MSR.
+ */
+
+ /* Disable long mode. */
+ #include <cpu/x86/64bit/exit32.inc>
+
+ /* Restore IA32_EFER as RSM doesn't restore MSRs. */
+ movl $(IA32_EFER), %ecx
+ movl ia32efer_backup_eax, %eax
+ movl ia32efer_backup_edx, %edx
+ wrmsr
+#endif
+
/* To return, just do rsm. It will "clean up" protected mode */
rsm

@@ -190,6 +231,9 @@
.word 0xffff, 0x0000
.byte 0x00, 0x93, 0xcf, 0x00

+ /* gdt selector 0x18, flat code segment (64-bit) */
+ .word 0xffff, 0x0000
+ .byte 0x00, 0x9b, 0xaf, 0x00
smm_gdt_end:
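
The new selector 0x18 descriptor can be sanity-checked with a small host-side
decoder (a sketch following the architectural segment-descriptor layout): it
should report base 0, limit 0xfffff with 4KiB granularity, access byte 0x9b
(present, DPL 0, readable code) and L=1/D=0, i.e. a flat 64-bit code segment.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* .word 0xffff, 0x0000   .byte 0x00, 0x9b, 0xaf, 0x00 */
	const uint8_t d[8] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x9b, 0xaf, 0x00 };

	uint32_t limit = d[0] | (d[1] << 8) | ((uint32_t)(d[6] & 0x0f) << 16);
	uint32_t base = d[2] | ((uint32_t)d[3] << 8) |
			((uint32_t)d[4] << 16) | ((uint32_t)d[7] << 24);
	uint8_t access = d[5];
	uint8_t flags = d[6] >> 4;

	printf("base=0x%x limit=0x%x granularity=%s\n",
	       base, limit, (flags & 0x8) ? "4KiB" : "byte");
	printf("present=%u dpl=%u code=%u long=%u def32=%u\n",
	       (access >> 7) & 1, (access >> 5) & 3,
	       (access >> 3) & 1, (flags >> 1) & 1, (flags >> 2) & 1);
	return 0;
}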




Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I8bba4af4688c723fc079ae905dac95f57ea956f8
Gerrit-Change-Number: 35681
Gerrit-PatchSet: 18
Gerrit-Owner: Patrick Rudolph <siro@das-labor.org>
Gerrit-Reviewer: Aaron Durbin <adurbin@chromium.org>
Gerrit-Reviewer: Angel Pons <th3fanbus@gmail.com>
Gerrit-Reviewer: Arthur Heymans <arthur@aheymans.xyz>
Gerrit-Reviewer: David Hendricks <david.hendricks@gmail.com>
Gerrit-Reviewer: Lee Leahy <leroy.p.leahy@intel.com>
Gerrit-Reviewer: Martin Roth <martinroth@google.com>
Gerrit-Reviewer: Patrick Georgi <pgeorgi@google.com>
Gerrit-Reviewer: Patrick Rudolph <patrick.rudolph@9elements.com>
Gerrit-Reviewer: Patrick Rudolph <siro@das-labor.org>
Gerrit-Reviewer: Paul Menzel <paulepanter@users.sourceforge.net>
Gerrit-Reviewer: Raul Rangel <rrangel@chromium.org>
Gerrit-Reviewer: Rudolf Marek <r.marek@assembler.cz>
Gerrit-Reviewer: Stefan Reinauer <stefan.reinauer@coreboot.org>
Gerrit-Reviewer: build bot (Jenkins) <no-reply@coreboot.org>
Gerrit-Reviewer: ron minnich <rminnich@gmail.com>
Gerrit-CC: Patrick Rudolph
Gerrit-MessageType: merged