[coreboot-gerrit] Change in ...coreboot[master]: arch/x86/boot: Call payload in protected mode

Patrick Rudolph (Code Review) gerrit at coreboot.org
Sun Dec 9 14:37:52 CET 2018


Patrick Rudolph has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/30118 )


Change subject: arch/x86/boot: Call payload in protected mode
......................................................................

arch/x86/boot: Call payload in protected mode

On ARCH_RAMSTAGE_X86_64, call the payload in protected mode.
Add a helper function to call arbitrary code in protected mode,
similar to the real mode call handler.

Tested using SeaBIOS as payload.
Untested with anything else.

Change-Id: I6552ac30f1b6205e08e16d251328e01ce3fbfd14
Signed-off-by: Patrick Rudolph <siro at das-labor.org>
---
M src/arch/x86/boot.c
M src/arch/x86/c_start.S
M src/include/program_loading.h
3 files changed, 186 insertions(+), 10 deletions(-)



  git pull ssh://review.coreboot.org:29418/coreboot refs/changes/18/30118/1
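
For illustration, a minimal sketch of how the new helper could be used
from 64-bit ramstage. blob_entry and blob_arg are hypothetical
placeholders for a 32-bit entry point and its argument, not part of
this change:

    #include <stdbool.h>
    #include <stdint.h>
    #include <program_loading.h>

    /* Start a 32-bit blob from 64-bit ramstage and collect its
     * return value. */
    static uint32_t run_blob(uintptr_t blob_entry, uint32_t blob_arg)
    {
            /* false: leave long mode entirely; true would stay in
             * 32-bit compatibility mode with paging enabled. */
            return protected_mode_call(false, blob_entry, blob_arg);
    }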

diff --git a/src/arch/x86/boot.c b/src/arch/x86/boot.c
index 2967cf6..cb9f34b 100644
--- a/src/arch/x86/boot.c
+++ b/src/arch/x86/boot.c
@@ -32,13 +32,20 @@
 
 void arch_prog_run(struct prog *prog)
 {
-	__asm__ volatile (
-#ifdef __x86_64__
-		"jmp  *%%rdi\n"
+#ifdef __x86_64__
+#if ENV_RAMSTAGE
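+	/*
+	 * Payloads currently expect a 32-bit protected mode entry, so
+	 * drop out of long mode before jumping to the entry point;
+	 * false selects plain protected mode, not compatibility mode.
+	 */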
+	protected_mode_call(false, (uintptr_t)prog_entry(prog),
+			    (uintptr_t)prog_entry_arg(prog));
 #else
-		"jmp  *%%edi\n"
-#endif
-
+	__asm__ volatile (
+		"jmp  *%%rdi\n"
 		:: "D"(prog_entry(prog))
 	);
+#endif
+#else
+	__asm__ volatile (
+		"jmp  *%%edi\n"
+		:: "D"(prog_entry(prog))
+	);
+#endif
 }
diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S
index 6426ef3..8148e58 100644
--- a/src/arch/x86/c_start.S
+++ b/src/arch/x86/c_start.S
@@ -50,7 +50,7 @@
 	movl	%eax, %gs
 #ifdef __x86_64__
 	mov     $0x48, %ecx
-	call    SetCodeSelector
+	call    SetCodeSelector64
 #endif
 
 	post_code(POST_ENTRY_C_START)		/* post 13 */
@@ -207,7 +207,7 @@
 
 	.section ".text._start", "ax", @progbits
 #ifdef __x86_64__
-SetCodeSelector:
+SetCodeSelector64:
 	# save rsp because iret will align it to a 16 byte boundary
 	mov	%rsp, %rdx
 
@@ -219,14 +219,14 @@
 	push	%rsp
 	pushfq
 	push	%rcx		# cx is code segment selector from caller
-	mov	$setCodeSelectorLongJump, %rax
+	mov	$setCodeSelectorLongJump64, %rax
 	push	%rax
 
 	# the iret will continue at next instruction, with the new cs value
 	# loaded
 	iretq
 
-setCodeSelectorLongJump:
+setCodeSelectorLongJump64:
 	# restore rsp, it might not have been 16-byte aligned on entry
 	mov	%rdx, %rsp
 	ret
@@ -237,3 +237,161 @@
 	.previous
 .code32
 #endif
+
+#ifdef __x86_64__
+
+	/*
+	 * Functions to handle mode switches from long mode to protected
+	 * mode and back, similar to the real mode call handler: leave
+	 * paging and long mode, run the 32-bit callee, then restore PAE,
+	 * EFER.LME and paging on the way out.
+	 */
+	.section .bss, "aw", @nobits
+
+	.section ".text._mode_switch", "ax", @progbits
+.code64
+
+	.globl protected_mode_call
+protected_mode_call:
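+	/*
+	 * Arguments arrive per the SysV AMD64 calling convention:
+	 *   %rdi = compatibility_mode
+	 *   %rsi = func_ptr
+	 *   %rdx = argument
+	 */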
+
+	push %rbp
+	mov %rsp, %rbp
+
+	/* Preserve registers */
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	/* Arguments to stack */
+	push	%rdi
+	push	%rsi
+	push	%rdx
+
+	/* Write back and invalidate caches before leaving long mode. */
+	wbinvd
+
+	/* Running in compatibility mode? */
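+	/*
+	 * -48(%rbp) is the pushed %rdi (compatibility_mode): the five
+	 * callee-saved registers occupy -8 through -40, so the saved
+	 * arguments sit at -48 (%rdi), -56 (%rsi) and -64 (%rdx).
+	 */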
+	mov	-48(%rbp), %rax
+	test	%rax, %rax
+	jne	1f
+
+	/* Disable paging */
+	mov	%cr0, %rax
+	and	$0x7FFFFFFF, %rax
+	mov	%rax, %cr0
+
+	/* Disable long mode */
+	mov	$0xC0000080, %rcx
+	rdmsr
+	and	$(~0x100), %rax
+	wrmsr
+
+	/* Disable PAE */
+	mov	%cr4, %rax
+	and	$(~0x20), %rax
+	mov	%rax, %cr4
+1:
+	/* Switch to the 32-bit code segment (selector 0x10); the iretq
+	 * in SetCodeSelector32 also reloads %ss. */
+	mov	$0x10, %rcx
+	call	SetCodeSelector32
+
+.code32
+	/* Use flat 32-bit data segment. */
+	movl	$0x18, %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	%eax, %ss
+	movl	%eax, %fs
+	movl	%eax, %gs
+
+	movl	-56(%ebp), %eax	/* Function to call */
+	movl	-64(%ebp), %ebx	/* Argument 0 */
+
+	/* Align the stack: round %esp down to 16 bytes; the sub plus the
+	 * 4-byte argument push keep it 16-byte aligned at the call. */
+	andl	$0xFFFFFFF0, %esp
+	subl	$12, %esp
+	pushl	%ebx	/* Argument 0 */
+
+	call	*%eax
+
+	/*
+	 * Save the 32-bit return value as one 64-bit stack slot: push
+	 * the high dword first so the pop %rax below yields it in %eax.
+	 */
+	pushl	$0
+	pushl	%eax
+
+	/* Running in compatibility mode? */
+	mov	-48(%ebp), %eax
+	test	%eax, %eax
+	jne	1f
+
+	/* Enable PAE */
+	mov	%cr4, %eax
+	or	$0x20, %eax
+	mov	%eax, %cr4
+
+	/* Enable long mode */
+	mov	$0xC0000080, %ecx
+	rdmsr
+	or	$0x100, %eax
+	wrmsr
+
+	/* Enable paging */
+	mov	%cr0, %eax
+	or	$0x80000000, %eax
+	mov	%eax, %cr0
+
+	/* Reload the GDT pointer in case the 32-bit code replaced it. */
+	lgdt	%cs:gdtaddr
+1:
+	/* Back to long mode */
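+	/* Selector 0x48 is the 64-bit code segment from the GDT in
+	 * c_start.S, the same one SetCodeSelector64 loads at entry. */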
+	ljmp	$0x48, $1f
+.code64
+1:	movl	$0x18, %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	%eax, %ss
+	movl	%eax, %fs
+	movl	%eax, %gs
+
+	/* Place return value in rax */
+	pop	%rax
+
+	/*
+	 * Restore callee-saved registers: %rsp still points into the
+	 * realigned scratch area, so move it back over the saved
+	 * registers first.
+	 */
+	lea	-40(%rbp), %rsp
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbx
+
+	/* Restore stack pointer */
+	mov %rbp, %rsp
+	pop %rbp
+
+	ret
+
+.code64
+SetCodeSelector32:
+	# save rsp because iret will align it to a 16 byte boundary
+	mov	%rsp, %rdx
+
+	# use iret to jump to a 32-bit offset in a new code segment
+	# iret will pop cs:rip, flags, then ss:rsp
+	mov	%ss, %ax	# need to push ss..
+	push	%rax		# push ss instruction not valid in x64 mode,
+				# so use ax
+	push	%rsp
+	pushfq
+	push	%rcx		# cx is code segment selector from caller
+	mov	$setCodeSelectorLongJump32, %rax
+	push	%rax
+
+	# the iret will continue at next instruction, with the new cs value
+	# loaded
+	iretq
+
+.code32
+setCodeSelectorLongJump32:
+	# restore esp, it might not have been 16-byte aligned on entry
+	mov	%edx, %esp
+	ret
+
+	.previous
+#endif
diff --git a/src/include/program_loading.h b/src/include/program_loading.h
index 468f0b3..84e5194 100644
--- a/src/include/program_loading.h
+++ b/src/include/program_loading.h
@@ -182,6 +182,17 @@
  * if ramstage overwrites low memory. */
 void backup_ramstage_section(uintptr_t base, size_t size);
 
+/* Run a function in protected mode.
+ * @arg compatibility_mode Stay in 32-bit compatibility mode instead of
+ *                         dropping to protected mode
+ * @arg func_ptr           Function to call in protected mode
+ * @arg argument           Argument to pass to the called function
+ *
+ * @return                 The called function's return value
+ */
+uint32_t protected_mode_call(bool compatibility_mode, uintptr_t func_ptr,
+			     uint32_t argument);
+
 /***********************
  *   PAYLOAD LOADING   *
  ***********************/

-- 
To view, visit https://review.coreboot.org/c/coreboot/+/30118
To unsubscribe, or for help writing mail filters, visit https://review.coreboot.org/settings

Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I6552ac30f1b6205e08e16d251328e01ce3fbfd14
Gerrit-Change-Number: 30118
Gerrit-PatchSet: 1
Gerrit-Owner: Patrick Rudolph <siro at das-labor.org>
Gerrit-MessageType: newchange