Always enable caching at the start of 32bit code, and always make sure the paging flag is off. Because this alters the cr0 register, back it up and restore it when using call32().
Also, rename get/setcr0() to cr0_read/write() to more closely match other register access functions.
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
---
 src/fw/smp.c    |  3 ---
 src/post.c      |  3 ---
 src/romlayout.S |  1 +
 src/stacks.c    | 45 ++++++++++++++++++++++++++++-----------------
 src/x86.h       |  9 ++++++---
 5 files changed, 35 insertions(+), 26 deletions(-)
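For orientation before the hunks: all of the cr0 handling in this patch reduces to a few bit operations on the control register. The following standalone sketch restates the helpers that the src/x86.h hunk below introduces, using stdint types and spelling out the architectural bit values; it is an illustration of the idiom, not code from the patch.

#include <stdint.h>

/* cr0 bits used by this patch; the values are fixed by the x86 spec. */
#define CR0_PG (1u << 31)   /* paging enable */
#define CR0_CD (1u << 30)   /* cache disable */
#define CR0_NW (1u << 29)   /* not write-through */
#define CR0_PE (1u << 0)    /* protection (protected mode) enable */

static inline uint32_t cr0_read(void) {
    uint32_t cr0;
    asm("movl %%cr0, %0" : "=r"(cr0));
    return cr0;
}
static inline void cr0_write(uint32_t cr0) {
    asm("movl %0, %%cr0" : : "r"(cr0));
}
/* Clear the 'off' bits, then set the 'on' bits. */
static inline void cr0_mask(uint32_t off, uint32_t on) {
    cr0_write((cr0_read() & ~off) | on);
}

With these helpers, the commit message's "caching on, paging off" is cr0_mask(CR0_PG|CR0_CD|CR0_NW, 0): CD and NW disable caching when set, so clearing them enables it, and clearing PG turns paging off.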
diff --git a/src/fw/smp.c b/src/fw/smp.c
index a466ea6..579acdb 100644
--- a/src/fw/smp.c
+++ b/src/fw/smp.c
@@ -52,9 +52,6 @@ handle_smp(void)
     if (!CONFIG_QEMU)
         return;
 
-    // Enable CPU caching
-    setcr0(getcr0() & ~(CR0_CD|CR0_NW));
-
     // Detect apic_id
     u32 eax, ebx, ecx, cpuid_features;
     cpuid(1, &eax, &ebx, &ecx, &cpuid_features);
diff --git a/src/post.c b/src/post.c
index 6803585..e19b06c 100644
--- a/src/post.c
+++ b/src/post.c
@@ -158,9 +158,6 @@ device_hardware_setup(void)
 
 static void
 platform_hardware_setup(void)
 {
-    // Enable CPU caching
-    setcr0(getcr0() & ~(CR0_CD|CR0_NW));
-
     // Make sure legacy DMA isn't running.
     dma_setup();
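Both call sites deleted above used the same idiom, which in the post-rename spelling (not code from this patch) would read:

    /* Equivalent of the deleted setcr0(getcr0() & ~(CR0_CD|CR0_NW)) calls;
     * redundant now that transition32 (next hunk) clears these bits on
     * every entry to 32bit mode. */
    cr0_mask(CR0_CD | CR0_NW, 0);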
diff --git a/src/romlayout.S b/src/romlayout.S
index fefc212..823188b 100644
--- a/src/romlayout.S
+++ b/src/romlayout.S
@@ -47,6 +47,7 @@ transition32_nmi_off:
         // Enable protected mode
         movl %cr0, %ecx
+        andl $~(CR0_PG|CR0_CD|CR0_NW), %ecx
         orl $CR0_PE, %ecx
         movl %ecx, %cr0
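In C terms (reusing the sketch helpers above; the authoritative version is the assembly itself), the patched sequence performs a single read-modify-write of %cr0:

    uint32_t cr0 = cr0_read();
    cr0 &= ~(CR0_PG | CR0_CD | CR0_NW);  /* caching on, paging off */
    cr0 |= CR0_PE;                       /* protected mode on */
    cr0_write(cr0);

Folding the cache/paging fixup into the cr0 write that transition32 already performs costs one extra instruction and guarantees the bits are correct on every 16->32 transition, which is what makes the per-callsite fixups deleted above safe to drop.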
diff --git a/src/stacks.c b/src/stacks.c
index a156138..fa9c7db 100644
--- a/src/stacks.c
+++ b/src/stacks.c
@@ -1,6 +1,6 @@
 // Code for manipulating stack locations.
 //
-// Copyright (C) 2009-2014 Kevin O'Connor <kevin@koconnor.net>
+// Copyright (C) 2009-2015 Kevin O'Connor <kevin@koconnor.net>
 //
 // This file may be distributed under the terms of the GNU LGPLv3 license.
@@ -28,6 +28,7 @@ struct {
     u8 cmosindex;
     u8 a20;
     u16 ss, fs, gs;
+    u32 cr0;
     struct descloc_s gdt;
 } Call16Data VARLOW;
@@ -37,19 +38,17 @@ struct {
 
 int HaveSmmCall32 VARFSEG;
 
 // Backup state in preparation for call32
-static void
+static int
 call32_prep(u8 method)
 {
-    // Backup cmos index register and disable nmi
-    u8 cmosindex = inb(PORT_CMOS_INDEX);
-    outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX);
-    inb(PORT_CMOS_DATA);
-    SET_LOW(Call16Data.cmosindex, cmosindex);
-
-    // Backup ss
-    SET_LOW(Call16Data.ss, GET_SEG(SS));
-
     if (!CONFIG_CALL32_SMM || method != C16_SMM) {
+        // Backup cr0
+        u32 cr0 = cr0_read();
+        if (cr0 & CR0_PE)
+            // Called in 16bit protected mode?!
+            return -1;
+        SET_LOW(Call16Data.cr0, cr0);
+
         // Backup fs/gs and gdt
         SET_LOW(Call16Data.fs, GET_SEG(FS));
         SET_LOW(Call16Data.gs, GET_SEG(GS));
@@ -62,7 +61,17 @@ call32_prep(u8 method)
         SET_LOW(Call16Data.a20, set_a20(1));
     }
 
+    // Backup ss
+    SET_LOW(Call16Data.ss, GET_SEG(SS));
+
+    // Backup cmos index register and disable nmi
+    u8 cmosindex = inb(PORT_CMOS_INDEX);
+    outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX);
+    inb(PORT_CMOS_DATA);
+    SET_LOW(Call16Data.cmosindex, cmosindex);
+
     SET_LOW(Call16Data.method, method);
+    return 0;
 }
 
 // Restore state backed up during call32
@@ -84,6 +93,11 @@ call32_post(void)
         lgdt(&gdt);
         SET_SEG(FS, GET_LOW(Call16Data.fs));
         SET_SEG(GS, GET_LOW(Call16Data.gs));
+
+        // Restore cr0
+        u32 cr0_caching = GET_LOW(Call16Data.cr0) & (CR0_CD|CR0_NW);
+        if (cr0_caching)
+            cr0_mask(CR0_CD|CR0_NW, cr0_caching);
     }
 
     // Restore cmos index register
@@ -220,14 +234,11 @@ call32(void *func, u32 eax, u32 errret)
     ASSERT16();
     if (CONFIG_CALL32_SMM && GET_GLOBAL(HaveSmmCall32))
         return call32_smm(func, eax);
-    u32 cr0 = getcr0();
-    if (cr0 & CR0_PE)
-        // Called in 16bit protected mode?!
-        return errret;
-
     // Jump directly to 32bit mode - this clobbers the 16bit segment
     // selector registers.
-    call32_prep(C16_BIG);
+    int ret = call32_prep(C16_BIG);
+    if (ret)
+        return errret;
     u32 bkup_ss, bkup_esp;
     asm volatile(
         // Backup ss/esp / set esp to flat stack location
diff --git a/src/x86.h b/src/x86.h
index 234a6e2..19d404f 100644
--- a/src/x86.h
+++ b/src/x86.h
@@ -75,15 +75,18 @@ static inline void __cpuid(u32 index, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
         : "0" (index));
 }
 
-static inline u32 getcr0(void) {
+static inline u32 cr0_read(void) {
     u32 cr0;
     asm("movl %%cr0, %0" : "=r"(cr0));
     return cr0;
 }
-static inline void setcr0(u32 cr0) {
+static inline void cr0_write(u32 cr0) {
     asm("movl %0, %%cr0" : : "r"(cr0));
 }
-static inline u16 getcr0_vm86(void) {
+static inline void cr0_mask(u32 off, u32 on) {
+    cr0_write((cr0_read() & ~off) | on);
+}
+static inline u16 cr0_vm86_read(void) {
     u16 cr0;
     asm("smsww %0" : "=r"(cr0));
     return cr0;
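Finally, a minimal model of the cr0 round trip that call32_prep()/call32_post() now implement; saved_cr0, prep_sketch, and post_sketch are hypothetical stand-ins (the real code stores the value in the Call16Data.cr0 field added above):

static uint32_t saved_cr0;

static int prep_sketch(void) {
    uint32_t cr0 = cr0_read();
    if (cr0 & CR0_PE)
        return -1;               /* already in protected mode?! */
    saved_cr0 = cr0;             /* remember the 16bit caller's cr0 */
    /* ... transition32 then clears CR0_PG|CR0_CD|CR0_NW ... */
    return 0;
}

static void post_sketch(void) {
    /* Reinstate only the cache-control bits the caller had set;
     * everything else keeps the state transition32 established. */
    uint32_t caching = saved_cr0 & (CR0_CD | CR0_NW);
    if (caching)
        cr0_mask(CR0_CD | CR0_NW, caching);
}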