This series simplifies some of the existing trampoline code.
-Kevin
Kevin O'Connor (5):
  Unify smm/sloppy variants of call32_prep/post and call16_helper
  Rename Call32Data to Call16Data
  Unify inline assembler in variants of call16 functions
  Unify call32_sloppy() and call32()
  Use transition32_nmi_off from call32() and call16_back()
 src/romlayout.S |  10 +-
 src/stacks.c    | 282 +++++++++++++++++++++----------------------------------
 2 files changed, 106 insertions(+), 186 deletions(-)
The "smm" and "sloppy" variants of the 16bit to 32bit trampoline backup/restore code are very similar. They can be unified into a single copy of each function.
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
---
 src/stacks.c | 132 ++++++++++++++++++++++-------------------------------------
 1 file changed, 50 insertions(+), 82 deletions(-)
diff --git a/src/stacks.c b/src/stacks.c index 708add5..339b92b 100644 --- a/src/stacks.c +++ b/src/stacks.c @@ -35,9 +35,9 @@ struct {
int HaveSmmCall32 VARFSEG;
-// Backup state in preparation for call32_smm() +// Backup state in preparation for call32 static void -call32_smm_prep(void) +call32_prep(u8 method) { // Backup cmos index register and disable nmi u8 cmosindex = inb(PORT_CMOS_INDEX); @@ -48,19 +48,57 @@ call32_smm_prep(void) // Backup ss SET_LOW(Call32Data.ss, GET_SEG(SS));
- SET_LOW(Call32Data.method, C32_SMM); + if (!CONFIG_CALL32_SMM || method != C32_SMM) { + // Backup fs/gs and gdt + SET_LOW(Call32Data.fs, GET_SEG(FS)); + SET_LOW(Call32Data.gs, GET_SEG(GS)); + struct descloc_s gdt; + sgdt(&gdt); + SET_LOW(Call32Data.gdt.length, gdt.length); + SET_LOW(Call32Data.gdt.addr, gdt.addr); + + // Enable a20 and backup its previous state + SET_LOW(Call32Data.a20, set_a20(1)); + } + + SET_LOW(Call32Data.method, method); }
-// Restore state backed up during call32_smm() -static void -call32_smm_post(void) +// Restore state backed up during call32 +static u8 +call32_post(void) { + u8 method = GET_LOW(Call32Data.method); SET_LOW(Call32Data.method, 0); SET_LOW(Call32Data.ss, 0);
+ if (!CONFIG_CALL32_SMM || method != C32_SMM) { + // Restore a20 + set_a20(GET_LOW(Call32Data.a20)); + + // Restore gdt and fs/gs + struct descloc_s gdt; + gdt.length = GET_LOW(Call32Data.gdt.length); + gdt.addr = GET_LOW(Call32Data.gdt.addr); + lgdt(&gdt); + SET_SEG(FS, GET_LOW(Call32Data.fs)); + SET_SEG(GS, GET_LOW(Call32Data.gs)); + } + // Restore cmos index register outb(GET_LOW(Call32Data.cmosindex), PORT_CMOS_INDEX); inb(PORT_CMOS_DATA); + return method; +} + +// 16bit handler code called from call16_sloppy() / call16_smm() +u32 VISIBLE16 +call16_helper(u32 eax, u32 edx, u32 (*func)(u32 eax, u32 edx)) +{ + u8 method = call32_post(); + u32 ret = func(eax, edx); + call32_prep(method); + return ret; }
#define ASM32_SWITCH16 " .pushsection .text.32fseg." UNIQSEC "\n .code16\n" @@ -74,7 +112,7 @@ call32_smm(void *func, u32 eax) { ASSERT16(); dprintf(9, "call32_smm %p %x\n", func, eax); - call32_smm_prep(); + call32_prep(C32_SMM); u32 bkup_esp; asm volatile( // Backup esp / set esp to flat stack location @@ -109,24 +147,12 @@ call32_smm(void *func, u32 eax) : "=&r" (bkup_esp), "+r" (eax) : "r" (func) : "eax", "ecx", "edx", "ebx", "cc", "memory"); - call32_smm_post(); + call32_post();
dprintf(9, "call32_smm done %p %x\n", func, eax); return eax; }
-// 16bit handler code called from call16_smm() -u32 VISIBLE16 -call16_smm_helper(u32 eax, u32 edx, u32 (*func)(u32 eax, u32 edx)) -{ - if (!CONFIG_CALL32_SMM) - return eax; - call32_smm_post(); - u32 ret = func(eax, edx); - call32_smm_prep(); - return ret; -} - static u32 call16_smm(u32 eax, u32 edx, void *func) { @@ -151,7 +177,7 @@ call16_smm(u32 eax, u32 edx, void *func) ASM32_SWITCH16 "1:movl %1, %%eax\n" " movl %3, %%ecx\n" - " calll _cfunc16_call16_smm_helper\n" + " calll _cfunc16_call16_helper\n" " movl %%eax, %1\n"
" movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n" @@ -170,61 +196,13 @@ call16_smm(u32 eax, u32 edx, void *func) return eax; }
-// Backup state in preparation for call32_sloppy() -static void -call32_sloppy_prep(void) -{ - // Backup cmos index register and disable nmi - u8 cmosindex = inb(PORT_CMOS_INDEX); - outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX); - inb(PORT_CMOS_DATA); - SET_LOW(Call32Data.cmosindex, cmosindex); - - // Enable a20 and backup its previous state - SET_LOW(Call32Data.a20, set_a20(1)); - - // Backup ss/fs/gs and gdt - SET_LOW(Call32Data.ss, GET_SEG(SS)); - SET_LOW(Call32Data.fs, GET_SEG(FS)); - SET_LOW(Call32Data.gs, GET_SEG(GS)); - struct descloc_s gdt; - sgdt(&gdt); - SET_LOW(Call32Data.gdt.length, gdt.length); - SET_LOW(Call32Data.gdt.addr, gdt.addr); - - SET_LOW(Call32Data.method, C32_SLOPPY); -} - -// Restore state backed up during call32_sloppy() -static void -call32_sloppy_post(void) -{ - SET_LOW(Call32Data.method, 0); - SET_LOW(Call32Data.ss, 0); - - // Restore gdt and fs/gs - struct descloc_s gdt; - gdt.length = GET_LOW(Call32Data.gdt.length); - gdt.addr = GET_LOW(Call32Data.gdt.addr); - lgdt(&gdt); - SET_SEG(FS, GET_LOW(Call32Data.fs)); - SET_SEG(GS, GET_LOW(Call32Data.gs)); - - // Restore a20 - set_a20(GET_LOW(Call32Data.a20)); - - // Restore cmos index register - outb(GET_LOW(Call32Data.cmosindex), PORT_CMOS_INDEX); - inb(PORT_CMOS_DATA); -} - // Call a C function in 32bit mode. This clobbers the 16bit segment // selector registers. static u32 call32_sloppy(void *func, u32 eax) { ASSERT16(); - call32_sloppy_prep(); + call32_prep(C32_SLOPPY); u32 bkup_ss, bkup_esp; asm volatile( // Backup ss/esp / set esp to flat stack location @@ -250,20 +228,10 @@ call32_sloppy(void *func, u32 eax) : "=&r" (bkup_ss), "=&r" (bkup_esp), "+a" (eax) : "r" (func) : "ecx", "edx", "cc", "memory"); - call32_sloppy_post(); + call32_post(); return eax; }
-// 16bit handler code called from call16_sloppy() -u32 VISIBLE16 -call16_sloppy_helper(u32 eax, u32 edx, u32 (*func)(u32 eax, u32 edx)) -{ - call32_sloppy_post(); - u32 ret = func(eax, edx); - call32_sloppy_prep(); - return ret; -} - // Jump back to 16bit mode while in 32bit mode from call32_sloppy() static u32 call16_sloppy(u32 eax, u32 edx, void *func) @@ -286,7 +254,7 @@ call16_sloppy(u32 eax, u32 edx, void *func) " movw %%cx, %%ds\n" " movl %2, %%edx\n" " movl %1, %%ecx\n" - " calll _cfunc16_call16_sloppy_helper\n" + " calll _cfunc16_call16_helper\n" // Return to 32bit and restore esp " movl $2f, %%edx\n" " jmp transition32\n"
The variable stores information on how and what to restore during a call to 16bit code, so Call16Data is a better name.
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
---
 src/stacks.c | 58 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/src/stacks.c b/src/stacks.c index 339b92b..7c6ccd2 100644 --- a/src/stacks.c +++ b/src/stacks.c @@ -28,10 +28,10 @@ struct { u8 a20; u16 ss, fs, gs; struct descloc_s gdt; -} Call32Data VARLOW; +} Call16Data VARLOW;
-#define C32_SLOPPY 1 -#define C32_SMM 2 +#define C16_SLOPPY 1 +#define C16_SMM 2
int HaveSmmCall32 VARFSEG;
@@ -43,50 +43,50 @@ call32_prep(u8 method) u8 cmosindex = inb(PORT_CMOS_INDEX); outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX); inb(PORT_CMOS_DATA); - SET_LOW(Call32Data.cmosindex, cmosindex); + SET_LOW(Call16Data.cmosindex, cmosindex);
// Backup ss - SET_LOW(Call32Data.ss, GET_SEG(SS)); + SET_LOW(Call16Data.ss, GET_SEG(SS));
- if (!CONFIG_CALL32_SMM || method != C32_SMM) { + if (!CONFIG_CALL32_SMM || method != C16_SMM) { // Backup fs/gs and gdt - SET_LOW(Call32Data.fs, GET_SEG(FS)); - SET_LOW(Call32Data.gs, GET_SEG(GS)); + SET_LOW(Call16Data.fs, GET_SEG(FS)); + SET_LOW(Call16Data.gs, GET_SEG(GS)); struct descloc_s gdt; sgdt(&gdt); - SET_LOW(Call32Data.gdt.length, gdt.length); - SET_LOW(Call32Data.gdt.addr, gdt.addr); + SET_LOW(Call16Data.gdt.length, gdt.length); + SET_LOW(Call16Data.gdt.addr, gdt.addr);
// Enable a20 and backup its previous state - SET_LOW(Call32Data.a20, set_a20(1)); + SET_LOW(Call16Data.a20, set_a20(1)); }
- SET_LOW(Call32Data.method, method); + SET_LOW(Call16Data.method, method); }
// Restore state backed up during call32 static u8 call32_post(void) { - u8 method = GET_LOW(Call32Data.method); - SET_LOW(Call32Data.method, 0); - SET_LOW(Call32Data.ss, 0); + u8 method = GET_LOW(Call16Data.method); + SET_LOW(Call16Data.method, 0); + SET_LOW(Call16Data.ss, 0);
- if (!CONFIG_CALL32_SMM || method != C32_SMM) { + if (!CONFIG_CALL32_SMM || method != C16_SMM) { // Restore a20 - set_a20(GET_LOW(Call32Data.a20)); + set_a20(GET_LOW(Call16Data.a20));
// Restore gdt and fs/gs struct descloc_s gdt; - gdt.length = GET_LOW(Call32Data.gdt.length); - gdt.addr = GET_LOW(Call32Data.gdt.addr); + gdt.length = GET_LOW(Call16Data.gdt.length); + gdt.addr = GET_LOW(Call16Data.gdt.addr); lgdt(&gdt); - SET_SEG(FS, GET_LOW(Call32Data.fs)); - SET_SEG(GS, GET_LOW(Call32Data.gs)); + SET_SEG(FS, GET_LOW(Call16Data.fs)); + SET_SEG(GS, GET_LOW(Call16Data.gs)); }
// Restore cmos index register - outb(GET_LOW(Call32Data.cmosindex), PORT_CMOS_INDEX); + outb(GET_LOW(Call16Data.cmosindex), PORT_CMOS_INDEX); inb(PORT_CMOS_DATA); return method; } @@ -112,7 +112,7 @@ call32_smm(void *func, u32 eax) { ASSERT16(); dprintf(9, "call32_smm %p %x\n", func, eax); - call32_prep(C32_SMM); + call32_prep(C16_SMM); u32 bkup_esp; asm volatile( // Backup esp / set esp to flat stack location @@ -161,7 +161,7 @@ call16_smm(u32 eax, u32 edx, void *func) return eax; func -= BUILD_BIOS_ADDR; dprintf(9, "call16_smm %p %x %x\n", func, eax, edx); - u32 stackoffset = Call32Data.ss << 4; + u32 stackoffset = Call16Data.ss << 4; asm volatile( // Restore esp " subl %0, %%esp\n" @@ -202,7 +202,7 @@ static u32 call32_sloppy(void *func, u32 eax) { ASSERT16(); - call32_prep(C32_SLOPPY); + call32_prep(C16_SLOPPY); u32 bkup_ss, bkup_esp; asm volatile( // Backup ss/esp / set esp to flat stack location @@ -240,7 +240,7 @@ call16_sloppy(u32 eax, u32 edx, void *func) if (getesp() > MAIN_STACK_MAX) panic("call16_sloppy with invalid stack\n"); func -= BUILD_BIOS_ADDR; - u32 stackseg = Call32Data.ss; + u32 stackseg = Call16Data.ss; asm volatile( // Transition to 16bit mode " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n" @@ -339,9 +339,9 @@ static u32 call16_back(u32 eax, u32 edx, void *func) { ASSERT32FLAT(); - if (CONFIG_CALL32_SMM && Call32Data.method == C32_SMM) + if (CONFIG_CALL32_SMM && Call16Data.method == C16_SMM) return call16_smm(eax, edx, func); - if (Call32Data.method == C32_SLOPPY) + if (Call16Data.method == C16_SLOPPY) return call16_sloppy(eax, edx, func); if (in_post()) return call16big(eax, edx, func); @@ -475,7 +475,7 @@ __call16_int(struct bregs *callregs, u16 offset) callregs->code.offset = offset; if (!MODESEGMENT) { callregs->code.seg = SEG_BIOS; - _farcall16((void*)callregs - Call32Data.ss * 16, Call32Data.ss); + _farcall16((void*)callregs - Call16Data.ss * 16, Call16Data.ss); return; } callregs->code.seg = GET_SEG(CS);
The assembler between call16(), call16big() and call16_sloppy() are very similar. Rework the functions so that a single version of the inline assembly can be used for all variants.
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
---
 src/romlayout.S |   9 -----
 src/stacks.c    | 102 +++++++++++++++++++------------------------------------
 2 files changed, 34 insertions(+), 77 deletions(-)
diff --git a/src/romlayout.S b/src/romlayout.S index d78737b..e223cdc 100644 --- a/src/romlayout.S +++ b/src/romlayout.S @@ -71,15 +71,6 @@ transition32_nmi_off: .global transition16big .code32 transition16: -#if CONFIG_DISABLE_A20 - // disable a20 - movl %eax, %ecx - inb $PORT_A20, %al - andb $~A20_ENABLE_BIT, %al - outb %al, $PORT_A20 - movl %ecx, %eax -#endif - // Reset data segment limits movl $SEG32_MODE16_DS, %ecx movw %cx, %ds diff --git a/src/stacks.c b/src/stacks.c index 7c6ccd2..f92a484 100644 --- a/src/stacks.c +++ b/src/stacks.c @@ -13,6 +13,7 @@ #include "output.h" // dprintf #include "romfile.h" // romfile_loadint #include "stacks.h" // struct mutex_s +#include "string.h" // memset #include "util.h" // useRTC
#define MAIN_STACK_MAX (1024*1024) @@ -30,8 +31,8 @@ struct { struct descloc_s gdt; } Call16Data VARLOW;
-#define C16_SLOPPY 1 -#define C16_SMM 2 +#define C16_BIG 1 +#define C16_SMM 2
int HaveSmmCall32 VARFSEG;
@@ -91,7 +92,7 @@ call32_post(void) return method; }
-// 16bit handler code called from call16_sloppy() / call16_smm() +// 16bit handler code called from call16_back() / call16_smm() u32 VISIBLE16 call16_helper(u32 eax, u32 edx, u32 (*func)(u32 eax, u32 edx)) { @@ -202,7 +203,7 @@ static u32 call32_sloppy(void *func, u32 eax) { ASSERT16(); - call32_prep(C16_SLOPPY); + call32_prep(C16_BIG); u32 bkup_ss, bkup_esp; asm volatile( // Backup ss/esp / set esp to flat stack location @@ -232,37 +233,45 @@ call32_sloppy(void *func, u32 eax) return eax; }
-// Jump back to 16bit mode while in 32bit mode from call32_sloppy() +// Call a 16bit SeaBIOS function, restoring the mode from last call32(). static u32 -call16_sloppy(u32 eax, u32 edx, void *func) +call16_back(u32 eax, u32 edx, void *func) { ASSERT32FLAT(); if (getesp() > MAIN_STACK_MAX) - panic("call16_sloppy with invalid stack\n"); + panic("call16_back with invalid stack\n"); + if (CONFIG_CALL32_SMM && Call16Data.method == C16_SMM) + return call16_smm(eax, edx, func); + + extern void transition16big(void); + extern void transition16(void); + void *thunk = transition16; + if (Call16Data.method == C16_BIG || in_post()) + thunk = transition16big; func -= BUILD_BIOS_ADDR; u32 stackseg = Call16Data.ss; asm volatile( // Transition to 16bit mode " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n" - " jmp transition16big\n" + " jmp *%%ecx\n" // Setup ss/esp and call func ASM32_SWITCH16 - "1:movl %3, %%ecx\n" - " shll $4, %3\n" + "1:movl %2, %%ecx\n" + " shll $4, %2\n" " movw %%cx, %%ss\n" - " subl %3, %%esp\n" + " subl %2, %%esp\n" " movw %%cx, %%ds\n" - " movl %2, %%edx\n" - " movl %1, %%ecx\n" + " movl %4, %%edx\n" + " movl %3, %%ecx\n" " calll _cfunc16_call16_helper\n" // Return to 32bit and restore esp " movl $2f, %%edx\n" " jmp transition32\n" ASM32_BACK32 - "2:addl %3, %%esp\n" - : "+a" (eax) - : "r" (func), "r" (edx), "r" (stackseg) - : "edx", "ecx", "cc", "memory"); + "2:addl %2, %%esp\n" + : "+a" (eax), "+c"(thunk), "+r"(stackseg) + : "r" (func), "r" (edx) + : "edx", "cc", "memory"); return eax; }
@@ -280,31 +289,16 @@ call32(void *func, u32 eax, u32 errret) return call32_sloppy(func, eax); }
-// Call a 16bit SeaBIOS function from a 32bit SeaBIOS function. +// Call a 16bit SeaBIOS function in regular ("non-big") mode. static u32 call16(u32 eax, u32 edx, void *func) { ASSERT32FLAT(); if (getesp() > BUILD_STACK_ADDR) panic("call16 with invalid stack\n"); - func -= BUILD_BIOS_ADDR; - asm volatile( - // Transition to 16bit mode - " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n" - " jmp transition16\n" - // Call func - ASM32_SWITCH16 - "1:movl %2, %%edx\n" - " calll *%1\n" - // Return to 32bit - " movl $2f, %%edx\n" - " jmp transition32\n" - ASM32_BACK32 - "2:\n" - : "+a" (eax) - : "r" (func), "r" (edx) - : "edx", "ecx", "cc", "memory"); - return eax; + memset(&Call16Data, 0, sizeof(Call16Data)); + Call16Data.a20 = !CONFIG_DISABLE_A20; + return call16_back(eax, edx, func); }
// Call a 16bit SeaBIOS function in "big real" mode. @@ -314,38 +308,10 @@ call16big(u32 eax, u32 edx, void *func) ASSERT32FLAT(); if (getesp() > BUILD_STACK_ADDR) panic("call16big with invalid stack\n"); - func -= BUILD_BIOS_ADDR; - asm volatile( - // Transition to 16bit mode - " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n" - " jmp transition16big\n" - // Call func - ASM32_SWITCH16 - "1:movl %2, %%edx\n" - " calll *%1\n" - // Return to 32bit - " movl $2f, %%edx\n" - " jmp transition32\n" - ASM32_BACK32 - "2:\n" - : "+a" (eax) - : "r" (func), "r" (edx) - : "edx", "ecx", "cc", "memory"); - return eax; -} - -// Call a 16bit SeaBIOS function, restoring the mode from last call32(). -static u32 -call16_back(u32 eax, u32 edx, void *func) -{ - ASSERT32FLAT(); - if (CONFIG_CALL32_SMM && Call16Data.method == C16_SMM) - return call16_smm(eax, edx, func); - if (Call16Data.method == C16_SLOPPY) - return call16_sloppy(eax, edx, func); - if (in_post()) - return call16big(eax, edx, func); - return call16(eax, edx, func); + memset(&Call16Data, 0, sizeof(Call16Data)); + Call16Data.method = C16_BIG; + Call16Data.a20 = 1; + return call16_back(eax, edx, func); }
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
---
 src/stacks.c | 30 ++++++++++++------------------
 1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/src/stacks.c b/src/stacks.c index f92a484..a1b8677 100644 --- a/src/stacks.c +++ b/src/stacks.c @@ -197,12 +197,20 @@ call16_smm(u32 eax, u32 edx, void *func) return eax; }
-// Call a C function in 32bit mode. This clobbers the 16bit segment -// selector registers. -static u32 -call32_sloppy(void *func, u32 eax) +// Call a 32bit SeaBIOS function from a 16bit SeaBIOS function. +u32 VISIBLE16 +call32(void *func, u32 eax, u32 errret) { ASSERT16(); + if (CONFIG_CALL32_SMM && GET_GLOBAL(HaveSmmCall32)) + return call32_smm(func, eax); + u32 cr0 = getcr0(); + if (cr0 & CR0_PE) + // Called in 16bit protected mode?! + return errret; + + // Jump directly to 32bit mode - this clobbers the 16bit segment + // selector registers. call32_prep(C16_BIG); u32 bkup_ss, bkup_esp; asm volatile( @@ -275,20 +283,6 @@ call16_back(u32 eax, u32 edx, void *func) return eax; }
-// Call a 32bit SeaBIOS function from a 16bit SeaBIOS function. -u32 VISIBLE16 -call32(void *func, u32 eax, u32 errret) -{ - ASSERT16(); - if (CONFIG_CALL32_SMM && GET_GLOBAL(HaveSmmCall32)) - return call32_smm(func, eax); - u32 cr0 = getcr0(); - if (cr0 & CR0_PE) - // Called in 16bit protected mode?! - return errret; - return call32_sloppy(func, eax); -} - // Call a 16bit SeaBIOS function in regular ("non-big") mode. static u32 call16(u32 eax, u32 edx, void *func)
The call32() and call16_back() functions will always disable NMI and enable a20 (via the call32_prep() function) so it is safe to use the _nmi_off variant of transition32.
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
---
 src/romlayout.S | 1 +
 src/stacks.c    | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/romlayout.S b/src/romlayout.S index e223cdc..fefc212 100644 --- a/src/romlayout.S +++ b/src/romlayout.S @@ -22,6 +22,7 @@ // %edx = return location (in 32bit mode) // Clobbers: ecx, flags, segment registers, cr0, idt/gdt DECLFUNC transition32 + .global transition32_nmi_off transition32: // Disable irqs (and clear direction flag) cli diff --git a/src/stacks.c b/src/stacks.c index a1b8677..850a335 100644 --- a/src/stacks.c +++ b/src/stacks.c @@ -223,7 +223,7 @@ call32(void *func, u32 eax, u32 errret)
// Transition to 32bit mode, call func, return to 16bit " movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%edx\n" - " jmp transition32\n" + " jmp transition32_nmi_off\n" ASM16_SWITCH32 "1:calll *%3\n" " movl $2f, %%edx\n" @@ -274,7 +274,7 @@ call16_back(u32 eax, u32 edx, void *func) " calll _cfunc16_call16_helper\n" // Return to 32bit and restore esp " movl $2f, %%edx\n" - " jmp transition32\n" + " jmp transition32_nmi_off\n" ASM32_BACK32 "2:addl %2, %%esp\n" : "+a" (eax), "+c"(thunk), "+r"(stackseg)
On Mon, Sep 14, 2015 at 10:26:21AM -0400, Kevin O'Connor wrote:
This series simplifies some of the existing trampoline code.
FYI, I committed this series.
-Kevin