Xiang Wang has uploaded this change for review. ( https://review.coreboot.org/27972
Change subject: riscv: update misaligned memory access exception handling ......................................................................
riscv: update misaligned memory access exception handling
Add support for more cases: floating point, compressed instructions, etc. Add support for redirecting exceptions to S-mode. Fix DEFINE_MPRV_READ to support reading pages that are execute-only (R=0 X=1).
Change-Id: I9983d56245eab1d458a84cb1432aeb805df7a49f Signed-off-by: Xiang Wang wxjstz@126.com --- M src/arch/riscv/Makefile.inc A src/arch/riscv/fp_asm.S M src/arch/riscv/include/arch/exception.h M src/arch/riscv/include/vm.h A src/arch/riscv/misaligend.c M src/arch/riscv/trap_handler.c 6 files changed, 617 insertions(+), 65 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/72/27972/1
diff --git a/src/arch/riscv/Makefile.inc b/src/arch/riscv/Makefile.inc index 85bec43..86ed8b5 100644 --- a/src/arch/riscv/Makefile.inc +++ b/src/arch/riscv/Makefile.inc @@ -46,6 +46,8 @@ bootblock-y = bootblock.S stages.c bootblock-y += trap_util.S bootblock-y += trap_handler.c +bootblock-y += fp_asm.S +bootblock-y += misaligend.c bootblock-y += mcall.c bootblock-y += virtual_memory.c bootblock-y += boot.c diff --git a/src/arch/riscv/fp_asm.S b/src/arch/riscv/fp_asm.S new file mode 100644 index 0000000..92546b4 --- /dev/null +++ b/src/arch/riscv/fp_asm.S @@ -0,0 +1,341 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2018 HardenedLinux + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#if __riscv_flen >= 32 + + .text + +/* void read_f32(int regnum, uint32_t* v) */ + .align 1 + .globl read_f32 +read_f32: + la a2, .Lr32_t + andi a0, a0, 31 + slli a0, a0, 2 + add a0, a0, a2 + lw a0, 0(a0) + add a0, a0, a2 + jr a0 + .align 2 +.Lr32_t: + .word .Lr32_f0 - .Lr32_t + .word .Lr32_f1 - .Lr32_t + .word .Lr32_f2 - .Lr32_t + .word .Lr32_f3 - .Lr32_t + .word .Lr32_f4 - .Lr32_t + .word .Lr32_f5 - .Lr32_t + .word .Lr32_f6 - .Lr32_t + .word .Lr32_f7 - .Lr32_t + .word .Lr32_f8 - .Lr32_t + .word .Lr32_f9 - .Lr32_t + .word .Lr32_f10 - .Lr32_t + .word .Lr32_f11 - .Lr32_t + .word .Lr32_f12 - .Lr32_t + .word .Lr32_f13 - .Lr32_t + .word .Lr32_f14 - .Lr32_t + .word .Lr32_f15 - .Lr32_t + .word .Lr32_f16 - .Lr32_t + .word .Lr32_f17 - .Lr32_t + .word .Lr32_f18 - .Lr32_t + .word .Lr32_f19 - .Lr32_t + .word .Lr32_f20 - .Lr32_t + .word .Lr32_f21 - .Lr32_t + .word .Lr32_f22 - .Lr32_t + .word .Lr32_f23 - .Lr32_t + .word .Lr32_f24 - .Lr32_t + .word .Lr32_f25 - .Lr32_t + .word .Lr32_f26 - .Lr32_t + .word .Lr32_f27 - .Lr32_t + .word .Lr32_f28 - .Lr32_t + .word .Lr32_f29 - .Lr32_t + .word .Lr32_f30 - .Lr32_t + .word .Lr32_f31 - .Lr32_t +#define read32(which) .Lr32_##which: fsw which, 0(a1); ret + read32(f0) + read32(f1) + read32(f2) + read32(f3) + read32(f4) + read32(f5) + read32(f6) + read32(f7) + read32(f8) + read32(f9) + read32(f10) + read32(f11) + read32(f12) + read32(f13) + read32(f14) + read32(f15) + read32(f16) + read32(f17) + read32(f18) + read32(f19) + read32(f20) + read32(f21) + read32(f22) + read32(f23) + read32(f24) + read32(f25) + read32(f26) + read32(f27) + read32(f28) + read32(f29) + read32(f30) + read32(f31) + +/* void write_f32(int regnum, uint32_t* v) */ + .align 1 + .globl write_f32 +write_f32: + la a2, .Lw32_t + andi a0, a0, 31 + slli a0, a0, 2 + add a0, a0, a2 + lw a0, 0(a0) + add a0, a0, a2 + jr a0 + .align 2 +.Lw32_t: + .word .Lw32_f0 - .Lw32_t + .word .Lw32_f1 - .Lw32_t + .word .Lw32_f2 - .Lw32_t + .word .Lw32_f3 - .Lw32_t + .word .Lw32_f4 - .Lw32_t 
+ .word .Lw32_f5 - .Lw32_t + .word .Lw32_f6 - .Lw32_t + .word .Lw32_f7 - .Lw32_t + .word .Lw32_f8 - .Lw32_t + .word .Lw32_f9 - .Lw32_t + .word .Lw32_f10 - .Lw32_t + .word .Lw32_f11 - .Lw32_t + .word .Lw32_f12 - .Lw32_t + .word .Lw32_f13 - .Lw32_t + .word .Lw32_f14 - .Lw32_t + .word .Lw32_f15 - .Lw32_t + .word .Lw32_f16 - .Lw32_t + .word .Lw32_f17 - .Lw32_t + .word .Lw32_f18 - .Lw32_t + .word .Lw32_f19 - .Lw32_t + .word .Lw32_f20 - .Lw32_t + .word .Lw32_f21 - .Lw32_t + .word .Lw32_f22 - .Lw32_t + .word .Lw32_f23 - .Lw32_t + .word .Lw32_f24 - .Lw32_t + .word .Lw32_f25 - .Lw32_t + .word .Lw32_f26 - .Lw32_t + .word .Lw32_f27 - .Lw32_t + .word .Lw32_f28 - .Lw32_t + .word .Lw32_f29 - .Lw32_t + .word .Lw32_f30 - .Lw32_t + .word .Lw32_f31 - .Lw32_t +#define write32(which) .Lw32_##which: flw which, 0(a1); ret + write32(f0) + write32(f1) + write32(f2) + write32(f3) + write32(f4) + write32(f5) + write32(f6) + write32(f7) + write32(f8) + write32(f9) + write32(f10) + write32(f11) + write32(f12) + write32(f13) + write32(f14) + write32(f15) + write32(f16) + write32(f17) + write32(f18) + write32(f19) + write32(f20) + write32(f21) + write32(f22) + write32(f23) + write32(f24) + write32(f25) + write32(f26) + write32(f27) + write32(f28) + write32(f29) + write32(f30) + write32(f31) +#endif + +#if __riscv_flen >= 64 + + .text + +/* void read_f64(int regnum, uint64_t* v) */ + .align 1 + .globl read_f64 +read_f64: + la a2, .Lr64_t + andi a0, a0, 31 + slli a0, a0, 2 + add a0, a0, a2 + lw a0, 0(a0) + add a0, a0, a2 + jr a0 + .align 2 +.Lr64_t: + .word .Lr64_f0 - .Lr64_t + .word .Lr64_f1 - .Lr64_t + .word .Lr64_f2 - .Lr64_t + .word .Lr64_f3 - .Lr64_t + .word .Lr64_f4 - .Lr64_t + .word .Lr64_f5 - .Lr64_t + .word .Lr64_f6 - .Lr64_t + .word .Lr64_f7 - .Lr64_t + .word .Lr64_f8 - .Lr64_t + .word .Lr64_f9 - .Lr64_t + .word .Lr64_f10 - .Lr64_t + .word .Lr64_f11 - .Lr64_t + .word .Lr64_f12 - .Lr64_t + .word .Lr64_f13 - .Lr64_t + .word .Lr64_f14 - .Lr64_t + .word .Lr64_f15 - .Lr64_t + .word .Lr64_f16 
- .Lr64_t + .word .Lr64_f17 - .Lr64_t + .word .Lr64_f18 - .Lr64_t + .word .Lr64_f19 - .Lr64_t + .word .Lr64_f20 - .Lr64_t + .word .Lr64_f21 - .Lr64_t + .word .Lr64_f22 - .Lr64_t + .word .Lr64_f23 - .Lr64_t + .word .Lr64_f24 - .Lr64_t + .word .Lr64_f25 - .Lr64_t + .word .Lr64_f26 - .Lr64_t + .word .Lr64_f27 - .Lr64_t + .word .Lr64_f28 - .Lr64_t + .word .Lr64_f29 - .Lr64_t + .word .Lr64_f30 - .Lr64_t + .word .Lr64_f31 - .Lr64_t +#define read64(which) .Lr64_##which: fsd which, 0(a1); ret + read64(f0) + read64(f1) + read64(f2) + read64(f3) + read64(f4) + read64(f5) + read64(f6) + read64(f7) + read64(f8) + read64(f9) + read64(f10) + read64(f11) + read64(f12) + read64(f13) + read64(f14) + read64(f15) + read64(f16) + read64(f17) + read64(f18) + read64(f19) + read64(f20) + read64(f21) + read64(f22) + read64(f23) + read64(f24) + read64(f25) + read64(f26) + read64(f27) + read64(f28) + read64(f29) + read64(f30) + read64(f31) + +/* void write_f64(int regnum, uint64_t* v) */ + .align 1 + .globl write_f64 +write_f64: + la a2, .Lw64_t + andi a0, a0, 31 + slli a0, a0, 2 + add a0, a0, a2 + lw a0, 0(a0) + add a0, a0, a2 + jr a0 + .align 2 +.Lw64_t: + .word .Lw64_f0 - .Lw64_t + .word .Lw64_f1 - .Lw64_t + .word .Lw64_f2 - .Lw64_t + .word .Lw64_f3 - .Lw64_t + .word .Lw64_f4 - .Lw64_t + .word .Lw64_f5 - .Lw64_t + .word .Lw64_f6 - .Lw64_t + .word .Lw64_f7 - .Lw64_t + .word .Lw64_f8 - .Lw64_t + .word .Lw64_f9 - .Lw64_t + .word .Lw64_f10 - .Lw64_t + .word .Lw64_f11 - .Lw64_t + .word .Lw64_f12 - .Lw64_t + .word .Lw64_f13 - .Lw64_t + .word .Lw64_f14 - .Lw64_t + .word .Lw64_f15 - .Lw64_t + .word .Lw64_f16 - .Lw64_t + .word .Lw64_f17 - .Lw64_t + .word .Lw64_f18 - .Lw64_t + .word .Lw64_f19 - .Lw64_t + .word .Lw64_f20 - .Lw64_t + .word .Lw64_f21 - .Lw64_t + .word .Lw64_f22 - .Lw64_t + .word .Lw64_f23 - .Lw64_t + .word .Lw64_f24 - .Lw64_t + .word .Lw64_f25 - .Lw64_t + .word .Lw64_f26 - .Lw64_t + .word .Lw64_f27 - .Lw64_t + .word .Lw64_f28 - .Lw64_t + .word .Lw64_f29 - .Lw64_t + .word .Lw64_f30 - 
.Lw64_t + .word .Lw64_f31 - .Lw64_t +#define write64(which) .Lw64_##which: fld which, 0(a1); ret + write64(f0) + write64(f1) + write64(f2) + write64(f3) + write64(f4) + write64(f5) + write64(f6) + write64(f7) + write64(f8) + write64(f9) + write64(f10) + write64(f11) + write64(f12) + write64(f13) + write64(f14) + write64(f15) + write64(f16) + write64(f17) + write64(f18) + write64(f19) + write64(f20) + write64(f21) + write64(f22) + write64(f23) + write64(f24) + write64(f25) + write64(f26) + write64(f27) + write64(f28) + write64(f29) + write64(f30) + write64(f31) + +#endif diff --git a/src/arch/riscv/include/arch/exception.h b/src/arch/riscv/include/arch/exception.h index fc57b3b..6fbbdf0 100644 --- a/src/arch/riscv/include/arch/exception.h +++ b/src/arch/riscv/include/arch/exception.h @@ -32,8 +32,7 @@
#include <stdint.h>
-typedef struct -{ +typedef struct { uintptr_t gpr[32]; uintptr_t status; uintptr_t epc; @@ -53,9 +52,9 @@ { }
-void trap_handler(trapframe* tf); -void handle_supervisor_call(trapframe* tf); -void handle_misaligned_load(trapframe *tf); -void handle_misaligned_store(trapframe *tf); +void redirect_trap(void); +void trap_handler(trapframe *tf); +void handle_supervisor_call(trapframe *tf); +void handle_misaligned(trapframe *tf);
#endif diff --git a/src/arch/riscv/include/vm.h b/src/arch/riscv/include/vm.h index a30d6bb..2925977 100644 --- a/src/arch/riscv/include/vm.h +++ b/src/arch/riscv/include/vm.h @@ -42,7 +42,7 @@ static inline type name(type *p); \ static inline type name(type *p) \ { \ - size_t mprv = MSTATUS_MPRV; \ + size_t mprv = MSTATUS_MPRV | MSTATUS_MXR; \ type value; \ asm ( \ "csrs mstatus, %1\n" \ diff --git a/src/arch/riscv/misaligend.c b/src/arch/riscv/misaligend.c new file mode 100644 index 0000000..a282a05 --- /dev/null +++ b/src/arch/riscv/misaligend.c @@ -0,0 +1,253 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2018 HardenedLinux + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <stddef.h> +#include <stdint.h> +#include <vm.h> +#include <arch/exception.h> + +/* this function define in src/arch/riscv/fp_asm.S */ +#if __riscv_flen >= 32 +extern void read_f32(int regnum, uint32_t *v); +extern void write_f32(int regnum, uint32_t *v); +#endif + +#if __riscv_flen >= 64 +extern void read_f64(int regnum, uint64_t *v); +extern void write_f64(int regnum, uint64_t *v); +#endif + + +union endian_buff { + uint8_t b[8]; + uint16_t h[4]; + uint32_t w[2]; + uint64_t d[1]; + uintptr_t v; +}; + +struct memory_instruction_info { + uintptr_t opcode; + uintptr_t mask; + int reg_shift; + int reg_mask; + int reg_addition; + int is_fp; + int is_load; + int width; + int sign_extend; +}; + +static struct memory_instruction_info insn_info[] = { +#if __riscv_xlen == 128 + { 0x00002000, 0x0000e003, 2, 7, 8, 0, 1, 16, 1}, // C.LQ +#else + { 0x00002000, 0x0000e003, 2, 7, 8, 1, 1, 8, 0}, // C.FLD +#endif + { 0x00004000, 0x0000e003, 2, 7, 8, 0, 1, 4, 1}, // C.LW +#if __riscv_xlen == 32 + { 0x00006000, 0x0000e003, 2, 7, 8, 1, 1, 4, 0}, // C.FLW +#else + { 0x00006000, 0x0000e003, 2, 7, 8, 0, 1, 8, 1}, // C.LD +#endif + +#if __riscv_xlen == 128 + { 0x0000a000, 0x0000e003, 2, 7, 8, 0, 0, 16, 0}, // C.SQ +#else + { 0x0000a000, 0x0000e003, 2, 7, 8, 1, 0, 8, 0}, // C.FSD +#endif + { 0x0000c000, 0x0000e003, 2, 7, 8, 0, 0, 4, 0}, // C.SW +#if __riscv_xlen == 32 + { 0x0000e000, 0x0000e003, 2, 7, 8, 1, 0, 4, 0}, // C.FSW +#else + { 0x0000e000, 0x0000e003, 2, 7, 8, 0, 0, 8, 0}, // C.SD +#endif + +#if __riscv_xlen == 128 + { 0x00002002, 0x0000e003, 7, 15, 0, 0, 1, 16, 1}, // C.LQSP +#else + { 0x00002002, 0x0000e003, 7, 15, 0, 1, 1, 8, 0}, // C.FLDSP +#endif + { 0x00004002, 0x0000e003, 7, 15, 0, 0, 1, 4, 1}, // C.LWSP +#if __riscv_xlen == 32 + { 0x00006002, 0x0000e003, 7, 15, 0, 1, 1, 4, 0}, // C.FLWSP +#else + { 0x00006002, 0x0000e003, 7, 15, 0, 0, 1, 8, 1}, // C.LDSP +#endif + +#if __riscv_xlen == 128 + { 0x0000a002, 0x0000e003, 2, 15, 0, 0, 0, 16, 0}, // C.SQSP 
+#else + { 0x0000a002, 0x0000e003, 2, 15, 0, 1, 0, 8, 0}, // C.FSDSP +#endif + { 0x0000c002, 0x0000e003, 2, 15, 0, 0, 0, 4, 0}, // C.SWSP +#if __riscv_xlen == 32 + { 0x0000e002, 0x0000e003, 2, 15, 0, 1, 0, 4, 0}, // C.FSWSP +#else + { 0x0000e002, 0x0000e003, 2, 15, 0, 0, 0, 8, 0}, // C.SDSP +#endif + + { 0x00000003, 0x0000707f, 7, 15, 0, 0, 1, 1, 1}, // LB + { 0x00001003, 0x0000707f, 7, 15, 0, 0, 1, 2, 1}, // LH + { 0x00002003, 0x0000707f, 7, 15, 0, 0, 1, 4, 1}, // LW +#if __riscv_xlen > 32 + { 0x00003003, 0x0000707f, 7, 15, 0, 0, 1, 8, 1}, // LD +#endif + { 0x00004003, 0x0000707f, 7, 15, 0, 0, 1, 1, 0}, // LBU + { 0x00005003, 0x0000707f, 7, 15, 0, 0, 1, 2, 0}, // LHU + { 0x00006003, 0x0000707f, 7, 15, 0, 0, 1, 4, 0}, // LWU + + { 0x00000023, 0x0000707f, 20, 15, 0, 0, 0, 1, 0}, // SB + { 0x00001023, 0x0000707f, 20, 15, 0, 0, 0, 2, 0}, // SH + { 0x00002023, 0x0000707f, 20, 15, 0, 0, 0, 4, 0}, // SW +#if __riscv_xlen > 32 + { 0x00003023, 0x0000707f, 20, 15, 0, 0, 0, 8, 0}, // SD +#endif + +#if __riscv_flen >= 32 + { 0x00002007, 0x0000707f, 7, 15, 0, 1, 1, 4, 0}, // FLW + { 0x00003007, 0x0000707f, 7, 15, 0, 1, 1, 8, 0}, // FLD +#endif + +#if __riscv_flen >= 64 + { 0x00002027, 0x0000707f, 20, 15, 0, 1, 0, 4, 0}, // FSW + { 0x00003027, 0x0000707f, 20, 15, 0, 1, 0, 8, 0}, // FSD +#endif + { 0, 0, 0, 0, 0, 0, 0, 0, 0} +}; + +static struct memory_instruction_info *match_instruction(uintptr_t insn) +{ + struct memory_instruction_info *p; + for (p = insn_info; p->opcode; p++) + if ((insn & p->mask) == p->opcode) + return p; + return NULL; +} + + +static int fetch_16bit_instruction(uintptr_t vaddr, uintptr_t *insn) +{ + uint16_t t = mprv_read_u16((uint16_t *)vaddr); + if (EXTRACT_FIELD(t, 0x3) != 3) { + *insn = t; + return 0; + } + return -1; +} + +static int fetch_32bit_instruction(uintptr_t vaddr, uintptr_t *insn) +{ + uint32_t t = mprv_read_u32((uint32_t *)vaddr); + if ((EXTRACT_FIELD(t, 0x3) == 3) && (EXTRACT_FIELD(t, 0x1c) != 0x7)) { + *insn = t; + return 0; + } + return 
-1; +} + + +void handle_misaligned(trapframe *tf) +{ + uintptr_t insn = 0; + union endian_buff buff; + + /* try to fetch 16/32 bits instruction */ + if (fetch_16bit_instruction(tf->epc, &insn)) + if (fetch_32bit_instruction(tf->epc, &insn)) + redirect_trap(); + + /* matching instruction */ + struct memory_instruction_info *match = match_instruction(insn); + + if (match) { + int regnum = ((insn >> match->reg_shift) & match->reg_mask) + \ + match->reg_addition; + buff.v = 0; + if (match->is_load) { + /* load operate */ + + /* reading from memory by bytes prevents misaligned + * memory access */ + for (int i = 0; i < match->width; i++) + buff.b[i] = mprv_read_u8((uint8_t *) \ + (tf->badvaddr + i)); + + /* sign extend for signed integer loading */ + if (match->sign_extend) + if (buff.v >> (8 * match->width - 1)) + buff.v |= -1 << (8 * match->width); + + /* write to register */ + if (match->is_fp) { + do { +#if __riscv_flen >= 32 + /* single-precision floating-point */ + if (match->width == 4) { + write_f32(regnum, buff.w); + break; + } +#endif +#if __riscv_flen >= 64 + /* double-precision floating-point */ + if (match->width == 8) { + write_f64(regnum, buff.d); + break; + } +#endif + redirect_trap(); + } while (0); + } else { + tf->gpr[regnum] = buff.v; + } + } else { + /* store operate */ + + /* reading from register */ + if (match->is_fp) { + do { +#if __riscv_flen >= 32 + if (match->width == 4) { + read_f32(regnum, buff.w); + break; + } +#endif +#if __riscv_flen >= 64 + if (match->width == 8) { + read_f64(regnum, buff.d); + break; + } +#endif + redirect_trap(); + } while (0); + } else { + buff.v = tf->gpr[regnum]; + } + + /* writing to memory by bytes prevents misaligned\ + * memory access */ + for (int i = 0; i < match->width; i++) + mprv_write_u8((uint8_t *)(tf->badvaddr + i), \ + buff.b[i]); + } + } else { + redirect_trap(); + } +} + + + + + diff --git a/src/arch/riscv/trap_handler.c b/src/arch/riscv/trap_handler.c index 7b35c2e..6888c8d 100644 --- 
a/src/arch/riscv/trap_handler.c +++ b/src/arch/riscv/trap_handler.c @@ -167,11 +167,11 @@ break; case CAUSE_MISALIGNED_LOAD: print_trap_information(tf); - handle_misaligned_load(tf); + handle_misaligned(tf); return; case CAUSE_MISALIGNED_STORE: print_trap_information(tf); - handle_misaligned_store(tf); + handle_misaligned(tf); return; default: printk(BIOS_EMERG, "================================\n"); @@ -184,62 +184,19 @@ die("Can't recover from trap. Halting.\n"); }
-static uint32_t fetch_instruction(uintptr_t vaddr) { - printk(BIOS_SPEW, "fetching instruction at 0x%016zx\n", (size_t)vaddr); - return mprv_read_u32((uint32_t *) vaddr); + +void redirect_trap(void) +{ + write_csr(sbadaddr, read_csr(mbadaddr)); + write_csr(sepc, read_csr(mepc)); + write_csr(scause, read_csr(mcause)); + write_csr(mepc, read_csr(stvec)); + + uintptr_t status = read_csr(mstatus); + uintptr_t mpp = EXTRACT_FIELD(status, 0x1800); + status = INSERT_FIELD(status, 0x1800, 1); + status = INSERT_FIELD(status, 0x100, mpp & 1); + write_csr(mstatus, status); }
-void handle_misaligned_load(trapframe *tf) { - printk(BIOS_DEBUG, "Trapframe ptr: %p\n", tf); - uintptr_t faultingInstructionAddr = tf->epc; - insn_t faultingInstruction = fetch_instruction(faultingInstructionAddr); - printk(BIOS_DEBUG, "Faulting instruction: 0x%x\n", faultingInstruction); - insn_t widthMask = 0x7000; - insn_t memWidth = (faultingInstruction & widthMask) >> 12; - insn_t destMask = 0xF80; - insn_t destRegister = (faultingInstruction & destMask) >> 7; - printk(BIOS_DEBUG, "Width: %d bits\n", (1 << memWidth) * 8); - if (memWidth == 3) { - // load double, handle the issue - void* badAddress = (void*) tf->badvaddr; - uint64_t value = 0; - for (int i = 0; i < 8; i++) { - value <<= 8; - value += mprv_read_u8(badAddress+i); - } - tf->gpr[destRegister] = value; - } else { - // panic, this should not have happened - die("Code should not reach this path, misaligned on a non-64 bit store/load\n"); - }
- // return to where we came from - write_csr(mepc, read_csr(mepc) + 4); -} - -void handle_misaligned_store(trapframe *tf) { - printk(BIOS_DEBUG, "Trapframe ptr: %p\n", tf); - uintptr_t faultingInstructionAddr = tf->epc; - insn_t faultingInstruction = fetch_instruction(faultingInstructionAddr); - printk(BIOS_DEBUG, "Faulting instruction: 0x%x\n", faultingInstruction); - insn_t widthMask = 0x7000; - insn_t memWidth = (faultingInstruction & widthMask) >> 12; - insn_t srcMask = 0x1F00000; - insn_t srcRegister = (faultingInstruction & srcMask) >> 20; - printk(BIOS_DEBUG, "Width: %d bits\n", (1 << memWidth) * 8); - if (memWidth == 3) { - // store double, handle the issue - void* badAddress = (void*) tf->badvaddr; - uint64_t value = tf->gpr[srcRegister]; - for (int i = 0; i < 8; i++) { - mprv_write_u8(badAddress+i, value); - value >>= 8; - } - } else { - // panic, this should not have happened - die("Code should not reach this path, misaligned on a non-64 bit store/load\n"); - } - - // return to where we came from - write_csr(mepc, read_csr(mepc) + 4); -}