Attention is currently required from: Philipp Hug, Ron Minnich.
Maximilian Brune has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/83754?usp=email )
Change subject: arch/riscv/include/mcall.h: Remove unused
......................................................................
arch/riscv/include/mcall.h: Remove unused
Signed-off-by: Maximilian Brune <maximilian.brune@9elements.com>
Change-Id: Ic633899dd476e1062bb805222bc6b02af4d47bd6
---
M src/arch/riscv/include/mcall.h
M src/arch/riscv/smp.c
2 files changed, 7 insertions(+), 22 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/54/83754/1
diff --git a/src/arch/riscv/include/mcall.h b/src/arch/riscv/include/mcall.h
index 69eb574..eec8a8c 100644
--- a/src/arch/riscv/include/mcall.h
+++ b/src/arch/riscv/include/mcall.h
@@ -3,14 +3,12 @@
 #ifndef _MCALL_H
 #define _MCALL_H
 
-// NOTE: this is the size of struct hls below. A static_assert would be
-// nice to have.
 #if __riscv_xlen == 64
-#define HLS_SIZE 96
+#define HLS_SIZE 64
 #endif
 
 #if __riscv_xlen == 32
-#define HLS_SIZE 56
+#define HLS_SIZE 48
 #endif
 
 /* We save 37 registers, currently. */
@@ -22,26 +20,13 @@
 #include <arch/smp/atomic.h>
 #include <stdint.h>
 
-struct sbi_device_message {
-	unsigned long dev;
-	unsigned long cmd;
-	unsigned long data;
-	unsigned long sbi_private_data;
-};
-
 struct blocker {
 	void *arg;
 	void (*fn)(void *arg);
-	atomic_t sync_a;
-	atomic_t sync_b;
+	atomic_t sync;
 };
 
 struct hls {
-	struct sbi_device_message *device_request_queue_head;
-	unsigned long device_request_queue_size;
-	struct sbi_device_message *device_response_queue_head;
-	struct sbi_device_message *device_response_queue_tail;
-	int enabled;
 	int hart_id;
 	int ipi_pending;
diff --git a/src/arch/riscv/smp.c b/src/arch/riscv/smp.c
index 811719e..eb3c6cc 100644
--- a/src/arch/riscv/smp.c
+++ b/src/arch/riscv/smp.c
@@ -17,14 +17,14 @@
 		set_msip(hartid, 0); // clear pending interrupts
 		write_csr(mie, MIP_MSIP); // enable only IPI (for smp_resume)
 		barrier();
-		atomic_set(&HLS()->entry.sync_a, 0x01234567); // mark the hart as sleeping.
+		atomic_set(&HLS()->entry.sync, 0x01234567); // mark the hart as sleeping.
 
 		// pause hart
 		do {
 			__asm__ volatile ("wfi"); // wait for interrupt
 		} while ((read_csr(mip) & MIP_MSIP) == 0);
 
-		atomic_set(&HLS()->entry.sync_a, 0); // mark the hart as awake
+		atomic_set(&HLS()->entry.sync, 0); // mark the hart as awake
 		HLS()->entry.fn(HLS()->entry.arg);
 	}
 }
@@ -51,7 +51,7 @@
 		if (i == working_hartid)
 			continue;
 
-		if (atomic_read(&OTHER_HLS(i)->entry.sync_a) != 0x01234567) {
+		if (atomic_read(&OTHER_HLS(i)->entry.sync) != 0x01234567) {
 			/*
 			 * we assmue here that the time between smp_pause and smp_resume
 			 * is enough for all harts to reach the smp_pause state.
@@ -77,7 +77,7 @@
 			continue;
 
 		// wait for hart to publish its waking state
-		while (atomic_read(&OTHER_HLS(i)->entry.sync_a) != 0)
+		while (atomic_read(&OTHER_HLS(i)->entry.sync) != 0)
 			;
 		count_awake_harts++;
 	}
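The NOTE comment removed in the first hunk asked for a static_assert tying HLS_SIZE to sizeof(struct hls). A compile-time check along those lines could look roughly like the standalone sketch below. The struct is abridged to the fields visible in the hunks plus a stand-in atomic_t, so the 32-byte figure only holds for this reduced layout on an LP64 host and is not a value from the patch.

/* hls_size_check.c - standalone sketch, not the coreboot header.
 * Shows the kind of compile-time check the removed NOTE wished for:
 * keep the assembly-visible HLS_SIZE constant in sync with
 * sizeof(struct hls).  Field set is abridged from the hunks above;
 * atomic_t is a stand-in for <arch/smp/atomic.h>. */
#include <stdint.h>

typedef struct { int val; } atomic_t;	/* stand-in type, 4 bytes here */

struct blocker {
	void *arg;
	void (*fn)(void *arg);
	atomic_t sync;
};

struct hls {
	int hart_id;
	int ipi_pending;
	struct blocker entry;
};

/* Only correct for this abridged struct on an LP64 target; the patch
 * uses 64 (rv64) / 48 (rv32) for the full struct. */
#define HLS_SIZE 32

_Static_assert(sizeof(struct hls) == HLS_SIZE,
	       "HLS_SIZE must match sizeof(struct hls)");

int main(void) { return 0; }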
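On the smp.c side, the patch collapses the sync_a/sync_b pair into a single sync word: a parked hart publishes the 0x01234567 sentinel, waits for the IPI in wfi, then writes 0 to publish that it is awake; in the hunks above the working hart only ever reads that word. The host-side model below exercises the same handshake with pthreads and C11 atomics; the names and the second atomic standing in for the MSIP interrupt are inventions for the sketch, not coreboot code.

/* handshake_model.c - host-side model of the single-flag handshake.
 * The real code uses wfi plus a machine-mode software IPI; here an
 * extra atomic plays the role of the IPI so the protocol can run as
 * two ordinary threads.  Build with: cc -pthread handshake_model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define HART_SLEEPING 0x01234567	/* same sentinel as in smp.c */

static atomic_int sync_flag;	/* models HLS()->entry.sync */
static atomic_int ipi;		/* models the MSIP software IPI */

static void *secondary_hart(void *arg)
{
	(void)arg;
	atomic_store(&sync_flag, HART_SLEEPING);	/* "I am parked" */
	while (!atomic_load(&ipi))			/* models wfi + mip check */
		;
	atomic_store(&sync_flag, 0);			/* "I am awake" */
	return NULL;
}

int main(void)
{
	pthread_t hart;
	pthread_create(&hart, NULL, secondary_hart, NULL);

	/* working hart: wait until the secondary has parked ... */
	while (atomic_load(&sync_flag) != HART_SLEEPING)
		;
	/* ... "send the IPI" ... */
	atomic_store(&ipi, 1);
	/* ... and wait for it to publish its waking state. */
	while (atomic_load(&sync_flag) != 0)
		;
	pthread_join(hart, NULL);
	puts("handshake complete with a single sync word");
	return 0;
}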