Raul Rangel has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/59320 )
Change subject: lib: Add a mutex ......................................................................
lib: Add a mutex
We currently have two synchronization primitives: spinlock and thread_mutex. spinlock is meant to block multiple CPUs from entering a critical section, while thread_mutex is meant to block multiple coop-threads from entering a critical section; it is not AP aware at all.
This CL introduces a mutex that can handle both concepts. The implementation uses the GCC/LLVM atomic builtin functions. The generated code uses the xchg instruction, whereas the spinlock implementation uses a (lock) decb.
8: b0 01 mov $0x1,%al a: 86 03 xchg %al,(%ebx) c: 84 c0 test %al,%al
BUG=b:179699789 TEST=Boot guybrush to OS
Signed-off-by: Raul E Rangel rrangel@chromium.org Change-Id: I41e02a54a17b1f6513b36a0274e43fc715472d78 --- A src/include/mutex.h M src/lib/Makefile.inc A src/lib/mutex.c 3 files changed, 77 insertions(+), 2 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/20/59320/1
diff --git a/src/include/mutex.h b/src/include/mutex.h new file mode 100644 index 0000000..cda5746 --- /dev/null +++ b/src/include/mutex.h @@ -0,0 +1,13 @@ +#ifndef _SMP_MUTEX_H +#define _SMP_MUTEX_H + +#include <types.h> + +struct mutex { + bool locked; +}; + +void mutex_lock(struct mutex *mutex); +void mutex_unlock(struct mutex *mutex); + +#endif /* _SMP_MUTEX_H */ diff --git a/src/lib/Makefile.inc b/src/lib/Makefile.inc index 1af7346..bda83ef 100644 --- a/src/lib/Makefile.inc +++ b/src/lib/Makefile.inc @@ -29,6 +29,8 @@ endif
# mutex.c is added to every stage (all-y) and to SMM (smm-y) so the new
# mutex primitive is linkable wherever it may be contended.
all-y += list.c +all-y += mutex.c +smm-y += mutex.c
# NOTE(review): this hunk changes the rmodule link to depend on the stage's
# $(COMPILER_RT_$(4)) instead of $(COMPILER_RT_rmodules_$(4)), and links it
# after --no-whole-archive so compiler-rt objects are only pulled in when
# actually referenced (the module sources stay inside --whole-archive).
decompressor-y += decompressor.c $(call src-to-obj,decompressor,$(dir)/decompressor.c): $(objcbfs)/bootblock.lz4 @@ -318,8 +320,8 @@ # It will create the necessary Make rules to create a rmodule. The resulting # rmodule is named $(1).rmod define rmodule_link -$(strip $(1)): $(strip $(2)) $$(COMPILER_RT_rmodules_$(4)) $(call src-to-obj,rmodules_$(4),src/lib/rmodule.ld) | $$(RMODTOOL) - $$(LD_rmodules_$(4)) $$(LDFLAGS_rmodules_$(4)) $(RMODULE_LDFLAGS) -T $(call src-to-obj,rmodules_$(4),src/lib/rmodule.ld) --defsym=__heap_size=$(strip $(3)) -o $$@ --whole-archive --start-group $(filter-out %.ld,$(2)) --end-group +$(strip $(1)): $(strip $(2)) $$(COMPILER_RT_$(4)) $(call src-to-obj,rmodules_$(4),src/lib/rmodule.ld) | $$(RMODTOOL) + $$(LD_rmodules_$(4)) $$(LDFLAGS_rmodules_$(4)) $(RMODULE_LDFLAGS) -T $(call src-to-obj,rmodules_$(4),src/lib/rmodule.ld) --defsym=__heap_size=$(strip $(3)) -o $$@ --whole-archive --start-group $(filter-out %.ld,$(2)) --no-whole-archive $$(COMPILER_RT_$(4)) --end-group $$(NM_rmodules_$(4)) -n $$@ > $$(basename $$@).map endef
diff --git a/src/lib/mutex.c b/src/lib/mutex.c new file mode 100644 index 0000000..1467b33 --- /dev/null +++ b/src/lib/mutex.c @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include <mutex.h> +#include <thread.h> +#include <types.h> + +static void mutex_lock_smp(struct mutex *mutex) +{ + while (true) { + if (__atomic_test_and_set(&mutex->locked, __ATOMIC_ACQUIRE) == 0) + return; + + /* + * If we failed to get the lock, wait until it's been unlocked before trying to + * read-modify-write. This helps prevent excessive cache misses. + */ + do { + /* + * Threads are only available on the BSP, so if this code is running + * on an AP, yielding will fail. In that case just sleep for a bit + * and try again. + */ + if (thread_yield() < 0) { +#if ENV_X86 + __builtin_ia32_pause(); +#else + continue; +#endif + } + + } while (__atomic_load_1(&mutex->locked, __ATOMIC_RELAXED)); + } +} + +static void mutex_lock_nosmp(struct mutex *mutex) +{ + while (mutex->locked) + assert(thread_yield() >= 0); + + mutex->locked = true; +} + +void mutex_lock(struct mutex *mutex) +{ + if (ENV_STAGE_SUPPORTS_SMP) + mutex_lock_smp(mutex); + else + mutex_lock_nosmp(mutex); +} + +void mutex_unlock(struct mutex *mutex) +{ + if (ENV_STAGE_SUPPORTS_SMP) { + assert(__atomic_load_1(&mutex->locked, __ATOMIC_RELAXED)); + __atomic_clear(&mutex->locked, __ATOMIC_RELEASE); + } else { + assert(mutex->locked); + mutex->locked = false; + } +}