Attention is currently required from: Raul Rangel.

Hello Raul Rangel,
I'd like you to do a code review. Please visit
https://review.coreboot.org/c/coreboot/+/60253
to review the following change.
Change subject: [RFC] smp/spinlock: Use compiler builtins
......................................................................
[RFC] smp/spinlock: Use compiler builtins
Change-Id: I6694784aa0810cd877886ef95434f62e3aefce6c
Signed-off-by: Raul E Rangel <rrangel@chromium.org>
Signed-off-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
---
D src/arch/x86/include/arch/smp/spinlock.h
M src/console/printk.c
A src/include/mutex.h
M src/include/rules.h
M src/include/smp/spinlock.h
5 files changed, 84 insertions(+), 81 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/53/60253/1
diff --git a/src/arch/x86/include/arch/smp/spinlock.h b/src/arch/x86/include/arch/smp/spinlock.h
deleted file mode 100644
index c7008c1..0000000
--- a/src/arch/x86/include/arch/smp/spinlock.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef ARCH_SMP_SPINLOCK_H
-#define ARCH_SMP_SPINLOCK_H
-
-#include <thread.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile unsigned int lock;
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED { 1 }
-
-#define DECLARE_SPIN_LOCK(x) \
-	static spinlock_t x = SPIN_LOCK_UNLOCKED;
-
-/*
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- */
-#define barrier() __asm__ __volatile__("" : : : "memory")
-#define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
-#undef barrier
-
-#define spin_lock_string \
-	"\n1:\t" \
-	"lock ; decb %0\n\t" \
-	"js 2f\n" \
-	".section .text.lock,\"ax\"\n" \
-	"2:\t" \
-	"cmpb $0,%0\n\t" \
-	"rep;nop\n\t" \
-	"jle 2b\n\t" \
-	"jmp 1b\n" \
-	".previous"
-
-/*
- * This works. Despite all the confusion.
- */
-#define spin_unlock_string \
-	"movb $1,%0"
-
-static __always_inline void spin_lock(spinlock_t *lock)
-{
-	__asm__ __volatile__(
-		spin_lock_string
-		: "=m" (lock->lock) : : "memory");
-
-	/* Switching contexts while holding a spinlock will lead to deadlocks */
-	thread_coop_disable();
-
-}
-
-static __always_inline void spin_unlock(spinlock_t *lock)
-{
-	thread_coop_enable();
-
-	__asm__ __volatile__(
-		spin_unlock_string
-		: "=m" (lock->lock) : : "memory");
-}
-
-#endif /* ARCH_SMP_SPINLOCK_H */
diff --git a/src/console/printk.c b/src/console/printk.c
index 1ed39cb..b54cd9a 100644
--- a/src/console/printk.c
+++ b/src/console/printk.c
@@ -8,6 +8,7 @@
 #include <console/console.h>
 #include <console/streams.h>
 #include <console/vtxprintf.h>
+#include <mutex.h>
 #include <smp/spinlock.h>
 #include <smp/node.h>
 #include <timer.h>
@@ -80,7 +81,7 @@
 	if (log_this < CONSOLE_LOG_FAST)
 		return 0;
 
-	spin_lock(&console_lock);
+	mutex_lock(&console_lock);
 	console_time_run();
@@ -93,7 +94,7 @@
 	console_time_stop();
-	spin_unlock(&console_lock);
+	mutex_unlock(&console_lock);
 	return i;
 }
diff --git a/src/include/mutex.h b/src/include/mutex.h
new file mode 100644
index 0000000..d4642f9
--- /dev/null
+++ b/src/include/mutex.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _MUTEX_H
+#define _MUTEX_H
+
+#include <arch/cpu.h>
+#include <smp/spinlock.h>
+#include <thread.h>
+
+static inline void mutex_lock(struct spinlock *lock)
+{
+	while (!spinlock_try_lock(lock)) {
+		/*
+		 * If we failed to get the lock, wait until it's been unlocked before trying to
+		 * read-modify-write. This helps prevent excessive cache misses.
+		 */
+		while (spinlock_is_locked(lock)) {
+			/*
+			 * Threads are only available on the BSP, so if this code is running
+			 * on an AP, yielding will fail. In that case just sleep for a bit
+			 * and try again.
+			 */
+			if (thread_yield() < 0)
+				cpu_relax();
+		}
+	}
+}
+
+static inline void mutex_unlock(struct spinlock *lock)
+{
+	spin_unlock(lock);
+}
+
+#endif /* _MUTEX_H */
diff --git a/src/include/rules.h b/src/include/rules.h
index 02b55c5..f1ba3ab 100644
--- a/src/include/rules.h
+++ b/src/include/rules.h
@@ -300,10 +300,9 @@
 #define STAGE_HAS_SPINLOCKS 0
 #endif
 
-/* When set <arch/smp/spinlock.h> is included for the spinlock implementation. */
 #define ENV_STAGE_SUPPORTS_SMP (CONFIG(SMP) && STAGE_HAS_SPINLOCKS)
 
-#if ENV_X86 && CONFIG(COOP_MULTITASKING) && (ENV_RAMSTAGE || ENV_ROMSTAGE)
+#if CONFIG(COOP_MULTITASKING) && (ENV_RAMSTAGE || ENV_ROMSTAGE)
 /* TODO: Enable in all x86 stages */
 #define ENV_STAGE_SUPPORTS_COOP 1
 #else
diff --git a/src/include/smp/spinlock.h b/src/include/smp/spinlock.h
index 116830c..28287ca 100644
--- a/src/include/smp/spinlock.h
+++ b/src/include/smp/spinlock.h
@@ -1,15 +1,54 @@
 #ifndef SMP_SPINLOCK_H
 #define SMP_SPINLOCK_H
 
-#if ENV_STAGE_SUPPORTS_SMP
-#include <arch/smp/spinlock.h>
+#include <arch/cpu.h>
+#include <types.h>
+
+struct spinlock {
+	bool locked;
+};
+
+#define DECLARE_SPIN_LOCK(x) static struct spinlock x;
+
+#if ENV_STAGE_SUPPORTS_SMP || ENV_STAGE_SUPPORTS_COOP
+
+static __always_inline bool spinlock_try_lock(struct spinlock *lock)
+{
+	return !__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE);
+}
+
+static __always_inline bool spinlock_is_locked(struct spinlock *lock)
+{
+	return __atomic_load_n(&lock->locked, __ATOMIC_RELAXED);
+}
+
+static __always_inline void spin_lock(struct spinlock *lock)
+{
+	while (!spinlock_try_lock(lock)) {
+		cpu_relax();
+	}
+}
+
+static __always_inline void spin_unlock(struct spinlock *lock)
+{
+	__atomic_clear(&lock->locked, __ATOMIC_RELEASE);
+}
+
 #else /* !CONFIG_SMP */
-#define DECLARE_SPIN_LOCK(x)
-#define spin_is_locked(lock)	0
-#define spin_unlock_wait(lock)	do {} while (0)
-#define spin_lock(lock)		do {} while (0)
-#define spin_unlock(lock)	do {} while (0)
+static __always_inline bool spinlock_try_lock(struct spinlock *lock)
+{
+	return true;
+}
+
+static __always_inline bool spinlock_is_locked(struct spinlock *lock)
+{
+	return false;
+}
+
+static __always_inline void spin_lock(struct spinlock *lock) { }
+static __always_inline void spin_unlock(struct spinlock *lock) { }
+
 #endif
#endif /* SMP_SPINLOCK_H */
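
For reviewers who want to poke at the builtin semantics this change relies
on, the following is a small standalone sanity check, not part of the patch:
it exercises the same __atomic_test_and_set()/__atomic_clear() pattern the
new spin_lock()/spin_unlock() use, under contention from two POSIX threads.
The file and function names (demo_lock() etc.) are mine; build with
"gcc -O2 -pthread atomic_demo.c".

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool locked;	/* the same storage struct spinlock wraps */
static long counter;

static void demo_lock(void)
{
	/* __atomic_test_and_set() returns the previous value, so it reports
	   true while another thread still holds the lock. */
	while (__atomic_test_and_set(&locked, __ATOMIC_ACQUIRE))
		;	/* spin; the patch puts cpu_relax()/thread_yield() here */
}

static void demo_unlock(void)
{
	__atomic_clear(&locked, __ATOMIC_RELEASE);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 1000000; i++) {
		demo_lock();
		counter++;	/* the critical section */
		demo_unlock();
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Without the acquire/release pairing, increments would be lost. */
	printf("counter = %ld (expect 2000000)\n", counter);
	return 0;
}

In-tree callers would instead go through DECLARE_SPIN_LOCK() and the new
mutex_lock()/mutex_unlock() wrappers, exactly as the printk.c hunk above does.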