Logan Carlson has uploaded a new change for review. ( https://review.coreboot.org/19989 )
Change subject: arch/arm: Correct checkpatch errors ......................................................................
arch/arm: Correct checkpatch errors
Correct whitespace issues in arch/arm and arch/arm64. Enclose complex values in parentheses.
Change-Id: I74b68f485adff1e6f0fa433e51e12b59ccea654b Signed-off-by: Logan Carlson <logancarlson@google.com> --- M src/arch/arm/div0.c M src/arch/arm/eabi_compat.c M src/arch/arm/include/armv4/arch/smp/spinlock.h M src/arch/arm/include/armv7.h M src/arch/arm/include/smp/spinlock.h M src/arch/arm64/armv8/cache.c M src/arch/arm64/armv8/lib/cache.c 7 files changed, 25 insertions(+), 25 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/89/19989/1
diff --git a/src/arch/arm/div0.c b/src/arch/arm/div0.c index afd9dad..e474f73 100644 --- a/src/arch/arm/div0.c +++ b/src/arch/arm/div0.c @@ -19,7 +19,7 @@
/* Replacement (=dummy) for GNU/Linux division-by zero handler */ /* recursion is ok here because we have no formats ... */ -void __div0 (void) +void __div0(void) { printk(BIOS_EMERG, "DIVIDE BY ZERO! continuing ...\n"); } diff --git a/src/arch/arm/eabi_compat.c b/src/arch/arm/eabi_compat.c index e49f199..15f7d36 100644 --- a/src/arch/arm/eabi_compat.c +++ b/src/arch/arm/eabi_compat.c @@ -20,8 +20,8 @@ #include <console/console.h>
/* FIXME(dhendrix): prototypes added for assembler */ -int raise (int signum) __attribute__((used)); -int raise (int signum) +int raise(int signum) __attribute__((used)); +int raise(int signum) { printk(BIOS_CRIT, "raise: Signal # %d caught\n", signum); return 0; diff --git a/src/arch/arm/include/armv4/arch/smp/spinlock.h b/src/arch/arm/include/armv4/arch/smp/spinlock.h index 6c5f6e8..e49dc44 100644 --- a/src/arch/arm/include/armv4/arch/smp/spinlock.h +++ b/src/arch/arm/include/armv4/arch/smp/spinlock.h @@ -15,12 +15,12 @@ #define _ARCH_SMP_SPINLOCK_H
#define DECLARE_SPIN_LOCK(x) -#define barrier() do {} while(0) +#define barrier() do {} while (0) #define spin_is_locked(lock) 0 -#define spin_unlock_wait(lock) do {} while(0) -#define spin_lock(lock) do {} while(0) -#define spin_unlock(lock) do {} while(0) -#define cpu_relax() do {} while(0) +#define spin_unlock_wait(lock) do {} while (0) +#define spin_lock(lock) do {} while (0) +#define spin_unlock(lock) do {} while (0) +#define cpu_relax() do {} while (0)
#include <smp/node.h> #define boot_cpu() 1 diff --git a/src/arch/arm/include/armv7.h b/src/arch/arm/include/armv7.h index 6622a6f..bec7fd7 100644 --- a/src/arch/arm/include/armv7.h +++ b/src/arch/arm/include/armv7.h @@ -54,8 +54,8 @@ * However, we use the CP15 based instructions because we use * -march=armv5 in U-Boot */ -#define CP15ISB asm volatile ("mcr p15, 0, %0, c7, c5, 4" : : "r" (0)) -#define CP15DSB asm volatile ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)) -#define CP15DMB asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0)) +#define CP15ISB do { asm volatile ("mcr p15, 0, %0, c7, c5, 4" : : "r" (0)); } while (0) +#define CP15DSB do { asm volatile ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); } while (0) +#define CP15DMB do { asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0)); } while (0)
#endif /* ARMV7_H */ diff --git a/src/arch/arm/include/smp/spinlock.h b/src/arch/arm/include/smp/spinlock.h index a8f9c77..f98900a 100644 --- a/src/arch/arm/include/smp/spinlock.h +++ b/src/arch/arm/include/smp/spinlock.h @@ -29,9 +29,9 @@ #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 } #define DECLARE_SPIN_LOCK(x) static spinlock_t x = SPIN_LOCK_UNLOCKED;
-#define barrier() __asm__ __volatile__("": : :"memory") +#define barrier() do { __asm__ __volatile__("" : : : "memory"); } while (0) #define spin_is_locked(x) (*(volatile char *)(&(x)->lock) != 0) -#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) +#define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x))
static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock) { diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c index 4f91de0..68c7df2 100644 --- a/src/arch/arm64/armv8/cache.c +++ b/src/arch/arm64/armv8/cache.c @@ -86,7 +86,7 @@
dsb(); while ((void *)line < addr + len) { - switch(op) { + switch (op) { case OP_DCCIVAC: dccivac(line); break; diff --git a/src/arch/arm64/armv8/lib/cache.c b/src/arch/arm64/armv8/lib/cache.c index b4ecda6..0c621ef 100644 --- a/src/arch/arm64/armv8/lib/cache.c +++ b/src/arch/arm64/armv8/lib/cache.c @@ -23,55 +23,55 @@
void dccisw(uint64_t cisw) { - __asm__ __volatile__("dc cisw, %0\n\t" : : "r" (cisw) :"memory"); + __asm__ __volatile__("dc cisw, %0\n\t" : : "r" (cisw) : "memory"); }
void dccivac(uint64_t civac) { - __asm__ __volatile__("dc civac, %0\n\t" : : "r" (civac) :"memory"); + __asm__ __volatile__("dc civac, %0\n\t" : : "r" (civac) : "memory"); }
void dccsw(uint64_t csw) { - __asm__ __volatile__("dc csw, %0\n\t" : : "r" (csw) :"memory"); + __asm__ __volatile__("dc csw, %0\n\t" : : "r" (csw) : "memory"); }
void dccvac(uint64_t cvac) { - __asm__ __volatile__("dc cvac, %0\n\t" : : "r" (cvac) :"memory"); + __asm__ __volatile__("dc cvac, %0\n\t" : : "r" (cvac) : "memory"); }
void dccvau(uint64_t cvau) { - __asm__ __volatile__("dc cvau, %0\n\t" : : "r" (cvau) :"memory"); + __asm__ __volatile__("dc cvau, %0\n\t" : : "r" (cvau) : "memory"); }
void dcisw(uint64_t isw) { - __asm__ __volatile__("dc isw, %0\n\t" : : "r" (isw) :"memory"); + __asm__ __volatile__("dc isw, %0\n\t" : : "r" (isw) : "memory"); }
void dcivac(uint64_t ivac) { - __asm__ __volatile__("dc ivac, %0\n\t" : : "r" (ivac) :"memory"); + __asm__ __volatile__("dc ivac, %0\n\t" : : "r" (ivac) : "memory"); }
void dczva(uint64_t zva) { - __asm__ __volatile__("dc zva, %0\n\t" : : "r" (zva) :"memory"); + __asm__ __volatile__("dc zva, %0\n\t" : : "r" (zva) : "memory"); }
void iciallu(void) { - __asm__ __volatile__("ic iallu\n\t" : : :"memory"); + __asm__ __volatile__("ic iallu\n\t" : : : "memory"); }
void icialluis(void) { - __asm__ __volatile__("ic ialluis\n\t" : : :"memory"); + __asm__ __volatile__("ic ialluis\n\t" : : : "memory"); }
void icivau(uint64_t ivau) { - __asm__ __volatile__("ic ivau, %0\n\t" : : "r" (ivau) :"memory"); + __asm__ __volatile__("ic ivau, %0\n\t" : : "r" (ivau) : "memory"); }