v3 uses all combinations of __asm__, asm, __volatile__ and volatile (and each keyword on its own) to declare inline asm statements. asm is a GNU C extension, while volatile is ANSI C. That means:
- __volatile__ can be replaced by volatile unless you use a pure K&R compiler.
- asm is not a reserved keyword in ISO C and should be replaced by __asm__, which is always available.
As a bonus, grepping for __asm__ returns fewer hits than grepping for asm, because asm also shows up as an ordinary word in comments.
Signed-off-by: Carl-Daniel Hailfinger <c-d.hailfinger.devel.2006@gmx.net>
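For illustration only (this sketch is not part of the patch, and the function name is made up); it mirrors the read_cr0() helper touched below and shows the preferred spelling:

static unsigned long example_read_cr0(void)
{
	unsigned long cr0;
	/* __asm__ is recognized by GCC in every mode; volatile is an
	 * ANSI C keyword, so __volatile__ buys nothing here. Writing
	 * plain "asm" instead would fail with gcc -ansi/-std=c89,
	 * which disables the asm keyword. */
	__asm__ volatile ("movl %%cr0, %0" : "=r" (cr0));
	return cr0;
}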
Index: corebootv3-inline_asm_consistent_underscores/include/arch/x86/msr.h
===================================================================
--- corebootv3-inline_asm_consistent_underscores/include/arch/x86/msr.h	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/include/arch/x86/msr.h	(Arbeitskopie)
@@ -36,7 +36,7 @@
 static inline struct msr rdmsr(u32 index)
 {
 	struct msr result;
-	__asm__ __volatile__ (
+	__asm__ volatile (
 		"rdmsr"
 		: "=a" (result.lo), "=d" (result.hi)
 		: "c" (index)
@@ -46,7 +46,7 @@
 
 static inline void wrmsr(u32 index, struct msr msr)
 {
-	__asm__ __volatile__ (
+	__asm__ volatile (
 		"wrmsr"
 		: /* No outputs */
 		: "c" (index), "a" (msr.lo), "d" (msr.hi)
Index: corebootv3-inline_asm_consistent_underscores/include/arch/x86/io.h
===================================================================
--- corebootv3-inline_asm_consistent_underscores/include/arch/x86/io.h	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/include/arch/x86/io.h	(Arbeitskopie)
@@ -28,43 +28,43 @@
 
 static inline void outb(u8 value, u16 port)
 {
-	__asm__ __volatile__ ("outb %b0, %w1" : : "a" (value), "Nd" (port));
+	__asm__ volatile ("outb %b0, %w1" : : "a" (value), "Nd" (port));
 }
 
 static inline void outw(u16 value, u16 port)
 {
-	__asm__ __volatile__ ("outw %w0, %w1" : : "a" (value), "Nd" (port));
+	__asm__ volatile ("outw %w0, %w1" : : "a" (value), "Nd" (port));
 }
 
 static inline void outl(u32 value, u16 port)
 {
-	__asm__ __volatile__ ("outl %0, %w1" : : "a" (value), "Nd" (port));
+	__asm__ volatile ("outl %0, %w1" : : "a" (value), "Nd" (port));
 }
 
 static inline u8 inb(u16 port)
 {
 	u8 value;
-	__asm__ __volatile__ ("inb %w1, %b0" : "=a"(value) : "Nd" (port));
+	__asm__ volatile ("inb %w1, %b0" : "=a"(value) : "Nd" (port));
 	return value;
 }
 
 static inline u16 inw(u16 port)
 {
 	u16 value;
-	__asm__ __volatile__ ("inw %w1, %w0" : "=a"(value) : "Nd" (port));
+	__asm__ volatile ("inw %w1, %w0" : "=a"(value) : "Nd" (port));
 	return value;
 }
 
 static inline u32 inl(u16 port)
 {
 	u32 value;
-	__asm__ __volatile__ ("inl %w1, %0" : "=a"(value) : "Nd" (port));
+	__asm__ volatile ("inl %w1, %0" : "=a"(value) : "Nd" (port));
 	return value;
 }
 
 static inline void outsb(u16 port, const void *addr, unsigned long count)
 {
-	__asm__ __volatile__ (
+	__asm__ volatile (
 		"cld ; rep ; outsb "
 		: "=S" (addr), "=c" (count)
 		: "d"(port), "0"(addr), "1" (count)
@@ -73,7 +73,7 @@
 
 static inline void outsw(u16 port, const void *addr, unsigned long count)
 {
-	__asm__ __volatile__ (
+	__asm__ volatile (
 		"cld ; rep ; outsw "
 		: "=S" (addr), "=c" (count)
 		: "d"(port), "0"(addr), "1" (count)
@@ -82,7 +82,7 @@
 
 static inline void outsl(u16 port, const void *addr, unsigned long count)
 {
-	__asm__ __volatile__ (
+	__asm__ volatile (
 		"cld ; rep ; outsl "
 		: "=S" (addr), "=c" (count)
 		: "d"(port), "0"(addr), "1" (count)
@@ -92,7 +92,7 @@
 
 static inline void insb(u16 port, void *addr, unsigned long count)
 {
-	__asm__ __volatile__ (
+	__asm__ volatile (
 		"cld ; rep ; insb "
 		: "=D" (addr), "=c" (count)
 		: "d"(port), "0"(addr), "1" (count)
@@ -101,7 +101,7 @@
 
 static inline void insw(u16 port, void *addr, unsigned long count)
 {
-	__asm__ __volatile__ (
+	__asm__ volatile (
 		"cld ; rep ; insw "
 		: "=D" (addr), "=c" (count)
 		: "d"(port), "0"(addr), "1" (count)
@@ -110,7 +110,7 @@
 
 static inline void insl(u16 port, void *addr, unsigned long count)
 {
-	__asm__ __volatile__ (
+	__asm__ volatile (
 		"cld ; rep ; insl "
 		: "=D" (addr), "=c" (count)
 		: "d"(port), "0"(addr), "1" (count)
Index: corebootv3-inline_asm_consistent_underscores/include/arch/x86/arch/spinlock.h
===================================================================
--- corebootv3-inline_asm_consistent_underscores/include/arch/x86/arch/spinlock.h	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/include/arch/x86/arch/spinlock.h	(Arbeitskopie)
@@ -29,7 +29,7 @@
  *
  * We make no fairness assumptions. They have a cost.
  */
-#define barrier() __asm__ __volatile__("": : :"memory")
+#define barrier() __asm__ volatile("": : :"memory")
 #define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
 #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
 
@@ -53,14 +53,14 @@
 
 static inline __attribute__((always_inline)) void spin_lock(struct spinlock *lock)
 {
-	__asm__ __volatile__(
+	__asm__ volatile(
 		spin_lock_string
 		:"=m" (lock->lock) : : "memory");
 }
 
 static inline __attribute__((always_inline)) void spin_unlock(struct spinlock *lock)
 {
-	__asm__ __volatile__(
+	__asm__ volatile(
 		spin_unlock_string
 		:"=m" (lock->lock) : : "memory");
 }
Index: corebootv3-inline_asm_consistent_underscores/include/arch/x86/cpu.h
===================================================================
--- corebootv3-inline_asm_consistent_underscores/include/arch/x86/cpu.h	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/include/arch/x86/cpu.h	(Arbeitskopie)
@@ -149,23 +149,23 @@
 static inline unsigned long read_cr0(void)
 {
 	unsigned long cr0;
-	asm volatile("movl %%cr0, %0" : "=r" (cr0));
+	__asm__ volatile("movl %%cr0, %0" : "=r" (cr0));
 	return cr0;
 }
 
 static inline void write_cr0(unsigned long cr0)
 {
-	asm volatile("movl %0, %%cr0" : : "r" (cr0));
+	__asm__ volatile("movl %0, %%cr0" : : "r" (cr0));
 }
 
 static inline void invd(void)
 {
-	asm volatile("invd" : : : "memory");
+	__asm__ volatile("invd" : : : "memory");
 }
 
 static inline void wbinvd(void)
 {
-	asm volatile("wbinvd");
+	__asm__ volatile("wbinvd");
 }
 
 static inline void enable_cache(void)
@@ -194,7 +194,7 @@
  */
 static inline void cpu_relax(void)
 {
-	__asm__ __volatile__("rep; nop" : : : "memory");
+	__asm__ volatile("rep; nop" : : : "memory");
 }
 
 /**
@@ -206,7 +206,7 @@
  */
 static inline __attribute__((always_inline)) void hlt(void)
 {
-	__asm__ __volatile__("hlt" : : : "memory");
+	__asm__ volatile("hlt" : : : "memory");
 }
 
 /**
@@ -216,7 +216,7 @@
  */
 static inline void clear_memory(void *addr, unsigned long size)
 {
-	asm volatile(
+	__asm__ volatile(
 		"cld \n\t"
 		"rep; stosl\n\t"
 		: /* No outputs */
Index: corebootv3-inline_asm_consistent_underscores/include/arch/x86/lapic.h
===================================================================
--- corebootv3-inline_asm_consistent_underscores/include/arch/x86/lapic.h	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/include/arch/x86/lapic.h	(Arbeitskopie)
@@ -93,19 +93,19 @@
 {
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("xchgb %b0,%1"
+		__asm__ volatile("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
-		__asm__ __volatile__("xchgw %w0,%1"
+		__asm__ volatile("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
-		__asm__ __volatile__("xchgl %0,%1"
+		__asm__ volatile("xchgl %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
Index: corebootv3-inline_asm_consistent_underscores/northbridge/amd/k8/dqs.c
===================================================================
--- corebootv3-inline_asm_consistent_underscores/northbridge/amd/k8/dqs.c	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/northbridge/amd/k8/dqs.c	(Arbeitskopie)
@@ -139,13 +139,13 @@
 static inline unsigned long read_cr4(void)
 {
 	unsigned long cr4;
-	asm volatile ("movl %%cr4, %0" : "=r" (cr4));
+	__asm__ volatile ("movl %%cr4, %0" : "=r" (cr4));
 	return cr4;
 }
 
 static inline void write_cr4(unsigned long cr4)
 {
-	asm volatile ("movl %0, %%cr4" : : "r" (cr4));
+	__asm__ volatile ("movl %0, %%cr4" : : "r" (cr4));
 }
Index: corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/raminit.c
===================================================================
--- corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/raminit.c	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/raminit.c	(Arbeitskopie)
@@ -815,7 +815,7 @@
 	post_code(POST_MEM_SETUP_GOOD);
 
 	/* Make sure there is nothing stale in the cache. */
-	/* CAR stack is in the cache __asm__ __volatile__("wbinvd\n"); */
+	/* CAR stack is in the cache __asm__ volatile("wbinvd\n"); */
 
 	/* The RAM dll needs a write to lock on so generate a few dummy
 	 * writes. Note: The descriptor needs to be enabled to point at memory.
Index: corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/vsmsetup.c
===================================================================
--- corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/vsmsetup.c	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/vsmsetup.c	(Arbeitskopie)
@@ -50,7 +50,7 @@
 	u16 entryHi = (VSA2_ENTRY_POINT & 0xffff0000) >> 4;
 	u16 entryLo = (VSA2_ENTRY_POINT & 0xffff);
 
-	__asm__ __volatile__(
+	__asm__ volatile(
 		/* Paranoia -- does ecx get saved? not sure. This is
 		 * the easiest safe thing to do. */
@@ -148,7 +148,7 @@
 u32 VSA_vrRead(u16 classIndex)
 {
 	unsigned eax, ebx, ecx, edx;
-	asm volatile (
+	__asm__ volatile (
 		"movw $0x0AC1C, %%dx \n"
 		"orl $0x0FC530000, %%eax \n"
 		"outl %%eax, %%dx \n"
@@ -164,7 +164,7 @@
 u32 VSA_msrRead(u32 msrAddr)
 {
 	unsigned eax, ebx, ecx, edx;
-	asm volatile (
+	__asm__ volatile (
 		"movw $0x0AC1C, %%dx \n"
 		"movl $0x0FC530007, %%eax \n"
 		"outl %%eax, %%dx \n"
Index: corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/geodelxinit.c
===================================================================
--- corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/geodelxinit.c	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/northbridge/amd/geodelx/geodelxinit.c	(Arbeitskopie)
@@ -758,7 +758,7 @@
 	GLPCI_init();
 	clock_gating_init();
 
-	__asm__ __volatile__("FINIT\n"); /* TODO: Create finit() function? */
+	__asm__ volatile("FINIT\n"); /* TODO: Create finit() function? */
 
 	printk(BIOS_DEBUG, "Exit %s\n", __FUNCTION__);
 }
Index: corebootv3-inline_asm_consistent_underscores/arch/x86/geodelx/stage1.c
===================================================================
--- corebootv3-inline_asm_consistent_underscores/arch/x86/geodelx/stage1.c	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/arch/x86/geodelx/stage1.c	(Arbeitskopie)
@@ -65,8 +65,8 @@
 	 * the data back over itself, and the wbinvd should then
 	 * flush to memory. Let's see.
 	 */
-	__asm__ __volatile__("cld; rep movsl" ::"D" (DCACHE_RAM_BASE), "S" (DCACHE_RAM_BASE), "c" (DCACHE_RAM_SIZE/4): "memory");
-	__asm__ __volatile__ ("wbinvd\n");
+	__asm__ volatile("cld; rep movsl" ::"D" (DCACHE_RAM_BASE), "S" (DCACHE_RAM_BASE), "c" (DCACHE_RAM_SIZE/4): "memory");
+	__asm__ volatile ("wbinvd\n");
 	banner(BIOS_DEBUG, "Disable_car: done wbinvd");
 	northbridge_init_early();
 	banner(BIOS_DEBUG, "disable_car: done");
Index: corebootv3-inline_asm_consistent_underscores/arch/x86/archtables.c
===================================================================
--- corebootv3-inline_asm_consistent_underscores/arch/x86/archtables.c	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/arch/x86/archtables.c	(Arbeitskopie)
@@ -54,7 +54,7 @@
 	memcpy((void*)newgdt, &gdt, num_gdt_bytes);
 	gdtarg.base = newgdt;
 	gdtarg.limit = num_gdt_bytes - 1;
-	__asm__ __volatile__ ("lgdt %0\n\t" : : "m" (gdtarg));
+	__asm__ volatile ("lgdt %0\n\t" : : "m" (gdtarg));
 	printk(BIOS_DEBUG,"OK\n");
 }
 #endif
Index: corebootv3-inline_asm_consistent_underscores/arch/x86/amd/k8/stage1.c
===================================================================
--- corebootv3-inline_asm_consistent_underscores/arch/x86/amd/k8/stage1.c	(Revision 858)
+++ corebootv3-inline_asm_consistent_underscores/arch/x86/amd/k8/stage1.c	(Arbeitskopie)
@@ -45,7 +45,7 @@
 	 * The solution for geode of using a inline asm memcpy of the stack
 	 * onto itself will not mark the cache tags as dirty on K8.
 	 */
-	__asm__ __volatile__(
+	__asm__ volatile(
 		" movl %[carbase], %%esi \n"
 		" movl %[backuplocation], %%edi \n"
 		" movl %[carsizequads], %%ecx \n"
Index: corebootv3-inline_asm_consistent_underscores/arch/x86/stage1.c
===================================================================
--- corebootv3-inline_asm_consistent_underscores/arch/x86/stage1.c	(Revision 856)
+++ corebootv3-inline_asm_consistent_underscores/arch/x86/stage1.c	(Arbeitskopie)
@@ -117,7 +117,7 @@
 u64 cycles(void)
 {
 	u64 ret;
-	asm volatile ("rdtsc" : "=A" (ret));
+	__asm__ volatile ("rdtsc" : "=A" (ret));
 	return ret;
 }