From c058d1c0f8c04a0fca778f70701d7f903754b0b6 Mon Sep 17 00:00:00 2001
From: Logan Carlson
Date: Tue, 30 May 2017 15:31:10 -0600
Subject: arch/arm: Correct checkpatch errors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Correct whitespace issues in arch/arm and arch/arm64. Enclose complex
values in parentheses.

Change-Id: I74b68f485adff1e6f0fa433e51e12b59ccea654b
Signed-off-by: Logan Carlson
Reviewed-on: https://review.coreboot.org/19989
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer
Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Philipp Deppenwiese
---
 src/arch/arm/div0.c                            |  2 +-
 src/arch/arm/eabi_compat.c                     |  4 ++--
 src/arch/arm/include/armv4/arch/smp/spinlock.h | 10 +++++-----
 src/arch/arm/include/armv7.h                   |  6 +++---
 src/arch/arm/include/smp/spinlock.h            |  4 ++--
 src/arch/arm64/armv8/cache.c                   |  2 +-
 src/arch/arm64/armv8/lib/cache.c               | 22 +++++++++++-----------
 7 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/src/arch/arm/div0.c b/src/arch/arm/div0.c
index afd9dad1f3..e474f73d6e 100644
--- a/src/arch/arm/div0.c
+++ b/src/arch/arm/div0.c
@@ -19,7 +19,7 @@ void __div0(void); // called from asm so no need for a prototype in a header
 
 /* Replacement (=dummy) for GNU/Linux division-by zero handler */
 /* recursion is ok here because we have no formats ... */
-void __div0 (void)
+void __div0(void)
 {
 	printk(BIOS_EMERG, "DIVIDE BY ZERO! continuing ...\n");
 }
diff --git a/src/arch/arm/eabi_compat.c b/src/arch/arm/eabi_compat.c
index e49f199ce7..15f7d36667 100644
--- a/src/arch/arm/eabi_compat.c
+++ b/src/arch/arm/eabi_compat.c
@@ -20,8 +20,8 @@
 #include <console/console.h>
 
 /* FIXME(dhendrix): prototypes added for assembler */
-int raise (int signum) __attribute__((used));
-int raise (int signum)
+int raise(int signum) __attribute__((used));
+int raise(int signum)
 {
 	printk(BIOS_CRIT, "raise: Signal # %d caught\n", signum);
 	return 0;
diff --git a/src/arch/arm/include/armv4/arch/smp/spinlock.h b/src/arch/arm/include/armv4/arch/smp/spinlock.h
index 6c5f6e88d9..e49dc4440a 100644
--- a/src/arch/arm/include/armv4/arch/smp/spinlock.h
+++ b/src/arch/arm/include/armv4/arch/smp/spinlock.h
@@ -15,12 +15,12 @@
 #define _ARCH_SMP_SPINLOCK_H
 
 #define DECLARE_SPIN_LOCK(x)
-#define barrier() do {} while(0)
+#define barrier() do {} while (0)
 #define spin_is_locked(lock) 0
-#define spin_unlock_wait(lock) do {} while(0)
-#define spin_lock(lock) do {} while(0)
-#define spin_unlock(lock) do {} while(0)
-#define cpu_relax() do {} while(0)
+#define spin_unlock_wait(lock) do {} while (0)
+#define spin_lock(lock) do {} while (0)
+#define spin_unlock(lock) do {} while (0)
+#define cpu_relax() do {} while (0)
 
 #include
 #define boot_cpu() 1
diff --git a/src/arch/arm/include/armv7.h b/src/arch/arm/include/armv7.h
index 6622a6f9d4..bec7fd7033 100644
--- a/src/arch/arm/include/armv7.h
+++ b/src/arch/arm/include/armv7.h
@@ -54,8 +54,8 @@
  * However, we use the CP15 based instructions because we use
  * -march=armv5 in U-Boot
  */
-#define CP15ISB asm volatile ("mcr p15, 0, %0, c7, c5, 4" : : "r" (0))
-#define CP15DSB asm volatile ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0))
-#define CP15DMB asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0))
+#define CP15ISB (asm volatile ("mcr p15, 0, %0, c7, c5, 4" : : "r" (0)))
+#define CP15DSB (asm volatile ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)))
+#define CP15DMB (asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0)))
 
 #endif /* ARMV7_H */
diff --git a/src/arch/arm/include/smp/spinlock.h b/src/arch/arm/include/smp/spinlock.h
index a8f9c77c73..f98900a66b 100644
--- a/src/arch/arm/include/smp/spinlock.h
+++ b/src/arch/arm/include/smp/spinlock.h
@@ -29,9 +29,9 @@ typedef struct {
 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
 #define DECLARE_SPIN_LOCK(x) static spinlock_t x = SPIN_LOCK_UNLOCKED;
 
-#define barrier() __asm__ __volatile__("": : :"memory")
+#define barrier() (__asm__ __volatile__("" : : : "memory"))
 #define spin_is_locked(x) (*(volatile char *)(&(x)->lock) != 0)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+#define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x))
 
 static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock)
 {
diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c
index 4b99cd7830..53aefe0bc4 100644
--- a/src/arch/arm64/armv8/cache.c
+++ b/src/arch/arm64/armv8/cache.c
@@ -85,7 +85,7 @@ static void dcache_op_va(void const *addr, size_t len, enum dcache_op op)
 
 	dsb();
 	while ((void *)line < addr + len) {
-		switch(op) {
+		switch (op) {
 		case OP_DCCIVAC:
 			dccivac(line);
 			break;
diff --git a/src/arch/arm64/armv8/lib/cache.c b/src/arch/arm64/armv8/lib/cache.c
index b4ecda656b..0c621ef96d 100644
--- a/src/arch/arm64/armv8/lib/cache.c
+++ b/src/arch/arm64/armv8/lib/cache.c
@@ -23,55 +23,55 @@
 
 void dccisw(uint64_t cisw)
 {
-	__asm__ __volatile__("dc cisw, %0\n\t" : : "r" (cisw) :"memory");
+	__asm__ __volatile__("dc cisw, %0\n\t" : : "r" (cisw) : "memory");
 }
 
 void dccivac(uint64_t civac)
 {
-	__asm__ __volatile__("dc civac, %0\n\t" : : "r" (civac) :"memory");
+	__asm__ __volatile__("dc civac, %0\n\t" : : "r" (civac) : "memory");
 }
 
 void dccsw(uint64_t csw)
 {
-	__asm__ __volatile__("dc csw, %0\n\t" : : "r" (csw) :"memory");
+	__asm__ __volatile__("dc csw, %0\n\t" : : "r" (csw) : "memory");
 }
 
 void dccvac(uint64_t cvac)
 {
-	__asm__ __volatile__("dc cvac, %0\n\t" : : "r" (cvac) :"memory");
+	__asm__ __volatile__("dc cvac, %0\n\t" : : "r" (cvac) : "memory");
 }
 
 void dccvau(uint64_t cvau)
 {
-	__asm__ __volatile__("dc cvau, %0\n\t" : : "r" (cvau) :"memory");
+	__asm__ __volatile__("dc cvau, %0\n\t" : : "r" (cvau) : "memory");
 }
 
 void dcisw(uint64_t isw)
 {
-	__asm__ __volatile__("dc isw, %0\n\t" : : "r" (isw) :"memory");
+	__asm__ __volatile__("dc isw, %0\n\t" : : "r" (isw) : "memory");
 }
 
 void dcivac(uint64_t ivac)
 {
-	__asm__ __volatile__("dc ivac, %0\n\t" : : "r" (ivac) :"memory");
+	__asm__ __volatile__("dc ivac, %0\n\t" : : "r" (ivac) : "memory");
 }
 
 void dczva(uint64_t zva)
 {
-	__asm__ __volatile__("dc zva, %0\n\t" : : "r" (zva) :"memory");
+	__asm__ __volatile__("dc zva, %0\n\t" : : "r" (zva) : "memory");
 }
 
 void iciallu(void)
 {
-	__asm__ __volatile__("ic iallu\n\t" : : :"memory");
+	__asm__ __volatile__("ic iallu\n\t" : : : "memory");
 }
 
 void icialluis(void)
 {
-	__asm__ __volatile__("ic ialluis\n\t" : : :"memory");
+	__asm__ __volatile__("ic ialluis\n\t" : : : "memory");
 }
 
 void icivau(uint64_t ivau)
 {
-	__asm__ __volatile__("ic ivau, %0\n\t" : : "r" (ivau) :"memory");
+	__asm__ __volatile__("ic ivau, %0\n\t" : : "r" (ivau) : "memory");
 }
-- 
cgit v1.2.3
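
For context on the "enclose complex values in parentheses" rule that checkpatch
enforces, here is a minimal sketch, not part of the commit above; the macro
names are hypothetical and only illustrate how an unparenthesized macro body
can change meaning at the expansion site:

/* Hypothetical macros, for illustration only. */
#define SCALE_BAD(x)   x * 4 + 1        /* body not parenthesized */
#define SCALE_GOOD(x)  ((x) * 4 + 1)    /* form checkpatch prefers */

/* 2 * SCALE_BAD(3)  expands to 2 * 3 * 4 + 1     = 25 (surprising)  */
/* 2 * SCALE_GOOD(3) expands to 2 * ((3) * 4 + 1) = 26 (as intended) */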