author    | Aaron Durbin <adurbin@chromium.org> | 2018-09-13 02:10:45 -0600
committer | Aaron Durbin <adurbin@chromium.org> | 2018-09-14 08:16:37 +0000
commit    | 75a62e76486f63f6dadb5492c205570ace81e9d5 (patch)
tree      | c3338d2ddd7b2f9f51f35432a24087fc289999fb /src/include
parent    | cf9ea55473cde8b9a2b9494eca452df7783376e5 (diff)
compiler.h: add __always_inline and use it in code base
Add a __always_inline macro that wraps __attribute__((always_inline))
and replace current users with the macro, excluding files under
src/vendorcode.
Change-Id: Ic57e474c1d2ca7cc0405ac677869f78a28d3e529
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: https://review.coreboot.org/28587
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Julius Werner <jwerner@google.com>
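
For readers who just want the shape of the change: everything below reduces to one new macro in src/include/compiler.h plus a mechanical substitution at each definition it replaces. The following standalone sketch shows that pattern (demo_add and the main() harness are illustrative and not part of the patch; the macro definition is the one the patch adds) and builds on its own with GCC or Clang:

#include <stdint.h>

/* Same definition the patch adds to src/include/compiler.h. */
#define __always_inline inline __attribute__((always_inline))

/*
 * Before the patch a helper would spell the attribute out in full:
 *   static inline __attribute__((always_inline)) uint32_t demo_add(...)
 * After the patch it uses the shared macro instead:
 */
static __always_inline uint32_t demo_add(uint32_t a, uint32_t b)
{
	return a + b;	/* body is forced into every caller */
}

int main(void)
{
	return (int)demo_add(2, 3) - 5;	/* exits 0 */
}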
Diffstat (limited to 'src/include')
-rw-r--r-- | src/include/compiler.h       |  1
-rw-r--r-- | src/include/cpu/amd/mtrr.h   |  6
-rw-r--r-- | src/include/cpu/x86/cache.h  |  9
-rw-r--r-- | src/include/cpu/x86/cr.h     | 15
-rw-r--r-- | src/include/cpu/x86/lapic.h  | 13
-rw-r--r-- | src/include/cpu/x86/msr.h    | 16
-rw-r--r-- | src/include/device/pci_ops.h | 13
7 files changed, 38 insertions, 35 deletions
diff --git a/src/include/compiler.h b/src/include/compiler.h
index 4df1cab7e6..96c2045e4d 100644
--- a/src/include/compiler.h
+++ b/src/include/compiler.h
@@ -27,5 +27,6 @@
 #define __must_check __attribute__((warn_unused_result))
 #define __weak __attribute__((weak))
 #define __noreturn __attribute__((noreturn))
+#define __always_inline inline __attribute__((always_inline))
 
 #endif
diff --git a/src/include/cpu/amd/mtrr.h b/src/include/cpu/amd/mtrr.h
index b31648d0af..9661b636cc 100644
--- a/src/include/cpu/amd/mtrr.h
+++ b/src/include/cpu/amd/mtrr.h
@@ -38,13 +38,14 @@
 
 #if !defined(__PRE_RAM__) && !defined(__ASSEMBLER__)
 
+#include <compiler.h>
 #include <cpu/x86/msr.h>
 
 void amd_setup_mtrrs(void);
 struct device;
 void add_uma_resource_below_tolm(struct device *nb, int idx);
 
-static inline __attribute__((always_inline)) msr_t rdmsr_amd(unsigned int index)
+static __always_inline msr_t rdmsr_amd(unsigned int index)
 {
 	msr_t result;
 	__asm__ __volatile__ (
@@ -55,8 +56,7 @@ static inline __attribute__((always_inline)) msr_t rdmsr_amd(unsigned int index)
 	return result;
 }
 
-static inline __attribute__((always_inline)) void wrmsr_amd(unsigned int index,
-	msr_t msr)
+static __always_inline void wrmsr_amd(unsigned int index, msr_t msr)
 {
 	__asm__ __volatile__ (
 		"wrmsr"
diff --git a/src/include/cpu/x86/cache.h b/src/include/cpu/x86/cache.h
index e0a335971b..81d2ae7223 100644
--- a/src/include/cpu/x86/cache.h
+++ b/src/include/cpu/x86/cache.h
@@ -16,6 +16,7 @@
 #ifndef CPU_X86_CACHE
 #define CPU_X86_CACHE
 
+#include <compiler.h>
 #include <cpu/x86/cr.h>
 
 #define CR0_CacheDisable (CR0_CD)
@@ -55,7 +56,7 @@ static inline void clflush(void *addr)
 	asm volatile ("clflush (%0)"::"r" (addr));
 }
 
-/* The following functions require the always_inline due to AMD
+/* The following functions require the __always_inline due to AMD
  * function STOP_CAR_AND_CPU that disables cache as
  * RAM, the cache as RAM stack can no longer be used. Called
  * functions must be inlined to avoid stack usage. Also, the
@@ -63,9 +64,9 @@ static inline void clflush(void *addr)
  * allocated them from the stack. With gcc 4.5.0, some functions
  * declared as inline are not being inlined. This patch forces
  * these functions to always be inlined by adding the qualifier
- * __attribute__((always_inline)) to their declaration.
+ * __always_inline to their declaration.
  */
-static inline __attribute__((always_inline)) void enable_cache(void)
+static __always_inline void enable_cache(void)
 {
 	unsigned long cr0;
 	cr0 = read_cr0();
@@ -73,7 +74,7 @@ static inline __attribute__((always_inline)) void enable_cache(void)
 	write_cr0(cr0);
 }
 
-static inline __attribute__((always_inline)) void disable_cache(void)
+static __always_inline void disable_cache(void)
 {
 	/* Disable and write back the cache */
 	unsigned long cr0;
diff --git a/src/include/cpu/x86/cr.h b/src/include/cpu/x86/cr.h
index 5183c77f89..229a67d422 100644
--- a/src/include/cpu/x86/cr.h
+++ b/src/include/cpu/x86/cr.h
@@ -18,6 +18,7 @@
 
 #if !defined(__ASSEMBLER__)
 
+#include <compiler.h>
 #include <stdint.h>
 #include <arch/cpu.h>
 
@@ -37,7 +38,7 @@
 #define CRx_IN "r"
 #define CRx_RET "=r"
 #endif
-static alwaysinline CRx_TYPE read_cr0(void)
+static __always_inline CRx_TYPE read_cr0(void)
 {
 	CRx_TYPE value;
 	__asm__ __volatile__ (
@@ -49,7 +50,7 @@ static alwaysinline CRx_TYPE read_cr0(void)
 	return value;
 }
 
-static alwaysinline void write_cr0(CRx_TYPE data)
+static __always_inline void write_cr0(CRx_TYPE data)
 {
 	__asm__ __volatile__ (
 		"mov %0, %%cr0"
@@ -59,7 +60,7 @@
 	);
 }
 
-static alwaysinline CRx_TYPE read_cr2(void)
+static __always_inline CRx_TYPE read_cr2(void)
 {
 	CRx_TYPE value;
 	__asm__ __volatile__ (
@@ -71,7 +72,7 @@
 	return value;
 }
 
-static alwaysinline CRx_TYPE read_cr3(void)
+static __always_inline CRx_TYPE read_cr3(void)
 {
 	CRx_TYPE value;
 	__asm__ __volatile__ (
@@ -83,7 +84,7 @@
 	return value;
 }
 
-static alwaysinline void write_cr3(CRx_TYPE data)
+static __always_inline void write_cr3(CRx_TYPE data)
 {
 	__asm__ __volatile__ (
 		"mov %0, %%cr3"
@@ -92,7 +93,7 @@
 	: COMPILER_BARRIER
 	);
 }
-static alwaysinline CRx_TYPE read_cr4(void)
+static __always_inline CRx_TYPE read_cr4(void)
 {
 	CRx_TYPE value;
 	__asm__ __volatile__ (
@@ -104,7 +105,7 @@ static alwaysinline CRx_TYPE read_cr4(void)
 	return value;
 }
 
-static alwaysinline void write_cr4(CRx_TYPE data)
+static __always_inline void write_cr4(CRx_TYPE data)
 {
 	__asm__ __volatile__ (
 		"mov %0, %%cr4"
diff --git a/src/include/cpu/x86/lapic.h b/src/include/cpu/x86/lapic.h
index 6121230a22..5ac3c5e2e8 100644
--- a/src/include/cpu/x86/lapic.h
+++ b/src/include/cpu/x86/lapic.h
@@ -1,24 +1,23 @@
 #ifndef CPU_X86_LAPIC_H
 #define CPU_X86_LAPIC_H
 
+#include <compiler.h>
 #include <cpu/x86/lapic_def.h>
 #include <cpu/x86/msr.h>
 #include <halt.h>
 #include <smp/node.h>
 
-static inline __attribute__((always_inline)) unsigned long lapic_read(
-	unsigned long reg)
+static __always_inline unsigned long lapic_read(unsigned long reg)
 {
 	return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg));
 }
 
-static inline __attribute__((always_inline)) void lapic_write(unsigned long reg,
-	unsigned long v)
+static __always_inline void lapic_write(unsigned long reg, unsigned long v)
 {
 	*((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg)) = v;
 }
 
-static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
+static __always_inline void lapic_wait_icr_idle(void)
 {
 	do { } while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY);
 }
@@ -42,7 +41,7 @@ static inline void disable_lapic(void)
 	wrmsr(LAPIC_BASE_MSR, msr);
 }
 
-static inline __attribute__((always_inline)) unsigned long lapicid(void)
+static __always_inline unsigned long lapicid(void)
 {
 	return lapic_read(LAPIC_ID) >> 24;
 }
@@ -51,7 +50,7 @@ static inline __attribute__((always_inline)) unsigned long lapicid(void)
 /* If we need to go back to sipi wait, we use the long non-inlined version of
  * this function in lapic_cpu_init.c
  */
-static inline __attribute__((always_inline)) void stop_this_cpu(void)
+static __always_inline void stop_this_cpu(void)
 {
 	/* Called by an AP when it is ready to halt and wait for a new task */
 	halt();
diff --git a/src/include/cpu/x86/msr.h b/src/include/cpu/x86/msr.h
index 74c2521af5..290c54a499 100644
--- a/src/include/cpu/x86/msr.h
+++ b/src/include/cpu/x86/msr.h
@@ -1,6 +1,8 @@
 #ifndef CPU_X86_MSR_H
 #define CPU_X86_MSR_H
 
+#include <compiler.h>
+
 /* Intel SDM: Table 2-1
  * IA-32 architectural MSR: Extended Feature Enable Register
  */
@@ -50,19 +52,18 @@ msr_t soc_msr_read(unsigned int index);
 void soc_msr_write(unsigned int index, msr_t msr);
 
 /* Handle MSR references in the other source code */
-static inline __attribute__((always_inline)) msr_t rdmsr(unsigned int index)
+static __always_inline msr_t rdmsr(unsigned int index)
 {
 	return soc_msr_read(index);
 }
-static inline __attribute__((always_inline)) void wrmsr(unsigned int index,
-	msr_t msr)
+static __always_inline void wrmsr(unsigned int index, msr_t msr)
 {
 	soc_msr_write(index, msr);
 }
 
 #else /* CONFIG_SOC_SETS_MSRS */
 
-/* The following functions require the always_inline due to AMD
+/* The following functions require the __always_inline due to AMD
  * function STOP_CAR_AND_CPU that disables cache as
  * RAM, the cache as RAM stack can no longer be used. Called
  * functions must be inlined to avoid stack usage. Also, the
@@ -70,9 +71,9 @@ static inline __attribute__((always_inline)) void wrmsr(unsigned int index,
  * allocated them from the stack. With gcc 4.5.0, some functions
  * declared as inline are not being inlined. This patch forces
  * these functions to always be inlined by adding the qualifier
- * __attribute__((always_inline)) to their declaration.
+ * __always_inline to their declaration.
  */
-static inline __attribute__((always_inline)) msr_t rdmsr(unsigned int index)
+static __always_inline msr_t rdmsr(unsigned int index)
 {
 	msr_t result;
 	__asm__ __volatile__ (
@@ -83,8 +84,7 @@ static inline __attribute__((always_inline)) msr_t rdmsr(unsigned int index)
 	return result;
 }
 
-static inline __attribute__((always_inline)) void wrmsr(unsigned int index,
-	msr_t msr)
+static __always_inline void wrmsr(unsigned int index, msr_t msr)
 {
 	__asm__ __volatile__ (
 		"wrmsr"
diff --git a/src/include/device/pci_ops.h b/src/include/device/pci_ops.h
index 2518f2002d..9e9baa0d60 100644
--- a/src/include/device/pci_ops.h
+++ b/src/include/device/pci_ops.h
@@ -1,6 +1,7 @@
 #ifndef PCI_OPS_H
 #define PCI_OPS_H
 
+#include <compiler.h>
 #include <stdint.h>
 #include <device/device.h>
 #include <arch/pci_ops.h>
@@ -19,28 +20,28 @@ void pci_write_config32(struct device *dev, unsigned int where, u32 val);
  * Use device_t here as the functions are to be used with either
  * __SIMPLE_DEVICE__ defined or undefined.
  */
-static inline __attribute__((always_inline))
+static __always_inline
 void pci_or_config8(device_t dev, unsigned int where, u8 ormask)
 {
 	u8 value = pci_read_config8(dev, where);
 	pci_write_config8(dev, where, value | ormask);
 }
 
-static inline __attribute__((always_inline))
+static __always_inline
 void pci_or_config16(device_t dev, unsigned int where, u16 ormask)
 {
 	u16 value = pci_read_config16(dev, where);
 	pci_write_config16(dev, where, value | ormask);
 }
 
-static inline __attribute__((always_inline))
+static __always_inline
 void pci_or_config32(device_t dev, unsigned int where, u32 ormask)
 {
 	u32 value = pci_read_config32(dev, where);
 	pci_write_config32(dev, where, value | ormask);
 }
 
-static inline __attribute__((always_inline))
+static __always_inline
 void pci_update_config8(device_t dev, int reg, u8 mask, u8 or)
 {
 	u8 reg8;
@@ -51,7 +52,7 @@ void pci_update_config8(device_t dev, int reg, u8 mask, u8 or)
 	pci_write_config8(dev, reg, reg8);
 }
 
-static inline __attribute__((always_inline))
+static __always_inline
 void pci_update_config16(device_t dev, int reg, u16 mask, u16 or)
 {
 	u16 reg16;
@@ -62,7 +63,7 @@ void pci_update_config16(device_t dev, int reg, u16 mask, u16 or)
 	pci_write_config16(dev, reg, reg16);
 }
 
-static inline __attribute__((always_inline))
+static __always_inline
 void pci_update_config32(device_t dev, int reg, u32 mask, u32 or)
 {
 	u32 reg32;
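
The comment carried over in cache.h and msr.h explains why the attribute is needed at all: while cache-as-RAM is being torn down (the AMD STOP_CAR_AND_CPU path) there is no usable stack, so these helpers must be expanded into their callers rather than called, and a plain inline hint has been observed to be ignored (the gcc 4.5.0 note). A hypothetical side-by-side, with made-up function names that are not part of coreboot, of what the forced variant guarantees even at -O0:

#define __always_inline inline __attribute__((always_inline))

/* Plain inline is only a hint; without optimization it usually still
 * becomes a real call with its own stack frame. */
static inline int maybe_inlined(int x)
{
	return x + 1;
}

/* __attribute__((always_inline)) forces the body into the caller even
 * at -O0, which is the property the cache-as-RAM teardown relies on. */
static __always_inline int forced_inlined(int x)
{
	return x + 1;
}

int demo(int x)
{
	return maybe_inlined(x) + forced_inlined(x);
}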