Following patch reworks car_disable into C. Tested; it works here. I also compared the GCC-generated code and it looks all right.