From 86091f94b6ca58f4b8795503b274492d6a935c15 Mon Sep 17 00:00:00 2001
From: Alexandru Gagniuc
Date: Wed, 30 Sep 2015 20:23:09 -0700
Subject: cpu/mtrr.h: Fix macro names for MTRR registers

We use UNDERSCORE_CASE. For the MTRR macros that refer to an MSR,
we also remove the _MSR suffix, as they are, by definition, MSRs.

Change-Id: Id4483a75d62cf1b478a9105ee98a8f55140ce0ef
Signed-off-by: Alexandru Gagniuc
Reviewed-on: http://review.coreboot.org/11761
Reviewed-by: Aaron Durbin
Tested-by: build bot (Jenkins)
---
 src/soc/intel/baytrail/bootblock/bootblock.c      |  8 ++--
 src/soc/intel/baytrail/cpu.c                      |  6 +--
 src/soc/intel/baytrail/romstage/cache_as_ram.inc  | 28 ++++++-------
 src/soc/intel/baytrail/romstage/romstage.c        |  8 ++--
 src/soc/intel/braswell/bootblock/bootblock.c      |  8 ++--
 src/soc/intel/braswell/cpu.c                      |  6 +--
 src/soc/intel/broadwell/bootblock/cpu.c           | 12 +++---
 src/soc/intel/broadwell/include/soc/msr.h         |  2 +-
 src/soc/intel/broadwell/romstage/cache_as_ram.inc | 38 ++++++++---------
 src/soc/intel/broadwell/romstage/stack.c          |  8 ++--
 src/soc/intel/broadwell/smmrelocate.c             | 12 +++---
 src/soc/intel/common/stack.c                      |  8 ++--
 src/soc/intel/common/util.c                       | 50 +++++++++++------------
 src/soc/intel/fsp_baytrail/bootblock/bootblock.c  |  8 ++--
 src/soc/intel/fsp_baytrail/cpu.c                  |  6 +--
 src/soc/intel/skylake/bootblock/cpu.c             | 14 +++----
 src/soc/intel/skylake/cpu.c                       |  2 +-
 src/soc/intel/skylake/include/soc/msr.h           |  2 +-
 src/soc/intel/skylake/smmrelocate.c               | 12 +++---
 19 files changed, 119 insertions(+), 119 deletions(-)

(limited to 'src/soc/intel')

diff --git a/src/soc/intel/baytrail/bootblock/bootblock.c b/src/soc/intel/baytrail/bootblock/bootblock.c
index 6d31add508..f73ac46028 100644
--- a/src/soc/intel/baytrail/bootblock/bootblock.c
+++ b/src/soc/intel/baytrail/bootblock/bootblock.c
@@ -29,10 +29,10 @@ static void set_var_mtrr(int reg, uint32_t base, uint32_t size, int type)
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -47,7 +47,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void setup_mmconfig(void)
diff --git a/src/soc/intel/baytrail/cpu.c b/src/soc/intel/baytrail/cpu.c
index 6b84c59711..81e04baad4 100644
--- a/src/soc/intel/baytrail/cpu.c
+++ b/src/soc/intel/baytrail/cpu.c
@@ -197,10 +197,10 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	/* Set up SMRR. */
 	smrr.lo = relo_attrs.smrr_base;
 	smrr.hi = 0;
-	wrmsr(SMRRphysBase_MSR, smrr);
+	wrmsr(SMRR_PHYS_BASE, smrr);
 	smrr.lo = relo_attrs.smrr_mask;
 	smrr.hi = 0;
-	wrmsr(SMRRphysMask_MSR, smrr);
+	wrmsr(SMRR_PHYS_MASK, smrr);
 
 	/* The relocated handler runs with all CPUs concurrently. Therefore
 	 * stagger the entry points adjusting SMBASE downwards by save state
@@ -264,7 +264,7 @@ static int smm_load_handlers(void)
 	relo_attrs.smbase = (uint32_t)smm_region_start();
 	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
 	relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask;
-	relo_attrs.smrr_mask |= MTRRphysMaskValid;
+	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;
 
 	/* Install handlers. */
 	if (install_relocation_handler(pattrs->num_cpus) < 0) {
diff --git a/src/soc/intel/baytrail/romstage/cache_as_ram.inc b/src/soc/intel/baytrail/romstage/cache_as_ram.inc
index 583ec5812b..21060291d8 100644
--- a/src/soc/intel/baytrail/romstage/cache_as_ram.inc
+++ b/src/soc/intel/baytrail/romstage/cache_as_ram.inc
@@ -60,7 +60,7 @@ wait_for_sipi:
 	post_code(0x21)
 	/* Configure the default memory type to uncacheable as well as disable
 	 * fixed and variable range mtrrs. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
 	andl	$(~0x00000cff), %eax
 	wrmsr
@@ -95,34 +95,34 @@ wait_for_sipi:
 	post_code(0x23)
 
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	post_code(0x24)
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	post_code(0x25)
 	/* Set code caching up for romstage. */
-	movl	$(MTRRphysBase_MSR(1)), %ecx
+	movl	$(MTRR_PHYS_BASE(1)), %ecx
 	movl	$(CODE_CACHE_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
 
-	movl	$(MTRRphysMask_MSR(1)), %ecx
-	movl	$(CODE_CACHE_MASK | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(1)), %ecx
+	movl	$(CODE_CACHE_MASK | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x26)
@@ -198,9 +198,9 @@ before_romstage:
 	post_code(0x2c)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	invd
@@ -225,7 +225,7 @@ before_romstage:
 
 	/* Get number of MTRRs. */
 	popl	%ebx
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 1:
 	testl	%ebx, %ebx
 	jz	1f
@@ -258,9 +258,9 @@ before_romstage:
 	post_code(0x30)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x31)
diff --git a/src/soc/intel/baytrail/romstage/romstage.c b/src/soc/intel/baytrail/romstage/romstage.c
index c7f66bb4a2..81978b4f91 100644
--- a/src/soc/intel/baytrail/romstage/romstage.c
+++ b/src/soc/intel/baytrail/romstage/romstage.c
@@ -311,14 +311,14 @@ static void *setup_stack_and_mttrs(void)
 
 	/* Cache the ROM as WP just below 4GiB. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
 	num_mtrrs++;
 
 	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -329,7 +329,7 @@ static void *setup_stack_and_mttrs(void)
 	 * this area as cacheable so it can be used later for ramstage before
 	 * setting up the entire RAM as cacheable. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -340,7 +340,7 @@ static void *setup_stack_and_mttrs(void)
 	 * provides faster access when relocating the SMM handler as well
 	 * as using the TSEG region for other purposes. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
diff --git a/src/soc/intel/braswell/bootblock/bootblock.c b/src/soc/intel/braswell/bootblock/bootblock.c
index f98f694f41..ca192100b7 100644
--- a/src/soc/intel/braswell/bootblock/bootblock.c
+++ b/src/soc/intel/braswell/bootblock/bootblock.c
@@ -30,10 +30,10 @@ static void set_var_mtrr(int reg, uint32_t base, uint32_t size, int type)
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -48,7 +48,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void setup_mmconfig(void)
diff --git a/src/soc/intel/braswell/cpu.c b/src/soc/intel/braswell/cpu.c
index 2ab8725818..41a43ee50b 100644
--- a/src/soc/intel/braswell/cpu.c
+++ b/src/soc/intel/braswell/cpu.c
@@ -206,10 +206,10 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	/* Set up SMRR. */
 	smrr.lo = relo_attrs.smrr_base;
 	smrr.hi = 0;
-	wrmsr(SMRRphysBase_MSR, smrr);
+	wrmsr(SMRR_PHYS_BASE, smrr);
 	smrr.lo = relo_attrs.smrr_mask;
 	smrr.hi = 0;
-	wrmsr(SMRRphysMask_MSR, smrr);
+	wrmsr(SMRR_PHYS_MASK, smrr);
 
 	/*
 	 * The relocated handler runs with all CPUs concurrently. Therefore
@@ -284,7 +284,7 @@ static int smm_load_handlers(void)
 	relo_attrs.smbase = (uint32_t)smm_base;
 	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
 	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
-	relo_attrs.smrr_mask |= MTRRphysMaskValid;
+	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;
 
 	/* Install handlers. */
 	if (install_relocation_handler(pattrs->num_cpus) < 0) {
diff --git a/src/soc/intel/broadwell/bootblock/cpu.c b/src/soc/intel/broadwell/bootblock/cpu.c
index 83bd873739..6e9d3a5bb8 100644
--- a/src/soc/intel/broadwell/bootblock/cpu.c
+++ b/src/soc/intel/broadwell/bootblock/cpu.c
@@ -36,10 +36,10 @@ static void set_var_mtrr(
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -54,7 +54,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void bootblock_mdelay(int ms)
@@ -120,12 +120,12 @@ static void set_flex_ratio_to_tdp_nominal(void)
 static void check_for_clean_reset(void)
 {
 	msr_t msr;
-	msr = rdmsr(MTRRdefType_MSR);
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
 
 	/* Use the MTRR default type MSR as a proxy for detecting INIT#.
 	 * Reset the system if any known bits are set in that MSR. That is
 	 * an indication of the CPU not being properly reset. */
-	if (msr.lo & (MTRRdefTypeEn | MTRRdefTypeFixEn)) {
+	if (msr.lo & (MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN)) {
 		outb(0x0, 0xcf9);
 		outb(0x6, 0xcf9);
 		halt();
diff --git a/src/soc/intel/broadwell/include/soc/msr.h b/src/soc/intel/broadwell/include/soc/msr.h
index 914a11f016..7ed61f4350 100644
--- a/src/soc/intel/broadwell/include/soc/msr.h
+++ b/src/soc/intel/broadwell/include/soc/msr.h
@@ -102,7 +102,7 @@
 #define SMBASE_MSR		0xc20
 #define IEDBASE_MSR		0xc22
 
-/* MTRRcap_MSR bits */
+/* MTRR_CAP_MSR bits */
 #define SMRR_SUPPORTED (1<<11)
 #define EMRR_SUPPORTED (1<<12)
 
diff --git a/src/soc/intel/broadwell/romstage/cache_as_ram.inc b/src/soc/intel/broadwell/romstage/cache_as_ram.inc
index 3f1b12af18..8359e4a76e 100644
--- a/src/soc/intel/broadwell/romstage/cache_as_ram.inc
+++ b/src/soc/intel/broadwell/romstage/cache_as_ram.inc
@@ -76,31 +76,31 @@ clear_mtrrs:
 	post_code(0x22)
 
 	/* Configure the default memory type to uncacheable. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
 	andl	$(~0x00000cff), %eax
 	wrmsr
 
 	post_code(0x23)
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	post_code(0x24)
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	post_code(0x25)
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	/* Enable cache (CR0.CD = 0, CR0.NW = 0). */
@@ -136,7 +136,7 @@ clear_mtrrs:
 	movl	%eax, %cr0
 
 	/* Enable cache for our code in Flash because we do XIP here */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -147,19 +147,19 @@ clear_mtrrs:
 	orl	$MTRR_TYPE_WRPROT, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$CPU_PHYSMASK_HI, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	post_code(0x27)
 	/* Enable caching for ram init code to run faster */
-	movl	$MTRRphysBase_MSR(2), %ecx
+	movl	$MTRR_PHYS_BASE(2), %ecx
 	movl	$(CACHE_MRC_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(2), %ecx
-	movl	$(CACHE_MRC_MASK | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(2), %ecx
+	movl	$(CACHE_MRC_MASK | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
@@ -217,9 +217,9 @@ before_romstage:
 	post_code(0x31)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	post_code(0x31)
@@ -240,9 +240,9 @@ before_romstage:
 	/* Clear MTRR that was used to cache MRC */
 	xorl	%eax, %eax
 	xorl	%edx, %edx
-	movl	$MTRRphysBase_MSR(2), %ecx
+	movl	$MTRR_PHYS_BASE(2), %ecx
 	wrmsr
-	movl	$MTRRphysMask_MSR(2), %ecx
+	movl	$MTRR_PHYS_MASK(2), %ecx
 	wrmsr
 
 	post_code(0x33)
@@ -266,7 +266,7 @@ before_romstage:
 
 	/* Get number of MTRRs. */
 	popl	%ebx
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
1:
 	testl	%ebx, %ebx
 	jz	1f
@@ -299,9 +299,9 @@ before_romstage:
 	post_code(0x3a)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x3b)
diff --git a/src/soc/intel/broadwell/romstage/stack.c b/src/soc/intel/broadwell/romstage/stack.c
index ed8e9c3299..e66ce75e4b 100644
--- a/src/soc/intel/broadwell/romstage/stack.c
+++ b/src/soc/intel/broadwell/romstage/stack.c
@@ -82,14 +82,14 @@ void *setup_stack_and_mttrs(void)
 
 	/* Cache the ROM as WP just below 4GiB. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
 	num_mtrrs++;
 
 	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -100,7 +100,7 @@ void *setup_stack_and_mttrs(void)
 	 * this area as cacheable so it can be used later for ramstage before
 	 * setting up the entire RAM as cacheable. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -111,7 +111,7 @@ void *setup_stack_and_mttrs(void)
 	 * provides faster access when relocating the SMM handler as well
 	 * as using the TSEG region for other purposes. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
diff --git a/src/soc/intel/broadwell/smmrelocate.c b/src/soc/intel/broadwell/smmrelocate.c
index f4525da672..4d595f19a2 100644
--- a/src/soc/intel/broadwell/smmrelocate.c
+++ b/src/soc/intel/broadwell/smmrelocate.c
@@ -42,8 +42,8 @@ static inline void write_smrr(struct smm_relocation_params *relo_params)
 {
 	printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
 	       relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
-	wrmsr(SMRRphysBase_MSR, relo_params->smrr_base);
-	wrmsr(SMRRphysMask_MSR, relo_params->smrr_mask);
+	wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
+	wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
 }
 
 static inline void write_emrr(struct smm_relocation_params *relo_params)
@@ -183,7 +183,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	update_save_state(cpu, relo_params, runtime);
 
 	/* Write EMRR and SMRR MSRs based on indicated support. */
-	mtrr_cap = rdmsr(MTRRcap_MSR);
+	mtrr_cap = rdmsr(MTRR_CAP_MSR);
 	if (mtrr_cap.lo & SMRR_SUPPORTED)
 		write_smrr(relo_params);
 
@@ -241,7 +241,7 @@ static void fill_in_relocation_params(device_t dev,
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRRphysMaskValid;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 
 	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
@@ -252,14 +252,14 @@ static void fill_in_relocation_params(device_t dev,
 	 * on the number of physical address bits supported. */
 	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
 	params->emrr_base.hi = 0;
-	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRRphysMaskValid;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
 
 	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
 	params->uncore_emrr_base.lo = emrr_base;
 	params->uncore_emrr_base.hi = 0;
 	params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) |
-				MTRRphysMaskValid;
+				MTRR_PHYS_MASK_VALID;
 	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
 }
 
diff --git a/src/soc/intel/common/stack.c b/src/soc/intel/common/stack.c
index 45e61f9675..6cf03f27b7 100644
--- a/src/soc/intel/common/stack.c
+++ b/src/soc/intel/common/stack.c
@@ -95,7 +95,7 @@ void *setup_stack_and_mtrrs(void)
 
 	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
 	slot = stack_push32(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push32(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
+	slot = stack_push32(slot, ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push32(slot, 0); /* upper base */
 	slot = stack_push32(slot, 0 | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -133,7 +133,7 @@ void *setup_stack_and_mtrrs(void)
 	 * of the FSP reserved memory region.
 	 */
 	slot = stack_push32(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push32(slot, ~(alignment - 1) | MTRRphysMaskValid);
+	slot = stack_push32(slot, ~(alignment - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push32(slot, 0); /* upper base */
 	slot = stack_push32(slot, aligned_ram | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -152,7 +152,7 @@ void *setup_stack_and_mtrrs(void)
 	smm_region(&smm_base, &smm_size);
 	tseg_base = (uint32_t)smm_base;
 	slot = stack_push32(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push32(slot, ~(alignment - 1) | MTRRphysMaskValid);
+	slot = stack_push32(slot, ~(alignment - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push32(slot, 0); /* upper base */
 	slot = stack_push32(slot, tseg_base | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -160,7 +160,7 @@ void *setup_stack_and_mtrrs(void)
 
 	/* Cache the ROM as WP just below 4GiB. */
 	slot = stack_push32(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push32(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
+	slot = stack_push32(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push32(slot, 0); /* upper base */
 	slot = stack_push32(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
 	num_mtrrs++;
diff --git a/src/soc/intel/common/util.c b/src/soc/intel/common/util.c
index a6adfafa2b..591c1001b5 100644
--- a/src/soc/intel/common/util.c
+++ b/src/soc/intel/common/util.c
@@ -77,10 +77,10 @@ uint32_t soc_get_variable_mtrr_count(uint64_t *msr)
 		msr_t s;
 	} mttrcap;
 
-	mttrcap.s = rdmsr(MTRRcap_MSR);
+	mttrcap.s = rdmsr(MTRR_CAP_MSR);
 	if (msr != NULL)
 		*msr = mttrcap.u64;
-	return mttrcap.u64 & MTRRcapVcnt;
+	return mttrcap.u64 & MTRR_CAP_VCNT;
 }
 
 static const char *soc_display_mtrr_type(uint32_t type)
@@ -105,13 +105,13 @@ static void soc_display_mtrr_fixed_types(uint64_t msr,
 	uint32_t next_type;
 	uint32_t type;
 
-	type = msr & MTRRdefTypeType;
+	type = msr & MTRR_DEF_TYPE_MASK;
 	base_address = starting_address;
 	next_address = base_address;
 	for (index = 0; index < 64; index += 8) {
 		next_address = starting_address + (memory_size *
 			((index >> 3) + 1));
-		next_type = (msr >> index) & MTRRdefTypeType;
+		next_type = (msr >> index) & MTRR_DEF_TYPE_MASK;
 		if (next_type != type) {
 			printk(BIOS_DEBUG, " 0x%08x - 0x%08x: %s\n",
 				base_address, next_address - 1,
@@ -159,7 +159,7 @@ static void soc_display_64k_mtrr(void)
 		msr_t s;
 	} msr;
 
-	msr.s = rdmsr(MTRRfix64K_00000_MSR);
+	msr.s = rdmsr(MTRR_FIX_64K_00000);
 	printk(BIOS_DEBUG, "0x%016llx: IA32_MTRR_FIX64K_00000\n", msr.u64);
 	soc_display_mtrr_fixed_types(msr.u64, 0, 0x10000);
 }
@@ -173,9 +173,9 @@ static uint32_t soc_display_mtrrcap(void)
 	printk(BIOS_DEBUG,
 		"0x%016llx: IA32_MTRRCAP: %s%s%s%d variable MTRRs\n",
 		msr,
-		(msr & MTRRcapSmrr) ? "SMRR, " : "",
-		(msr & MTRRcapWc) ? "WC, " : "",
-		(msr & MTRRcapFix) ? "FIX, " : "",
+		(msr & MTRR_CAP_SMRR) ? "SMRR, " : "",
+		(msr & MTRR_CAP_WC) ? "WC, " : "",
+		(msr & MTRR_CAP_FIX) ? "FIX, " : "",
 		variable_mtrrs);
 	return variable_mtrrs;
 }
@@ -187,12 +187,12 @@ static void soc_display_mtrr_def_type(void)
 		msr_t s;
 	} msr;
 
-	msr.s = rdmsr(MTRRdefType_MSR);
+	msr.s = rdmsr(MTRR_DEF_TYPE_MSR);
 	printk(BIOS_DEBUG, "0x%016llx: IA32_MTRR_DEF_TYPE:%s%s %s\n",
 		msr.u64,
-		(msr.u64 & MTRRdefTypeEn) ? " E," : "",
-		(msr.u64 & MTRRdefTypeFixEn) ? " FE," : "",
-		soc_display_mtrr_type((uint32_t)(msr.u64 & MTRRdefTypeType)));
+		(msr.u64 & MTRR_DEF_TYPE_EN) ? " E," : "",
+		(msr.u64 & MTRR_DEF_TYPE_FIX_EN) ? " FE," : "",
+		soc_display_mtrr_type((uint32_t)(msr.u64 & MTRR_DEF_TYPE_MASK)));
 }
 
 static void soc_display_variable_mtrr(uint32_t msr_reg, int index,
@@ -212,13 +212,13 @@ static void soc_display_variable_mtrr(uint32_t msr_reg, int index,
 
 	msr_a.s = rdmsr(msr_reg);
 	msr_m.s = rdmsr(msr_reg + 1);
-	if (msr_m.u64 & MTRRphysMaskValid) {
+	if (msr_m.u64 & MTRR_PHYS_MASK_VALID) {
 		base_address = (msr_a.u64 & 0xfffffffffffff000ULL)
 			& address_mask;
 		printk(BIOS_DEBUG,
 			"0x%016llx: PHYBASE%d: Address = 0x%016llx, %s\n",
 			msr_a.u64, index, base_address,
-			soc_display_mtrr_type(msr_a.u64 & MTRRdefTypeType));
+			soc_display_mtrr_type(msr_a.u64 & MTRR_DEF_TYPE_MASK));
 		mask = (msr_m.u64 & 0xfffffffffffff000ULL) & address_mask;
 		length = (~mask & address_mask) + 1;
 		printk(BIOS_DEBUG,
@@ -243,32 +243,32 @@ asmlinkage void soc_display_mtrrs(void)
 	variable_mtrrs = soc_display_mtrrcap();
 	soc_display_mtrr_def_type();
 	soc_display_64k_mtrr();
-	soc_display_16k_mtrr(MTRRfix16K_80000_MSR, 0x80000,
+	soc_display_16k_mtrr(MTRR_FIX_16K_80000, 0x80000,
 		"IA32_MTRR_FIX16K_80000");
-	soc_display_16k_mtrr(MTRRfix16K_A0000_MSR, 0xa0000,
+	soc_display_16k_mtrr(MTRR_FIX_16K_A0000, 0xa0000,
 		"IA32_MTRR_FIX16K_A0000");
-	soc_display_4k_mtrr(MTRRfix4K_C0000_MSR, 0xc0000,
+	soc_display_4k_mtrr(MTRR_FIX_4K_C0000, 0xc0000,
 		"IA32_MTRR_FIX4K_C0000");
-	soc_display_4k_mtrr(MTRRfix4K_C8000_MSR, 0xc8000,
+	soc_display_4k_mtrr(MTRR_FIX_4K_C8000, 0xc8000,
 		"IA32_MTRR_FIX4K_C8000");
-	soc_display_4k_mtrr(MTRRfix4K_D0000_MSR, 0xd0000,
+	soc_display_4k_mtrr(MTRR_FIX_4K_D0000, 0xd0000,
 		"IA32_MTRR_FIX4K_D0000");
-	soc_display_4k_mtrr(MTRRfix4K_D8000_MSR, 0xd8000,
+	soc_display_4k_mtrr(MTRR_FIX_4K_D8000, 0xd8000,
 		"IA32_MTRR_FIX4K_D8000");
-	soc_display_4k_mtrr(MTRRfix4K_E0000_MSR, 0xe0000,
+	soc_display_4k_mtrr(MTRR_FIX_4K_E0000, 0xe0000,
 		"IA32_MTRR_FIX4K_E0000");
-	soc_display_4k_mtrr(MTRRfix4K_E8000_MSR, 0xe8000,
+	soc_display_4k_mtrr(MTRR_FIX_4K_E8000, 0xe8000,
 		"IA32_MTRR_FIX4K_E8000");
-	soc_display_4k_mtrr(MTRRfix4K_F0000_MSR, 0xf0000,
+	soc_display_4k_mtrr(MTRR_FIX_4K_F0000, 0xf0000,
 		"IA32_MTRR_FIX4K_F0000");
-	soc_display_4k_mtrr(MTRRfix4K_F8000_MSR, 0xf8000,
+	soc_display_4k_mtrr(MTRR_FIX_4K_F8000, 0xf8000,
 		"IA32_MTRR_FIX4K_F8000");
 	address_bits = cpu_phys_address_size();
 	address_mask = (1ULL << address_bits) - 1;
 
 	/* Display the variable MTRRs */
 	for (i = 0; i < variable_mtrrs; i++)
-		soc_display_variable_mtrr(MTRRphysBase_MSR(i), i,
+		soc_display_variable_mtrr(MTRR_PHYS_BASE(i), i,
 			address_mask);
 	}
 }
diff --git a/src/soc/intel/fsp_baytrail/bootblock/bootblock.c b/src/soc/intel/fsp_baytrail/bootblock/bootblock.c
index 50d321bccc..41b911e690 100644
--- a/src/soc/intel/fsp_baytrail/bootblock/bootblock.c
+++ b/src/soc/intel/fsp_baytrail/bootblock/bootblock.c
@@ -52,10 +52,10 @@ static void set_var_mtrr(int reg, uint32_t base, uint32_t size, int type)
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 /*
@@ -80,7 +80,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void setup_mmconfig(void)
diff --git a/src/soc/intel/fsp_baytrail/cpu.c b/src/soc/intel/fsp_baytrail/cpu.c
index c7ee5825d7..8fe1df36b2 100644
--- a/src/soc/intel/fsp_baytrail/cpu.c
+++ b/src/soc/intel/fsp_baytrail/cpu.c
@@ -173,10 +173,10 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	/* Set up SMRR. */
 	smrr.lo = relo_attrs.smrr_base;
 	smrr.hi = 0;
-	wrmsr(SMRRphysBase_MSR, smrr);
+	wrmsr(SMRR_PHYS_BASE, smrr);
 	smrr.lo = relo_attrs.smrr_mask;
 	smrr.hi = 0;
-	wrmsr(SMRRphysMask_MSR, smrr);
+	wrmsr(SMRR_PHYS_MASK, smrr);
 
 	/* The relocated handler runs with all CPUs concurrently. Therefore
 	 * stagger the entry points adjusting SMBASE downwards by save state
@@ -243,7 +243,7 @@ static int smm_load_handlers(void)
 	relo_attrs.smbase = (uint32_t)smm_region_start();
 	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
 	relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask;
-	relo_attrs.smrr_mask |= MTRRphysMaskValid;
+	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;
 
 	/* Install handlers. */
 	if (install_relocation_handler(pattrs->num_cpus) < 0) {
diff --git a/src/soc/intel/skylake/bootblock/cpu.c b/src/soc/intel/skylake/bootblock/cpu.c
index 3a29972370..70bf928761 100644
--- a/src/soc/intel/skylake/bootblock/cpu.c
+++ b/src/soc/intel/skylake/bootblock/cpu.c
@@ -45,10 +45,10 @@ static void set_var_mtrr(
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -62,7 +62,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void bootblock_mdelay(int ms)
@@ -164,14 +164,14 @@ static void set_flex_ratio_to_tdp_nominal(void)
 static void check_for_clean_reset(void)
 {
 	msr_t msr;
-	msr = rdmsr(MTRRdefType_MSR);
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
 
 	/*
 	 * Use the MTRR default type MSR as a proxy for detecting INIT#.
 	 * Reset the system if any known bits are set in that MSR. That is
 	 * an indication of the CPU not being properly reset.
 	 */
-	if (msr.lo & (MTRRdefTypeEn | MTRRdefTypeFixEn))
+	if (msr.lo & (MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN))
 		soft_reset();
 }
 
@@ -191,7 +191,7 @@ static void patch_microcode(void)
 	 * MTRRCAP[12]. Check for this feature and avoid reloading the
 	 * same microcode during early cpu initialization.
 	 */
-	msr = rdmsr(MTRRcap_MSR);
+	msr = rdmsr(MTRR_CAP_MSR);
 	if ((msr.lo & PRMRR_SUPPORTED) && (current_rev != patch->rev - 1))
 		intel_update_microcode_from_cbfs();
 }
diff --git a/src/soc/intel/skylake/cpu.c b/src/soc/intel/skylake/cpu.c
index 0cc9bcaeed..ba1a96cf8e 100644
--- a/src/soc/intel/skylake/cpu.c
+++ b/src/soc/intel/skylake/cpu.c
@@ -467,6 +467,6 @@ int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
 	 * MTRRCAP[12]. Check for this feature and avoid reloading the
 	 * same microcode during cpu initialization.
 	 */
-	msr = rdmsr(MTRRcap_MSR);
+	msr = rdmsr(MTRR_CAP_MSR);
 	return (msr.lo & PRMRR_SUPPORTED) &&
 		(current_patch_id == new_patch_id - 1);
 }
diff --git a/src/soc/intel/skylake/include/soc/msr.h b/src/soc/intel/skylake/include/soc/msr.h
index 4239b36996..d514231ce6 100644
--- a/src/soc/intel/skylake/include/soc/msr.h
+++ b/src/soc/intel/skylake/include/soc/msr.h
@@ -103,7 +103,7 @@
 #define SMBASE_MSR		0xc20
 #define IEDBASE_MSR		0xc22
 
-/* MTRRcap_MSR bits */
+/* MTRR_CAP_MSR bits */
 #define SMRR_SUPPORTED (1<<11)
 #define PRMRR_SUPPORTED (1<<12)
 
diff --git a/src/soc/intel/skylake/smmrelocate.c b/src/soc/intel/skylake/smmrelocate.c
index 4b6f1c4b60..8d2c545ae4 100644
--- a/src/soc/intel/skylake/smmrelocate.c
+++ b/src/soc/intel/skylake/smmrelocate.c
@@ -44,8 +44,8 @@ static inline void write_smrr(struct smm_relocation_params *relo_params)
 {
 	printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
 	       relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
-	wrmsr(SMRRphysBase_MSR, relo_params->smrr_base);
-	wrmsr(SMRRphysMask_MSR, relo_params->smrr_mask);
+	wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
+	wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
 }
 
 static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
@@ -191,7 +191,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	update_save_state(cpu, relo_params, runtime);
 
 	/* Write EMRR and SMRR MSRs based on indicated support. */
-	mtrr_cap = rdmsr(MTRRcap_MSR);
+	mtrr_cap = rdmsr(MTRR_CAP_MSR);
 	if (mtrr_cap.lo & SMRR_SUPPORTED)
 		write_smrr(relo_params);
 }
@@ -230,7 +230,7 @@ static void fill_in_relocation_params(device_t dev,
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRRphysMaskValid;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 
 	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
@@ -243,14 +243,14 @@ static void fill_in_relocation_params(device_t dev,
 	 */
 	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
 	params->emrr_base.hi = 0;
-	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRRphysMaskValid;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
 
 	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
 	params->uncore_emrr_base.lo = emrr_base;
 	params->uncore_emrr_base.hi = 0;
 	params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) |
-				MTRRphysMaskValid;
+				MTRR_PHYS_MASK_VALID;
 	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
 }
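
Note on the new names (this note is not part of the original commit): every hunk
above applies the same mechanical mapping, e.g. MTRRphysBase_MSR(x) ->
MTRR_PHYS_BASE(x), MTRRphysMask_MSR(x) -> MTRR_PHYS_MASK(x), MTRRphysMaskValid ->
MTRR_PHYS_MASK_VALID, MTRRdefType_MSR -> MTRR_DEF_TYPE_MSR, MTRRdefTypeEn and
MTRRdefTypeFixEn -> MTRR_DEF_TYPE_EN and MTRR_DEF_TYPE_FIX_EN, MTRRcap_MSR ->
MTRR_CAP_MSR, MTRRfix64K_00000_MSR -> MTRR_FIX_64K_00000, and SMRRphysBase_MSR /
SMRRphysMask_MSR -> SMRR_PHYS_BASE / SMRR_PHYS_MASK. The C sketch below restates
what these macros stand for, using the architectural MSR numbers from the Intel
SDM; it is an illustration under that assumption, not a copy of the renamed
cpu/mtrr.h header, which remains the authoritative source for the exact values.

/*
 * Illustrative sketch only: architectural MTRR MSR numbers per the
 * Intel SDM. coreboot's real definitions live in the cpu/mtrr.h
 * header named in the Subject line above.
 */
#include <stdint.h>

#define MTRR_CAP_MSR		0x0fe		/* IA32_MTRRCAP */
#define MTRR_CAP_VCNT		0xff		/* bits 7:0: variable-range MTRR count */

#define MTRR_DEF_TYPE_MSR	0x2ff		/* IA32_MTRR_DEF_TYPE */
#define MTRR_DEF_TYPE_EN	(1 << 11)	/* E: enable all MTRRs */
#define MTRR_DEF_TYPE_FIX_EN	(1 << 10)	/* FE: enable fixed-range MTRRs */

#define SMRR_PHYS_BASE		0x1f2		/* IA32_SMRR_PHYSBASE */
#define SMRR_PHYS_MASK		0x1f3		/* IA32_SMRR_PHYSMASK */

/*
 * Variable-range MTRRs are base/mask MSR pairs interleaved upward from
 * 0x200, hence the stride of two per register index.
 */
#define MTRR_PHYS_BASE(reg)	(0x200 + 2 * (reg))		/* IA32_MTRR_PHYSBASEn */
#define MTRR_PHYS_MASK(reg)	(MTRR_PHYS_BASE(reg) + 1)	/* IA32_MTRR_PHYSMASKn */
#define MTRR_PHYS_MASK_VALID	(1 << 11)	/* V bit of each PHYSMASK MSR */

#define MTRR_TYPE_WRBACK	6		/* write-back memory type */

/*
 * Low 32 bits of a PHYSBASE/PHYSMASK pair for a power-of-two sized,
 * naturally aligned region, mirroring the set_var_mtrr() helpers
 * touched by this patch: the base MSR carries the memory type in its
 * low bits, the mask MSR carries ~(size - 1) plus the valid bit.
 */
static inline uint32_t var_mtrr_base_lo(uint32_t base, uint32_t type)
{
	return base | type;
}

static inline uint32_t var_mtrr_mask_lo(uint32_t size)
{
	return ~(size - 1) | MTRR_PHYS_MASK_VALID;
}

Read against these values, the bare "msr.lo = 0x00000800" in the
enable_rom_caching() hunks is simply MTRR_DEF_TYPE_EN: variable MTRRs enabled
with a default memory type of uncacheable.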