Diffstat (limited to 'src/soc/amd/common/block/cpu')
-rw-r--r--  src/soc/amd/common/block/cpu/Kconfig             | 1
-rw-r--r--  src/soc/amd/common/block/cpu/noncar/Makefile.inc | 1
-rw-r--r--  src/soc/amd/common/block/cpu/noncar/cpu.c        | 8
-rw-r--r--  src/soc/amd/common/block/cpu/smm/smm_relocate.c  | 7
4 files changed, 10 insertions, 7 deletions
diff --git a/src/soc/amd/common/block/cpu/Kconfig b/src/soc/amd/common/block/cpu/Kconfig
index 6c5329aa9b..f926887cf5 100644
--- a/src/soc/amd/common/block/cpu/Kconfig
+++ b/src/soc/amd/common/block/cpu/Kconfig
@@ -13,6 +13,7 @@ config SOC_AMD_COMMON_BLOCK_CAR
config SOC_AMD_COMMON_BLOCK_NONCAR
	bool
+	select RESERVED_PHYSICAL_ADDRESS_BITS_SUPPORT
	help
	  From family 17h on, AMD CPUs/APUs don't use cache as RAM (CAR) any
	  more, since the RAM initialization is already done by the PSP when
diff --git a/src/soc/amd/common/block/cpu/noncar/Makefile.inc b/src/soc/amd/common/block/cpu/noncar/Makefile.inc
index 3204667fc1..f8ca357c40 100644
--- a/src/soc/amd/common/block/cpu/noncar/Makefile.inc
+++ b/src/soc/amd/common/block/cpu/noncar/Makefile.inc
@@ -2,6 +2,7 @@
ifeq ($(CONFIG_SOC_AMD_COMMON_BLOCK_NONCAR),y)
bootblock-y += bootblock.c
+bootblock-y += cpu.c
bootblock-y += early_cache.c
bootblock-y += pre_c.S
bootblock-y += write_resume_eip.c
diff --git a/src/soc/amd/common/block/cpu/noncar/cpu.c b/src/soc/amd/common/block/cpu/noncar/cpu.c
index eec593c8a9..eefd62fcf0 100644
--- a/src/soc/amd/common/block/cpu/noncar/cpu.c
+++ b/src/soc/amd/common/block/cpu/noncar/cpu.c
@@ -35,7 +35,8 @@ void set_cstate_io_addr(void)
	wrmsr(MSR_CSTATE_ADDRESS, cst_addr);
}
-static uint32_t get_sme_reserved_address_bits(void)
+/* Number of most significant physical address bits reserved for secure memory encryption */
+unsigned int get_reserved_phys_addr_bits(void)
{
	if (rdmsr(SYSCFG_MSR).raw & SYSCFG_MSR_SMEE)
		return (cpuid_ebx(CPUID_EBX_MEM_ENCRYPT) &
@@ -44,8 +45,3 @@ static uint32_t get_sme_reserved_address_bits(void)
	else
		return 0;
}
-
-uint32_t get_usable_physical_address_bits(void)
-{
-	return cpu_phys_address_size() - get_sme_reserved_address_bits();
-}
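The per-SoC helper get_usable_physical_address_bits() is removed here; with RESERVED_PHYSICAL_ADDRESS_BITS_SUPPORT selected, the subtraction of the reserved bits is expected to happen in common code instead. A minimal standalone sketch of that relationship, assuming the same semantics as the removed function (the prototypes and the helper name below are illustrative, not part of this change):

/* Sketch only: prototypes assumed for illustration, matching the usage above. */
unsigned int cpu_phys_address_size(void);	/* total implemented physical address bits */
unsigned int get_reserved_phys_addr_bits(void);	/* MSBs reserved for memory encryption */

/* Usable address width = implemented bits minus the encryption-reserved MSBs. */
unsigned int usable_physical_address_bits(void)
{
	return cpu_phys_address_size() - get_reserved_phys_addr_bits();
}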
diff --git a/src/soc/amd/common/block/cpu/smm/smm_relocate.c b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
index 4004726e29..ae008853bd 100644
--- a/src/soc/amd/common/block/cpu/smm/smm_relocate.c
+++ b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
@@ -65,6 +65,11 @@ static void smm_relocation_handler(void)
	uintptr_t tseg_base;
	size_t tseg_size;
+	/* For the TSEG masks, all physical address bits, including the ones reserved for memory
+	   encryption, need to be taken into account. TODO: Find out why this is the case */
+	const unsigned int total_physical_address_bits =
+		cpu_phys_address_size() + get_reserved_phys_addr_bits();
+
	smm_region(&tseg_base, &tseg_size);
	msr_t msr;
@@ -73,7 +78,7 @@ static void smm_relocation_handler(void)
	msr.lo = ~(tseg_size - 1);
	msr.lo |= SMM_TSEG_WB;
-	msr.hi = (1 << (cpu_phys_address_size() - 32)) - 1;
+	msr.hi = (1 << (total_physical_address_bits - 32)) - 1;
	wrmsr(SMM_MASK_MSR, msr);
	uintptr_t smbase = smm_get_cpu_smbase(cpu_index());
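A worked example of the changed mask computation, using assumed values (48 implemented physical address bits and no bits currently reserved for encryption); the real values come from CPUID/MSRs at runtime:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example values, not read from hardware. */
	const unsigned int phys_bits = 48;
	const unsigned int encryption_reserved_bits = 0;
	const unsigned int total_physical_address_bits = phys_bits + encryption_reserved_bits;

	/* Upper dword of the TSEG mask: ones in every implemented address bit above bit 31. */
	const uint32_t mask_hi = (1u << (total_physical_address_bits - 32)) - 1;

	printf("SMM_MASK_MSR[63:32] = 0x%" PRIx32 "\n", mask_hi);	/* 0xffff for 48 bits */
	return 0;
}

With the encryption-reserved bits added in, total_physical_address_bits grows and the mask widens accordingly, which is what the TODO in the comment above refers to.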