 src/cpu/x86/mtrr/mtrr.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/src/cpu/x86/mtrr/mtrr.c b/src/cpu/x86/mtrr/mtrr.c
index 81ea6da91d..74aae64a89 100644
--- a/src/cpu/x86/mtrr/mtrr.c
+++ b/src/cpu/x86/mtrr/mtrr.c
@@ -142,10 +142,12 @@ static struct memranges *get_physical_address_space(void)
 	 * time remove unacheable regions from the cacheable ones. */
 	if (addr_space == NULL) {
 		struct range_entry *r;
-		const unsigned long mask = IORESOURCE_CACHEABLE;
+		unsigned long mask;
+		unsigned long match;
 
 		addr_space = &addr_space_storage;
 
+		mask = IORESOURCE_CACHEABLE;
 		/* Collect cacheable and uncacheable address ranges. The
 		 * uncacheable regions take precedence over the cacheable
 		 * regions. */
@@ -153,6 +155,14 @@ static struct memranges *get_physical_address_space(void)
 		memranges_add_resources(addr_space, mask, 0,
 					MTRR_TYPE_UNCACHEABLE);
 
+		/* Handle any write combining resources. Only prefetchable
+		 * resources with the IORESOURCE_WRCOMB flag are appropriate
+		 * for this MTRR type. */
+		match = IORESOURCE_PREFETCH | IORESOURCE_WRCOMB;
+		mask |= match;
+		memranges_add_resources(addr_space, mask, match,
+					MTRR_TYPE_WRCOMB);
+
 		/* The address space below 4GiB is special. It needs to be
 		 * covered entirly by range entries so that MTRR calculations
 		 * can be properly done for the full 32-bit address space.
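
For context on the new call: memranges_add_resources() filters resources by their flag
bits, and as used in this patch a resource appears to be selected when
(flags & mask) == match. With mask = CACHEABLE | PREFETCH | WRCOMB and
match = PREFETCH | WRCOMB, the write-combining pass therefore picks up ranges that are
prefetchable and write-combining but not marked cacheable. The sketch below is a minimal,
standalone illustration of that mask/match predicate; the RES_* constants and the
matches() helper are made-up placeholders, not coreboot's IORESOURCE_* flags or API.

	/* Hypothetical stand-ins for the real IORESOURCE_* flags. */
	#include <stdio.h>

	#define RES_CACHEABLE  (1u << 0)
	#define RES_PREFETCH   (1u << 1)
	#define RES_WRCOMB     (1u << 2)

	/* Assumed filter semantics: keep a resource when the masked flags
	 * equal the match value exactly. */
	static int matches(unsigned long flags, unsigned long mask, unsigned long match)
	{
		return (flags & mask) == match;
	}

	int main(void)
	{
		unsigned long mask = RES_CACHEABLE | RES_PREFETCH | RES_WRCOMB;
		unsigned long match = RES_PREFETCH | RES_WRCOMB;

		/* Prefetchable + write-combining, not cacheable: selected (prints 1). */
		printf("%d\n", matches(RES_PREFETCH | RES_WRCOMB, mask, match));
		/* Plain cacheable RAM: not selected for WRCOMB (prints 0). */
		printf("%d\n", matches(RES_CACHEABLE, mask, match));
		return 0;
	}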