[coreboot-gerrit] Patch set updated for coreboot: cpu/mtrr.h: Fix macro names for MTRR registers

Alexandru Gagniuc (mr.nuke.me@gmail.com) gerrit at coreboot.org
Thu Oct 15 03:31:33 CET 2015


Alexandru Gagniuc (mr.nuke.me@gmail.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/11761

-gerrit

commit 2417e19183b8e67c225c944cf2737aa76920e0cc
Author: Alexandru Gagniuc <mr.nuke.me@gmail.com>
Date:   Wed Sep 30 20:23:09 2015 -0700

    cpu/mtrr.h: Fix macro names for MTRR registers
    
    We use UNDERSCORE_CASE. For the MTRR macros that refer to an MSR,
    we also remove the _MSR suffix, as they are, by definition, MSRs.
    The exceptions are MTRR_CAP_MSR and MTRR_DEF_TYPE_MSR, which keep
    the suffix to set the register itself apart from its bit-field
    macros (e.g. MTRR_CAP_VCNT, MTRR_DEF_TYPE_EN).
    
    Change-Id: Id4483a75d62cf1b478a9105ee98a8f55140ce0ef
    Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
---
 src/cpu/amd/agesa/s3_resume.c                     |  8 +-
 src/cpu/amd/car/cache_as_ram.inc                  | 76 +++++++++----------
 src/cpu/amd/car/disable_cache_as_ram.c            | 10 +--
 src/cpu/amd/model_fxx/model_fxx_init.c            | 22 +++---
 src/cpu/amd/pi/s3_resume.c                        |  8 +-
 src/cpu/amd/smm/smm_init.c                        |  6 +-
 src/cpu/intel/car/cache_as_ram.inc                | 80 ++++++++++----------
 src/cpu/intel/car/cache_as_ram_ht.inc             | 46 ++++++------
 src/cpu/intel/fsp_model_406dx/bootblock.c         |  8 +-
 src/cpu/intel/haswell/bootblock.c                 | 12 +--
 src/cpu/intel/haswell/cache_as_ram.inc            | 38 +++++-----
 src/cpu/intel/haswell/romstage.c                  |  8 +-
 src/cpu/intel/haswell/smmrelocate.c               | 12 +--
 src/cpu/intel/model_2065x/bootblock.c             |  8 +-
 src/cpu/intel/model_2065x/cache_as_ram.inc        | 42 +++++------
 src/cpu/intel/model_206ax/bootblock.c             |  8 +-
 src/cpu/intel/model_206ax/cache_as_ram.inc        | 48 ++++++------
 src/cpu/intel/model_6ex/cache_as_ram.inc          | 38 +++++-----
 src/cpu/intel/smm/gen1/smmrelocate.c              |  8 +-
 src/cpu/via/car/cache_as_ram.inc                  | 90 +++++++++++------------
 src/cpu/x86/mp_init.c                             | 16 ++--
 src/cpu/x86/mtrr/earlymtrr.c                      | 14 ++--
 src/cpu/x86/mtrr/mtrr.c                           | 38 +++++-----
 src/drivers/intel/fsp1_1/after_raminit.S          |  8 +-
 src/include/cpu/x86/mtrr.h                        | 83 ++++++++++-----------
 src/northbridge/amd/amdk8/raminit_f_dqs.c         |  6 +-
 src/northbridge/intel/e7505/raminit.c             |  6 +-
 src/northbridge/intel/nehalem/raminit.c           | 12 +--
 src/soc/intel/baytrail/bootblock/bootblock.c      |  8 +-
 src/soc/intel/baytrail/cpu.c                      |  6 +-
 src/soc/intel/baytrail/romstage/cache_as_ram.inc  | 28 +++----
 src/soc/intel/baytrail/romstage/romstage.c        |  8 +-
 src/soc/intel/braswell/bootblock/bootblock.c      |  8 +-
 src/soc/intel/braswell/cpu.c                      |  6 +-
 src/soc/intel/broadwell/bootblock/cpu.c           | 12 +--
 src/soc/intel/broadwell/include/soc/msr.h         |  2 +-
 src/soc/intel/broadwell/romstage/cache_as_ram.inc | 38 +++++-----
 src/soc/intel/broadwell/romstage/stack.c          |  8 +-
 src/soc/intel/broadwell/smmrelocate.c             | 12 +--
 src/soc/intel/common/stack.c                      |  8 +-
 src/soc/intel/common/util.c                       | 50 ++++++-------
 src/soc/intel/fsp_baytrail/bootblock/bootblock.c  |  8 +-
 src/soc/intel/fsp_baytrail/cpu.c                  |  6 +-
 src/soc/intel/skylake/bootblock/cpu.c             | 14 ++--
 src/soc/intel/skylake/cpu.c                       |  2 +-
 src/soc/intel/skylake/include/soc/msr.h           |  2 +-
 src/soc/intel/skylake/smmrelocate.c               | 12 +--
 47 files changed, 497 insertions(+), 500 deletions(-)
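
As a quick illustration of the rename (a sketch for this mail, not part
of the patch itself), here is how a variable-range MTRR is programmed
with the new names. It mirrors the set_var_mtrr() helpers updated below;
msr_t, wrmsr() and CONFIG_CPU_ADDR_BITS are the coreboot primitives the
patch already uses.

#include <stdint.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>

static void example_set_var_mtrr(int reg, uint32_t base,
				 uint32_t size, int type)
{
	msr_t basem, maskm;

	basem.lo = base | type;			/* e.g. MTRR_TYPE_WRBACK */
	basem.hi = 0;
	wrmsr(MTRR_PHYS_BASE(reg), basem);	/* was MTRRphysBase_MSR(reg) */

	/* Mask covers the power-of-two region; bit 11 marks it valid. */
	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
	wrmsr(MTRR_PHYS_MASK(reg), maskm);	/* was MTRRphysMask_MSR(reg) */
}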

diff --git a/src/cpu/amd/agesa/s3_resume.c b/src/cpu/amd/agesa/s3_resume.c
index 98671f4..17364de 100644
--- a/src/cpu/amd/agesa/s3_resume.c
+++ b/src/cpu/amd/agesa/s3_resume.c
@@ -81,15 +81,15 @@ static void set_resume_cache(void)
 	/* Enable caching for 0 - coreboot ram using variable mtrr */
 	msr.lo = 0 | MTRR_TYPE_WRBACK;
 	msr.hi = 0;
-	wrmsr(MTRRphysBase_MSR(0), msr);
-	msr.lo = ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(0), msr);
+	msr.lo = ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID;
 	msr.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(0), msr);
+	wrmsr(MTRR_PHYS_MASK(0), msr);
 
 	/* Set the default memory type and disable fixed and enable variable MTRRs */
 	msr.hi = 0;
 	msr.lo = (1 << 11);
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 
 	enable_cache();
 }
diff --git a/src/cpu/amd/car/cache_as_ram.inc b/src/cpu/amd/car/cache_as_ram.inc
index 133daac..0b2bc60 100644
--- a/src/cpu/amd/car/cache_as_ram.inc
+++ b/src/cpu/amd/car/cache_as_ram.inc
@@ -76,9 +76,9 @@ cache_as_ram_setup:
 	cvtsd2si %xmm3, %ebx
 
 	/* Check if cpu_init_detected. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$MTRRdefTypeEn, %eax
+	andl	$MTRR_DEF_TYPE_EN, %eax
 	movl	%eax, %ebx	/* We store the status. */
 
 	jmp_if_k8(CAR_FAM10_out_post_errata)
@@ -270,27 +270,27 @@ clear_fixed_var_mtrr_out:
 
 #if CacheSize > 0x8000
 	/* Enable caching for 32K-64K using fixed MTRR. */
-	movl	$MTRRfix4K_C0000_MSR, %ecx
+	movl	$MTRR_FIX_4K_C0000, %ecx
 	simplemask CacheSize, 0x8000
 	wrmsr
 #endif
 
 #if CacheSize > 0x10000
 	/* Enable caching for 64K-96K using fixed MTRR. */
-	movl	$MTRRfix4K_D0000_MSR, %ecx
+	movl	$MTRR_FIX_4K_D0000, %ecx
 	simplemask CacheSize, 0x10000
 	wrmsr
 #endif
 
 #if CacheSize > 0x18000
 	/* Enable caching for 96K-128K using fixed MTRR. */
-	movl	$MTRRfix4K_D8000_MSR, %ecx
+	movl	$MTRR_FIX_4K_D8000, %ecx
 	simplemask CacheSize, 0x18000
 	wrmsr
 #endif
 
 	/* Enable caching for 0-32K using fixed MTRR. */
-	movl	$MTRRfix4K_C8000_MSR, %ecx
+	movl	$MTRR_FIX_4K_C8000, %ecx
 	simplemask CacheSize, 0
 	wrmsr
 
@@ -305,7 +305,7 @@ clear_fixed_var_mtrr_out:
 	/* Enable write base caching so we can do execute in place (XIP)
 	 * on the flash ROM.
 	 */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -316,19 +316,19 @@ clear_fixed_var_mtrr_out:
 	orl	$MTRR_TYPE_WRBACK, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$0xff, %edx /* (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1 for K8 (CONFIG_CPU_ADDR_BITS = 40) */
 	jmp_if_k8(wbcache_post_fam10_setup)
 	movl	$0xffff, %edx /* (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1 for FAM10 (CONFIG_CPU_ADDR_BITS = 48) */
 wbcache_post_fam10_setup:
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 #endif /* CONFIG_XIP_ROM_SIZE */
 
 	/* Set the default memory type and enable fixed and variable MTRRs. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	xorl	%edx, %edx
-	movl	$(MTRRdefTypeEn | MTRRdefTypeFixEn), %eax
+	movl	$(MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN), %eax
 	wrmsr
 
 	/* Enable the MTRRs and IORRs in SYSCFG. */
@@ -462,35 +462,35 @@ cache_as_ram_switch_stack:
 
 all_mtrr_msrs:
 	/* fixed MTRR MSRs */
-	.long	MTRRfix64K_00000_MSR
-	.long	MTRRfix16K_80000_MSR
-	.long	MTRRfix16K_A0000_MSR
-	.long	MTRRfix4K_C0000_MSR
-	.long	MTRRfix4K_C8000_MSR
-	.long	MTRRfix4K_D0000_MSR
-	.long	MTRRfix4K_D8000_MSR
-	.long	MTRRfix4K_E0000_MSR
-	.long	MTRRfix4K_E8000_MSR
-	.long	MTRRfix4K_F0000_MSR
-	.long	MTRRfix4K_F8000_MSR
+	.long	MTRR_FIX_64K_00000
+	.long	MTRR_FIX_16K_80000
+	.long	MTRR_FIX_16K_A0000
+	.long	MTRR_FIX_4K_C0000
+	.long	MTRR_FIX_4K_C8000
+	.long	MTRR_FIX_4K_D0000
+	.long	MTRR_FIX_4K_D8000
+	.long	MTRR_FIX_4K_E0000
+	.long	MTRR_FIX_4K_E8000
+	.long	MTRR_FIX_4K_F0000
+	.long	MTRR_FIX_4K_F8000
 
 	/* var MTRR MSRs */
-	.long	MTRRphysBase_MSR(0)
-	.long	MTRRphysMask_MSR(0)
-	.long	MTRRphysBase_MSR(1)
-	.long	MTRRphysMask_MSR(1)
-	.long	MTRRphysBase_MSR(2)
-	.long	MTRRphysMask_MSR(2)
-	.long	MTRRphysBase_MSR(3)
-	.long	MTRRphysMask_MSR(3)
-	.long	MTRRphysBase_MSR(4)
-	.long	MTRRphysMask_MSR(4)
-	.long	MTRRphysBase_MSR(5)
-	.long	MTRRphysMask_MSR(5)
-	.long	MTRRphysBase_MSR(6)
-	.long	MTRRphysMask_MSR(6)
-	.long	MTRRphysBase_MSR(7)
-	.long	MTRRphysMask_MSR(7)
+	.long	MTRR_PHYS_BASE(0)
+	.long	MTRR_PHYS_MASK(0)
+	.long	MTRR_PHYS_BASE(1)
+	.long	MTRR_PHYS_MASK(1)
+	.long	MTRR_PHYS_BASE(2)
+	.long	MTRR_PHYS_MASK(2)
+	.long	MTRR_PHYS_BASE(3)
+	.long	MTRR_PHYS_MASK(3)
+	.long	MTRR_PHYS_BASE(4)
+	.long	MTRR_PHYS_MASK(4)
+	.long	MTRR_PHYS_BASE(5)
+	.long	MTRR_PHYS_MASK(5)
+	.long	MTRR_PHYS_BASE(6)
+	.long	MTRR_PHYS_MASK(6)
+	.long	MTRR_PHYS_BASE(7)
+	.long	MTRR_PHYS_MASK(7)
 
 	/* Variable IORR MTRR MSRs */
 	.long	IORRBase_MSR(0)
diff --git a/src/cpu/amd/car/disable_cache_as_ram.c b/src/cpu/amd/car/disable_cache_as_ram.c
index d3a3812..3b464b8 100644
--- a/src/cpu/amd/car/disable_cache_as_ram.c
+++ b/src/cpu/amd/car/disable_cache_as_ram.c
@@ -33,15 +33,15 @@ static inline __attribute__((always_inline)) void disable_cache_as_ram(void)
 
 	msr.lo = 0;
 	msr.hi = 0;
-	wrmsr(MTRRfix4K_C8000_MSR, msr);
+	wrmsr(MTRR_FIX_4K_C8000, msr);
 #if CONFIG_DCACHE_RAM_SIZE > 0x8000
-	wrmsr(MTRRfix4K_C0000_MSR, msr);
+	wrmsr(MTRR_FIX_4K_C0000, msr);
 #endif
 #if CONFIG_DCACHE_RAM_SIZE > 0x10000
-	wrmsr(MTRRfix4K_D0000_MSR, msr);
+	wrmsr(MTRR_FIX_4K_D0000, msr);
 #endif
 #if CONFIG_DCACHE_RAM_SIZE > 0x18000
-	wrmsr(MTRRfix4K_D8000_MSR, msr);
+	wrmsr(MTRR_FIX_4K_D8000, msr);
 #endif
 	/* disable fixed mtrr from now on, it will be enabled by ramstage again*/
 
@@ -53,7 +53,7 @@ static inline __attribute__((always_inline)) void disable_cache_as_ram(void)
 	msr.hi = 0;
 	msr.lo = (1 << 11);
 
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 
 	enable_cache();
 }
diff --git a/src/cpu/amd/model_fxx/model_fxx_init.c b/src/cpu/amd/model_fxx/model_fxx_init.c
index a6561ee..268f1b2 100644
--- a/src/cpu/amd/model_fxx/model_fxx_init.c
+++ b/src/cpu/amd/model_fxx/model_fxx_init.c
@@ -105,12 +105,12 @@ static void save_mtrr_state(struct mtrr_state *state)
 {
 	int i;
 	for (i = 0; i < MTRR_COUNT; i++) {
-		state->mtrrs[i].base = rdmsr(MTRRphysBase_MSR(i));
-		state->mtrrs[i].mask = rdmsr(MTRRphysMask_MSR(i));
+		state->mtrrs[i].base = rdmsr(MTRR_PHYS_BASE(i));
+		state->mtrrs[i].mask = rdmsr(MTRR_PHYS_MASK(i));
 	}
 	state->top_mem = rdmsr(TOP_MEM);
 	state->top_mem2 = rdmsr(TOP_MEM2);
-	state->def_type = rdmsr(MTRRdefType_MSR);
+	state->def_type = rdmsr(MTRR_DEF_TYPE_MSR);
 }
 
 static void restore_mtrr_state(struct mtrr_state *state)
@@ -119,12 +119,12 @@ static void restore_mtrr_state(struct mtrr_state *state)
 	disable_cache();
 
 	for (i = 0; i < MTRR_COUNT; i++) {
-		wrmsr(MTRRphysBase_MSR(i), state->mtrrs[i].base);
-		wrmsr(MTRRphysMask_MSR(i), state->mtrrs[i].mask);
+		wrmsr(MTRR_PHYS_BASE(i), state->mtrrs[i].base);
+		wrmsr(MTRR_PHYS_MASK(i), state->mtrrs[i].mask);
 	}
 	wrmsr(TOP_MEM, state->top_mem);
 	wrmsr(TOP_MEM2, state->top_mem2);
-	wrmsr(MTRRdefType_MSR, state->def_type);
+	wrmsr(MTRR_DEF_TYPE_MSR, state->def_type);
 
 	enable_cache();
 }
@@ -158,22 +158,22 @@ static void set_init_ecc_mtrrs(void)
 	for (i = 0; i < MTRR_COUNT; i++) {
 		msr_t zero;
 		zero.lo = zero.hi = 0;
-		wrmsr(MTRRphysBase_MSR(i), zero);
-		wrmsr(MTRRphysMask_MSR(i), zero);
+		wrmsr(MTRR_PHYS_BASE(i), zero);
+		wrmsr(MTRR_PHYS_MASK(i), zero);
 	}
 
 	/* Write back cache the first 1MB */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000000 | MTRR_TYPE_WRBACK;
-	wrmsr(MTRRphysBase_MSR(0), msr);
+	wrmsr(MTRR_PHYS_BASE(0), msr);
 	msr.hi = 0x000000ff;
 	msr.lo = ~((CONFIG_RAMTOP) - 1) | 0x800;
-	wrmsr(MTRRphysMask_MSR(0), msr);
+	wrmsr(MTRR_PHYS_MASK(0), msr);
 
 	/* Set the default type to write combining */
 	msr.hi = 0x00000000;
 	msr.lo = 0xc00 | MTRR_TYPE_WRCOMB;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 
 	/* Set TOP_MEM to 4G */
 	msr.hi = 0x00000001;
diff --git a/src/cpu/amd/pi/s3_resume.c b/src/cpu/amd/pi/s3_resume.c
index 943fd97..88b5713 100644
--- a/src/cpu/amd/pi/s3_resume.c
+++ b/src/cpu/amd/pi/s3_resume.c
@@ -271,15 +271,15 @@ static void set_resume_cache(void)
 	/* Enable caching for 0 - coreboot ram using variable mtrr */
 	msr.lo = 0 | MTRR_TYPE_WRBACK;
 	msr.hi = 0;
-	wrmsr(MTRRphysBase_MSR(0), msr);
-	msr.lo = ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(0), msr);
+	msr.lo = ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID;
 	msr.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(0), msr);
+	wrmsr(MTRR_PHYS_MASK(0), msr);
 
 	/* Set the default memory type and disable fixed and enable variable MTRRs */
 	msr.hi = 0;
 	msr.lo = (1 << 11);
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 
 	enable_cache();
 }
diff --git a/src/cpu/amd/smm/smm_init.c b/src/cpu/amd/smm/smm_init.c
index 2e9a4c9..e13f24f 100644
--- a/src/cpu/amd/smm/smm_init.c
+++ b/src/cpu/amd/smm/smm_init.c
@@ -39,7 +39,7 @@ void smm_init(void)
 
 	/* Back up MSRs for later restore */
 	syscfg_orig = rdmsr(SYSCFG_MSR);
-	mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
+	mtrr_aseg_orig = rdmsr(MTRR_FIX_16K_A0000);
 
 	/* MTRR changes don't like an enabled cache */
 	disable_cache();
@@ -57,7 +57,7 @@ void smm_init(void)
 	/* set DRAM access to 0xa0000 */
 	msr.lo = 0x18181818;
 	msr.hi = 0x18181818;
-	wrmsr(MTRRfix16K_A0000_MSR, msr);
+	wrmsr(MTRR_FIX_16K_A0000, msr);
 
 	/* enable the extended features */
 	msr = syscfg_orig;
@@ -73,7 +73,7 @@ void smm_init(void)
 
 	/* Restore SYSCFG and MTRR */
 	wrmsr(SYSCFG_MSR, syscfg_orig);
-	wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
+	wrmsr(MTRR_FIX_16K_A0000, mtrr_aseg_orig);
 	enable_cache();
 
 	/* CPU MSR are set in CPU init */
diff --git a/src/cpu/intel/car/cache_as_ram.inc b/src/cpu/intel/car/cache_as_ram.inc
index f9be6e8..6ef8604 100644
--- a/src/cpu/intel/car/cache_as_ram.inc
+++ b/src/cpu/intel/car/cache_as_ram.inc
@@ -54,7 +54,7 @@ CacheAsRam:
 	 */
 	xorl	%eax, %eax
 	xorl	%edx, %edx
-	movl	$MTRRfix64K_00000_MSR, %ecx
+	movl	$MTRR_FIX_64K_00000, %ecx
 	wrmsr
 
 	/*
@@ -102,16 +102,16 @@ SIPI_Delay:
 
 	/* Wait for the Logical AP to complete initialization. */
 LogicalAP_SIPINotdone:
-	movl	$MTRRfix64K_00000_MSR, %ecx
+	movl	$MTRR_FIX_64K_00000, %ecx
 	rdmsr
 	orl	%eax, %eax
 	jz	LogicalAP_SIPINotdone
 
 NotHtProcessor:
 	/* Set the default memory type and enable fixed and variable MTRRs. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	xorl	%edx, %edx
-	movl	$(MTRRdefTypeEn | MTRRdefTypeFixEn), %eax
+	movl	$(MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN), %eax
 	wrmsr
 
 	/* Clear all MTRRs. */
@@ -131,35 +131,35 @@ clear_fixed_var_mtrr:
 
 all_mtrr_msrs:
 	/* fixed MTRR MSRs */
-	.long	MTRRfix64K_00000_MSR
-	.long	MTRRfix16K_80000_MSR
-	.long	MTRRfix16K_A0000_MSR
-	.long	MTRRfix4K_C0000_MSR
-	.long	MTRRfix4K_C8000_MSR
-	.long	MTRRfix4K_D0000_MSR
-	.long	MTRRfix4K_D8000_MSR
-	.long	MTRRfix4K_E0000_MSR
-	.long	MTRRfix4K_E8000_MSR
-	.long	MTRRfix4K_F0000_MSR
-	.long	MTRRfix4K_F8000_MSR
+	.long	MTRR_FIX_64K_00000
+	.long	MTRR_FIX_16K_80000
+	.long	MTRR_FIX_16K_A0000
+	.long	MTRR_FIX_4K_C0000
+	.long	MTRR_FIX_4K_C8000
+	.long	MTRR_FIX_4K_D0000
+	.long	MTRR_FIX_4K_D8000
+	.long	MTRR_FIX_4K_E0000
+	.long	MTRR_FIX_4K_E8000
+	.long	MTRR_FIX_4K_F0000
+	.long	MTRR_FIX_4K_F8000
 
 	/* var MTRR MSRs */
-	.long	MTRRphysBase_MSR(0)
-	.long	MTRRphysMask_MSR(0)
-	.long	MTRRphysBase_MSR(1)
-	.long	MTRRphysMask_MSR(1)
-	.long	MTRRphysBase_MSR(2)
-	.long	MTRRphysMask_MSR(2)
-	.long	MTRRphysBase_MSR(3)
-	.long	MTRRphysMask_MSR(3)
-	.long	MTRRphysBase_MSR(4)
-	.long	MTRRphysMask_MSR(4)
-	.long	MTRRphysBase_MSR(5)
-	.long	MTRRphysMask_MSR(5)
-	.long	MTRRphysBase_MSR(6)
-	.long	MTRRphysMask_MSR(6)
-	.long	MTRRphysBase_MSR(7)
-	.long	MTRRphysMask_MSR(7)
+	.long	MTRR_PHYS_BASE(0)
+	.long	MTRR_PHYS_MASK(0)
+	.long	MTRR_PHYS_BASE(1)
+	.long	MTRR_PHYS_MASK(1)
+	.long	MTRR_PHYS_BASE(2)
+	.long	MTRR_PHYS_MASK(2)
+	.long	MTRR_PHYS_BASE(3)
+	.long	MTRR_PHYS_MASK(3)
+	.long	MTRR_PHYS_BASE(4)
+	.long	MTRR_PHYS_MASK(4)
+	.long	MTRR_PHYS_BASE(5)
+	.long	MTRR_PHYS_MASK(5)
+	.long	MTRR_PHYS_BASE(6)
+	.long	MTRR_PHYS_MASK(6)
+	.long	MTRR_PHYS_BASE(7)
+	.long	MTRR_PHYS_MASK(7)
 
 	.long	0x000 /* NULL, end of table */
 
@@ -219,13 +219,13 @@ clear_fixed_var_mtrr_out:
 
 #if CacheSize > 0x8000
 	/* Enable caching for 32K-64K using fixed MTRR. */
-	movl	$MTRRfix4K_C0000_MSR, %ecx
+	movl	$MTRR_FIX_4K_C0000, %ecx
 	simplemask CacheSize, 0x8000
 	wrmsr
 #endif
 
 	/* Enable caching for 0-32K using fixed MTRR. */
-	movl	$MTRRfix4K_C8000_MSR, %ecx
+	movl	$MTRR_FIX_4K_C8000, %ecx
 	simplemask CacheSize, 0
 	wrmsr
 
@@ -235,7 +235,7 @@ clear_fixed_var_mtrr_out:
 	 * Enable write base caching so we can do execute in place (XIP)
 	 * on the flash ROM.
 	 */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -246,9 +246,9 @@ clear_fixed_var_mtrr_out:
 	orl	$MTRR_TYPE_WRBACK, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$0x0000000f, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 #endif /* CONFIG_XIP_ROM_SIZE */
 
@@ -332,13 +332,13 @@ lout:
 	movl	%eax, %cr0
 
 	/* Clear sth. */
-	movl	$MTRRfix4K_C8000_MSR, %ecx
+	movl	$MTRR_FIX_4K_C8000, %ecx
 	xorl	%edx, %edx
 	xorl	%eax, %eax
 	wrmsr
 
 #if CONFIG_DCACHE_RAM_SIZE > 0x8000
-	movl	$MTRRfix4K_C0000_MSR, %ecx
+	movl	$MTRR_FIX_4K_C0000, %ecx
 	wrmsr
 #endif
 
@@ -346,9 +346,9 @@ lout:
 	 * Set the default memory type and disable fixed
 	 * and enable variable MTRRs.
 	 */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	xorl	%edx, %edx
-	movl	$MTRRdefTypeEn, %eax /* Enable variable and disable fixed MTRRs. */
+	movl	$MTRR_DEF_TYPE_EN, %eax /* Enable variable and disable fixed MTRRs. */
 	wrmsr
 
 	/* Enable cache. */
diff --git a/src/cpu/intel/car/cache_as_ram_ht.inc b/src/cpu/intel/car/cache_as_ram_ht.inc
index 193ad41..6eb50ba 100644
--- a/src/cpu/intel/car/cache_as_ram_ht.inc
+++ b/src/cpu/intel/car/cache_as_ram_ht.inc
@@ -61,7 +61,7 @@ clear_mtrrs:
 	post_code(0x21)
 
 	/* Configure the default memory type to uncacheable. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
 	andl	$(~0x00000cff), %eax
 	wrmsr
@@ -95,9 +95,9 @@ addrsize_no_MSR:
 	 */
 addrsize_set_high:
 	xorl	%eax, %eax
-	movl	$MTRRphysMask_MSR(0), %ecx
+	movl	$MTRR_PHYS_MASK(0), %ecx
 	wrmsr
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	wrmsr
 	movl	$LAPIC_BASE_MSR, %ecx
 	not	%edx
@@ -188,7 +188,7 @@ hyper_threading_cpu:
 	post_code(0x26)
 
 	/* Wait for sibling CPU to start. */
-1:	movl	$(MTRRphysBase_MSR(0)), %ecx
+1:	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	rdmsr
 	andl	%eax, %eax
 	jnz	sipi_complete
@@ -211,7 +211,7 @@ ap_init:
 	post_code(0x28)
 
 	/* MTRR registers are shared between HT siblings. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(1<<12), %eax
 	xorl	%edx, %edx
 	wrmsr
@@ -230,21 +230,21 @@ sipi_complete:
 	post_code(0x2a)
 
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
 	rdmsr
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x2b)
@@ -308,7 +308,7 @@ no_msr_11e:
 
 #if CONFIG_XIP_ROM_SIZE
 	/* Enable cache for our code in Flash because we do XIP here */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -319,9 +319,9 @@ no_msr_11e:
 	orl	$MTRR_TYPE_WRBACK, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	rdmsr
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 #endif /* CONFIG_XIP_ROM_SIZE */
 
@@ -356,9 +356,9 @@ no_msr_11e:
 	post_code(0x34)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	post_code(0x35)
@@ -382,24 +382,24 @@ no_msr_11e:
 	post_code(0x38)
 
 	/* Enable Write Back and Speculative Reads for low RAM. */
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 	movl	$(0x00000000 | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(0), %ecx
+	movl	$MTRR_PHYS_MASK(0), %ecx
 	rdmsr
-	movl	$(~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 #if CACHE_ROM_SIZE
 	/* Enable caching and Speculative Reads for Flash ROM device. */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	movl	$(CACHE_ROM_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	rdmsr
-	movl	$(~(CACHE_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 #endif
 
@@ -413,9 +413,9 @@ no_msr_11e:
 	post_code(0x3a)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x3b)
diff --git a/src/cpu/intel/fsp_model_406dx/bootblock.c b/src/cpu/intel/fsp_model_406dx/bootblock.c
index a685eaa..ef94991 100644
--- a/src/cpu/intel/fsp_model_406dx/bootblock.c
+++ b/src/cpu/intel/fsp_model_406dx/bootblock.c
@@ -56,10 +56,10 @@ static void set_var_mtrr(int reg, uint32_t base, uint32_t size, int type)
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -74,7 +74,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void set_no_evict_mode_msr(void)
diff --git a/src/cpu/intel/haswell/bootblock.c b/src/cpu/intel/haswell/bootblock.c
index f5d0f6c..8d0c53c 100644
--- a/src/cpu/intel/haswell/bootblock.c
+++ b/src/cpu/intel/haswell/bootblock.c
@@ -44,10 +44,10 @@ static void set_var_mtrr(
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -61,7 +61,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void set_flex_ratio_to_tdp_nominal(void)
@@ -113,12 +113,12 @@ static void set_flex_ratio_to_tdp_nominal(void)
 static void check_for_clean_reset(void)
 {
 	msr_t msr;
-	msr = rdmsr(MTRRdefType_MSR);
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
 
 	/* Use the MTRR default type MSR as a proxy for detecting INIT#.
 	 * Reset the system if any known bits are set in that MSR. That is
 	 * an indication of the CPU not being properly reset. */
-	if (msr.lo & (MTRRdefTypeEn | MTRRdefTypeFixEn)) {
+	if (msr.lo & (MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN)) {
 		outb(0x0, 0xcf9);
 		outb(0x6, 0xcf9);
 		halt();
diff --git a/src/cpu/intel/haswell/cache_as_ram.inc b/src/cpu/intel/haswell/cache_as_ram.inc
index 0978bfb..b8df2a1 100644
--- a/src/cpu/intel/haswell/cache_as_ram.inc
+++ b/src/cpu/intel/haswell/cache_as_ram.inc
@@ -73,31 +73,31 @@ clear_mtrrs:
 
 	post_code(0x22)
 	/* Configure the default memory type to uncacheable. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
 	andl	$(~0x00000cff), %eax
 	wrmsr
 
 	post_code(0x23)
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	post_code(0x24)
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	post_code(0x25)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	/* Enable cache (CR0.CD = 0, CR0.NW = 0). */
@@ -134,7 +134,7 @@ clear_mtrrs:
 	movl	%eax, %cr0
 
 	/* Enable cache for our code in Flash because we do XIP here */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -145,19 +145,19 @@ clear_mtrrs:
 	orl	$MTRR_TYPE_WRPROT, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$CPU_PHYSMASK_HI, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	post_code(0x27)
 	/* Enable caching for ram init code to run faster */
-	movl	$MTRRphysBase_MSR(2), %ecx
+	movl	$MTRR_PHYS_BASE(2), %ecx
 	movl	$(CACHE_MRC_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(2), %ecx
-	movl	$(CACHE_MRC_MASK | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(2), %ecx
+	movl	$(CACHE_MRC_MASK | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
@@ -197,9 +197,9 @@ before_romstage:
 	post_code(0x31)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	post_code(0x31)
@@ -220,9 +220,9 @@ before_romstage:
 	/* Clear MTRR that was used to cache MRC */
 	xorl	%eax, %eax
 	xorl	%edx, %edx
-	movl	$MTRRphysBase_MSR(2), %ecx
+	movl	$MTRR_PHYS_BASE(2), %ecx
 	wrmsr
-	movl	$MTRRphysMask_MSR(2), %ecx
+	movl	$MTRR_PHYS_MASK(2), %ecx
 	wrmsr
 
 	post_code(0x33)
@@ -246,7 +246,7 @@ before_romstage:
 
 	/* Get number of MTRRs. */
 	popl	%ebx
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 1:
 	testl	%ebx, %ebx
 	jz	1f
@@ -279,9 +279,9 @@ before_romstage:
 	post_code(0x3a)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x3b)
diff --git a/src/cpu/intel/haswell/romstage.c b/src/cpu/intel/haswell/romstage.c
index 9c238ca..3bb1090 100644
--- a/src/cpu/intel/haswell/romstage.c
+++ b/src/cpu/intel/haswell/romstage.c
@@ -117,14 +117,14 @@ static void *setup_romstage_stack_after_car(void)
 
 	/* Cache the ROM as WP just below 4GiB. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
 	num_mtrrs++;
 
 	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -135,7 +135,7 @@ static void *setup_romstage_stack_after_car(void)
 	 * be 8MiB aligned. Set this area as cacheable so it can be used later
 	 * for ramstage before setting up the entire RAM as cacheable. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -146,7 +146,7 @@ static void *setup_romstage_stack_after_car(void)
 	 * to cacheable it provides faster access when relocating the SMM
 	 * handler as well as using the TSEG region for other purposes. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
diff --git a/src/cpu/intel/haswell/smmrelocate.c b/src/cpu/intel/haswell/smmrelocate.c
index ab94f9a..00e2d55 100644
--- a/src/cpu/intel/haswell/smmrelocate.c
+++ b/src/cpu/intel/haswell/smmrelocate.c
@@ -73,8 +73,8 @@ static inline void write_smrr(struct smm_relocation_params *relo_params)
 {
 	printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
 	       relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
-	wrmsr(SMRRphysBase_MSR, relo_params->smrr_base);
-	wrmsr(SMRRphysMask_MSR, relo_params->smrr_mask);
+	wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
+	wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
 }
 
 static inline void write_emrr(struct smm_relocation_params *relo_params)
@@ -214,7 +214,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	update_save_state(cpu, relo_params, runtime);
 
 	/* Write EMRR and SMRR MSRs based on indicated support. */
-	mtrr_cap = rdmsr(MTRRcap_MSR);
+	mtrr_cap = rdmsr(MTRR_CAP_MSR);
 	if (mtrr_cap.lo & SMRR_SUPPORTED)
 		write_smrr(relo_params);
 
@@ -272,7 +272,7 @@ static void fill_in_relocation_params(struct device *dev,
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRRphysMaskValid;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 
 	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
@@ -283,14 +283,14 @@ static void fill_in_relocation_params(struct device *dev,
 	 * on the number of physical address bits supported. */
 	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
 	params->emrr_base.hi = 0;
-	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRRphysMaskValid;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
 
 	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
 	params->uncore_emrr_base.lo = emrr_base;
 	params->uncore_emrr_base.hi = 0;
 	params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) |
-	                              MTRRphysMaskValid;
+	                              MTRR_PHYS_MASK_VALID;
 	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
 }
 
diff --git a/src/cpu/intel/model_2065x/bootblock.c b/src/cpu/intel/model_2065x/bootblock.c
index b6a2442..edffe14 100644
--- a/src/cpu/intel/model_2065x/bootblock.c
+++ b/src/cpu/intel/model_2065x/bootblock.c
@@ -43,10 +43,10 @@ static void set_var_mtrr(
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -60,7 +60,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void set_flex_ratio_to_tdp_nominal(void)
diff --git a/src/cpu/intel/model_2065x/cache_as_ram.inc b/src/cpu/intel/model_2065x/cache_as_ram.inc
index cfa3b6b..f36af2b 100644
--- a/src/cpu/intel/model_2065x/cache_as_ram.inc
+++ b/src/cpu/intel/model_2065x/cache_as_ram.inc
@@ -48,8 +48,8 @@ wait_for_sipi:
 	jc	wait_for_sipi
 
 	post_code(0x21)
-	/* Clean-up MTRRdefType_MSR. */
-	movl	$MTRRdefType_MSR, %ecx
+	/* Clean-up MTRR_DEF_TYPE_MSR. */
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	xorl	%eax, %eax
 	xorl	%edx, %edx
 	wrmsr
@@ -69,7 +69,7 @@ clear_mtrrs:
 	jnz	clear_mtrrs
 
 	/* Zero out all variable range MTRRs. */
-	movl	$MTRRcap_MSR, %ecx
+	movl	$MTRR_CAP_MSR, %ecx
 	rdmsr
 	andl	$0xff, %eax
 	shl	$1, %eax
@@ -85,24 +85,24 @@ clear_var_mtrrs:
 
 	post_code(0x23)
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	post_code(0x24)
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	post_code(0x25)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	/* Enable cache (CR0.CD = 0, CR0.NW = 0). */
@@ -139,7 +139,7 @@ clear_var_mtrrs:
 	movl	%eax, %cr0
 
 	/* Enable cache for our code in Flash because we do XIP here */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -150,9 +150,9 @@ clear_var_mtrrs:
 	orl	$MTRR_TYPE_WRPROT, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$CPU_PHYSMASK_HI, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	post_code(0x27)
@@ -189,9 +189,9 @@ before_romstage:
 	post_code(0x31)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	post_code(0x31)
@@ -228,12 +228,12 @@ before_romstage:
 	/* Enable Write Back and Speculative Reads for the first MB
 	 * and ramstage.
 	 */
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 	movl	$(0x00000000 | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(0), %ecx
-	movl	$(~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(0), %ecx
+	movl	$(~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx	// 36bit address space
 	wrmsr
 
@@ -241,12 +241,12 @@ before_romstage:
 	/* Enable Caching and speculative Reads for the
 	 * complete ROM now that we actually have RAM.
 	 */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	movl	$(CACHE_ROM_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(1), %ecx
-	movl	$(~(CACHE_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(1), %ecx
+	movl	$(~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 #endif
@@ -261,9 +261,9 @@ before_romstage:
 	post_code(0x3a)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x3b)
diff --git a/src/cpu/intel/model_206ax/bootblock.c b/src/cpu/intel/model_206ax/bootblock.c
index d41afb2..416b484 100644
--- a/src/cpu/intel/model_206ax/bootblock.c
+++ b/src/cpu/intel/model_206ax/bootblock.c
@@ -44,10 +44,10 @@ static void set_var_mtrr(
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -61,7 +61,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void set_flex_ratio_to_tdp_nominal(void)
diff --git a/src/cpu/intel/model_206ax/cache_as_ram.inc b/src/cpu/intel/model_206ax/cache_as_ram.inc
index a3f1c64..04c0808 100644
--- a/src/cpu/intel/model_206ax/cache_as_ram.inc
+++ b/src/cpu/intel/model_206ax/cache_as_ram.inc
@@ -68,31 +68,31 @@ clear_mtrrs:
 
 	post_code(0x22)
 	/* Configure the default memory type to uncacheable. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
 	andl	$(~0x00000cff), %eax
 	wrmsr
 
 	post_code(0x23)
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	post_code(0x24)
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	post_code(0x25)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	/* Enable cache (CR0.CD = 0, CR0.NW = 0). */
@@ -129,7 +129,7 @@ clear_mtrrs:
 	movl	%eax, %cr0
 
 	/* Enable cache for our code in Flash because we do XIP here */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -140,19 +140,19 @@ clear_mtrrs:
 	orl	$MTRR_TYPE_WRPROT, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$CPU_PHYSMASK_HI, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	post_code(0x27)
 	/* Enable caching for ram init code to run faster */
-	movl	$MTRRphysBase_MSR(2), %ecx
+	movl	$MTRR_PHYS_BASE(2), %ecx
 	movl	$(CACHE_MRC_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(2), %ecx
-	movl	$(CACHE_MRC_MASK | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(2), %ecx
+	movl	$(CACHE_MRC_MASK | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
@@ -189,9 +189,9 @@ before_romstage:
 	post_code(0x31)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	post_code(0x31)
@@ -212,9 +212,9 @@ before_romstage:
 	/* Clear MTRR that was used to cache MRC */
 	xorl	%eax, %eax
 	xorl	%edx, %edx
-	movl	$MTRRphysBase_MSR(2), %ecx
+	movl	$MTRR_PHYS_BASE(2), %ecx
 	wrmsr
-	movl	$MTRRphysMask_MSR(2), %ecx
+	movl	$MTRR_PHYS_MASK(2), %ecx
 	wrmsr
 
 	post_code(0x33)
@@ -236,12 +236,12 @@ before_romstage:
 	/* Enable Write Back and Speculative Reads for the first MB
 	 * and ramstage.
 	 */
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 	movl	$(0x00000000 | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(0), %ecx
-	movl	$(~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(0), %ecx
+	movl	$(~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx	// 36bit address space
 	wrmsr
 
@@ -249,12 +249,12 @@ before_romstage:
 	/* Enable Caching and speculative Reads for the
 	 * complete ROM now that we actually have RAM.
 	 */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	movl	$(CACHE_ROM_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(1), %ecx
-	movl	$(~(CACHE_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(1), %ecx
+	movl	$(~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 #endif
@@ -269,9 +269,9 @@ before_romstage:
 	post_code(0x3a)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x3b)
diff --git a/src/cpu/intel/model_6ex/cache_as_ram.inc b/src/cpu/intel/model_6ex/cache_as_ram.inc
index 271b756..16244fb 100644
--- a/src/cpu/intel/model_6ex/cache_as_ram.inc
+++ b/src/cpu/intel/model_6ex/cache_as_ram.inc
@@ -52,27 +52,27 @@ clear_mtrrs:
 	jnz	clear_mtrrs
 
 	/* Configure the default memory type to uncacheable. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
 	andl	$(~0x00000cff), %eax
 	wrmsr
 
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	/* Enable L2 cache. */
@@ -102,7 +102,7 @@ clear_mtrrs:
 
 #if CONFIG_XIP_ROM_SIZE
 	/* Enable cache for our code in Flash because we do XIP here */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -113,9 +113,9 @@ clear_mtrrs:
 	orl	$MTRR_TYPE_WRBACK, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$CPU_PHYSMASK_HI, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 #endif /* CONFIG_XIP_ROM_SIZE */
 
@@ -150,9 +150,9 @@ clear_mtrrs:
 	post_code(0x31)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	post_code(0x31)
@@ -176,23 +176,23 @@ clear_mtrrs:
 	post_code(0x38)
 
 	/* Enable Write Back and Speculative Reads for low RAM. */
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 	movl	$(0x00000000 | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(0), %ecx
-	movl	$(~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(0), %ecx
+	movl	$(~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 #if CACHE_ROM_SIZE
 	/* Enable caching and Speculative Reads for Flash ROM device. */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	movl	$(CACHE_ROM_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(1), %ecx
-	movl	$(~(CACHE_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(1), %ecx
+	movl	$(~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 #endif
@@ -207,9 +207,9 @@ clear_mtrrs:
 	post_code(0x3a)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x3b)
diff --git a/src/cpu/intel/smm/gen1/smmrelocate.c b/src/cpu/intel/smm/gen1/smmrelocate.c
index bc14444..41ec39c 100644
--- a/src/cpu/intel/smm/gen1/smmrelocate.c
+++ b/src/cpu/intel/smm/gen1/smmrelocate.c
@@ -63,8 +63,8 @@ static inline void write_smrr(struct smm_relocation_params *relo_params)
 {
 	printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
 	       relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
-	wrmsr(SMRRphysBase_MSR, relo_params->smrr_base);
-	wrmsr(SMRRphysMask_MSR, relo_params->smrr_mask);
+	wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
+	wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
 }
 
 /* The relocation work is actually performed in SMM context, but the code
@@ -109,7 +109,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	       save_state->smbase, save_state->iedbase, save_state);
 
 	/* Write SMRR MSRs based on indicated support. */
-	mtrr_cap = rdmsr(MTRRcap_MSR);
+	mtrr_cap = rdmsr(MTRR_CAP_MSR);
 	if (mtrr_cap.lo & SMRR_SUPPORTED)
 		write_smrr(relo_params);
 
@@ -142,7 +142,7 @@ static void fill_in_relocation_params(struct smm_relocation_params *params)
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRRphysMaskValid;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 }
 
diff --git a/src/cpu/via/car/cache_as_ram.inc b/src/cpu/via/car/cache_as_ram.inc
index e8a4ee2..cc91c6e 100644
--- a/src/cpu/via/car/cache_as_ram.inc
+++ b/src/cpu/via/car/cache_as_ram.inc
@@ -44,9 +44,9 @@ CacheAsRam:
 	invd
 
 	/* Set the default memory type and enable fixed and variable MTRRs. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	xorl	%edx, %edx
-	movl	$(MTRRdefTypeEn | MTRRdefTypeFixEn), %eax
+	movl	$(MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN), %eax
 	wrmsr
 
 	/* Clear all MTRRs. */
@@ -66,55 +66,55 @@ clear_fixed_var_mtrr:
 
 all_mtrr_msrs:
 	/* fixed MTRR MSRs */
-	.long	MTRRfix64K_00000_MSR
-	.long	MTRRfix16K_80000_MSR
-	.long	MTRRfix16K_A0000_MSR
-	.long	MTRRfix4K_C0000_MSR
-	.long	MTRRfix4K_C8000_MSR
-	.long	MTRRfix4K_D0000_MSR
-	.long	MTRRfix4K_D8000_MSR
-	.long	MTRRfix4K_E0000_MSR
-	.long	MTRRfix4K_E8000_MSR
-	.long	MTRRfix4K_F0000_MSR
-	.long	MTRRfix4K_F8000_MSR
+	.long	MTRR_FIX_64K_00000
+	.long	MTRR_FIX_16K_80000
+	.long	MTRR_FIX_16K_A0000
+	.long	MTRR_FIX_4K_C0000
+	.long	MTRR_FIX_4K_C8000
+	.long	MTRR_FIX_4K_D0000
+	.long	MTRR_FIX_4K_D8000
+	.long	MTRR_FIX_4K_E0000
+	.long	MTRR_FIX_4K_E8000
+	.long	MTRR_FIX_4K_F0000
+	.long	MTRR_FIX_4K_F8000
 
 	/* var MTRR MSRs */
-	.long	MTRRphysBase_MSR(0)
-	.long	MTRRphysMask_MSR(0)
-	.long	MTRRphysBase_MSR(1)
-	.long	MTRRphysMask_MSR(1)
-	.long	MTRRphysBase_MSR(2)
-	.long	MTRRphysMask_MSR(2)
-	.long	MTRRphysBase_MSR(3)
-	.long	MTRRphysMask_MSR(3)
-	.long	MTRRphysBase_MSR(4)
-	.long	MTRRphysMask_MSR(4)
-	.long	MTRRphysBase_MSR(5)
-	.long	MTRRphysMask_MSR(5)
-	.long	MTRRphysBase_MSR(6)
-	.long	MTRRphysMask_MSR(6)
-	.long	MTRRphysBase_MSR(7)
-	.long	MTRRphysMask_MSR(7)
+	.long	MTRR_PHYS_BASE(0)
+	.long	MTRR_PHYS_MASK(0)
+	.long	MTRR_PHYS_BASE(1)
+	.long	MTRR_PHYS_MASK(1)
+	.long	MTRR_PHYS_BASE(2)
+	.long	MTRR_PHYS_MASK(2)
+	.long	MTRR_PHYS_BASE(3)
+	.long	MTRR_PHYS_MASK(3)
+	.long	MTRR_PHYS_BASE(4)
+	.long	MTRR_PHYS_MASK(4)
+	.long	MTRR_PHYS_BASE(5)
+	.long	MTRR_PHYS_MASK(5)
+	.long	MTRR_PHYS_BASE(6)
+	.long	MTRR_PHYS_MASK(6)
+	.long	MTRR_PHYS_BASE(7)
+	.long	MTRR_PHYS_MASK(7)
 
 	.long	0x000 /* NULL, end of table */
 
 clear_fixed_var_mtrr_out:
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 	xorl	%edx, %edx
 	movl	$(CacheBase | MTRR_TYPE_WRBACK), %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(0), %ecx
+	movl	$MTRR_PHYS_MASK(0), %ecx
 	/* This assumes we never access addresses above 2^36 in CAR. */
 	movl	$0x0000000f, %edx
-	movl	$(~(CacheSize - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CacheSize - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	/*
 	 * Enable write base caching so we can do execute in place (XIP)
 	 * on the flash ROM.
 	 */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -125,16 +125,16 @@ clear_fixed_var_mtrr_out:
 	orl	$MTRR_TYPE_WRBACK, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$0x0000000f, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	/* Set the default memory type and enable fixed and variable MTRRs. */
 	/* TODO: Or also enable fixed MTRRs? Bug in the code? */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	xorl	%edx, %edx
-	movl	$(MTRRdefTypeEn), %eax
+	movl	$(MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	/* Enable cache. */
@@ -224,24 +224,24 @@ testok:
 
 	/* Set the default memory type and enable variable MTRRs. */
 	/* TODO: Or also enable fixed MTRRs? Bug in the code? */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	xorl	%edx, %edx
-	movl	$(MTRRdefTypeEn), %eax
+	movl	$(MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	/* Enable caching for CONFIG_RAMBASE..CONFIG_RAMTOP. */
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 	xorl	%edx, %edx
 	movl	$(CONFIG_RAMBASE | MTRR_TYPE_WRBACK), %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(0), %ecx
+	movl	$MTRR_PHYS_MASK(0), %ecx
 	movl	$0x0000000f, %edx	/* AMD 40 bit 0xff */
-	movl	$(~(CONFIG_RAMTOP - CONFIG_RAMBASE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_RAMTOP - CONFIG_RAMBASE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	/* Cache XIP_ROM area to speedup coreboot code. */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -252,9 +252,9 @@ testok:
 	orl	$MTRR_TYPE_WRBACK, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	xorl	%edx, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	/* Enable cache. */
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 3c8450f..c72cf0a 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -179,10 +179,10 @@ static void setup_default_sipi_vector_params(struct sipi_params *sp)
 
 #define NUM_FIXED_MTRRS 11
 static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
-	MTRRfix64K_00000_MSR, MTRRfix16K_80000_MSR, MTRRfix16K_A0000_MSR,
-	MTRRfix4K_C0000_MSR, MTRRfix4K_C8000_MSR, MTRRfix4K_D0000_MSR,
-	MTRRfix4K_D8000_MSR, MTRRfix4K_E0000_MSR, MTRRfix4K_E8000_MSR,
-	MTRRfix4K_F0000_MSR, MTRRfix4K_F8000_MSR,
+	MTRR_FIX_64K_00000, MTRR_FIX_16K_80000, MTRR_FIX_16K_A0000,
+	MTRR_FIX_4K_C0000, MTRR_FIX_4K_C8000, MTRR_FIX_4K_D0000,
+	MTRR_FIX_4K_D8000, MTRR_FIX_4K_E0000, MTRR_FIX_4K_E8000,
+	MTRR_FIX_4K_F0000, MTRR_FIX_4K_F8000,
 };
 
 static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
@@ -208,7 +208,7 @@ static int save_bsp_msrs(char *start, int size)
 	msr_t msr;
 
 	/* Determine number of MTRRs need to be saved. */
-	msr = rdmsr(MTRRcap_MSR);
+	msr = rdmsr(MTRR_CAP_MSR);
 	num_var_mtrrs = msr.lo & 0xff;
 
 	/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE. */
@@ -225,11 +225,11 @@ static int save_bsp_msrs(char *start, int size)
 	}
 
 	for (i = 0; i < num_var_mtrrs; i++) {
-		msr_entry = save_msr(MTRRphysBase_MSR(i), msr_entry);
-		msr_entry = save_msr(MTRRphysMask_MSR(i), msr_entry);
+		msr_entry = save_msr(MTRR_PHYS_BASE(i), msr_entry);
+		msr_entry = save_msr(MTRR_PHYS_MASK(i), msr_entry);
 	}
 
-	msr_entry = save_msr(MTRRdefType_MSR, msr_entry);
+	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);
 
 	return msr_count;
 }
diff --git a/src/cpu/x86/mtrr/earlymtrr.c b/src/cpu/x86/mtrr/earlymtrr.c
index 9561d8d..2e31a6e 100644
--- a/src/cpu/x86/mtrr/earlymtrr.c
+++ b/src/cpu/x86/mtrr/earlymtrr.c
@@ -13,10 +13,10 @@ void set_var_mtrr(
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 #if !IS_ENABLED(CONFIG_CACHE_AS_RAM)
@@ -36,7 +36,7 @@ const int addr_det = 0;
 static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
 {
 	/* Precondition:
-	 *   The cache is not enabled in cr0 nor in MTRRdefType_MSR
+	 *   The cache is not enabled in cr0 nor in MTRR_DEF_TYPE_MSR
 	 *   entry32.inc ensures the cache is not enabled in cr0
 	 */
 	msr_t msr;
@@ -65,7 +65,7 @@ static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 
 }
 
@@ -99,7 +99,7 @@ static inline int early_mtrr_init_detected(void)
 	 * on both Intel and AMD cpus, at least
 	 * according to the documentation.
 	 */
-	msr = rdmsr(MTRRdefType_MSR);
-	return msr.lo & MTRRdefTypeEn;
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
+	return msr.lo & MTRR_DEF_TYPE_EN;
 }
 #endif
diff --git a/src/cpu/x86/mtrr/mtrr.c b/src/cpu/x86/mtrr/mtrr.c
index 1994a56..072b887 100644
--- a/src/cpu/x86/mtrr/mtrr.c
+++ b/src/cpu/x86/mtrr/mtrr.c
@@ -64,7 +64,7 @@ static void detect_var_mtrrs(void)
 {
 	msr_t msr;
 
-	msr = rdmsr(MTRRcap_MSR);
+	msr = rdmsr(MTRR_CAP_MSR);
 
 	total_mtrrs = msr.lo & 0xff;
 
@@ -81,19 +81,19 @@ void enable_fixed_mtrr(void)
 {
 	msr_t msr;
 
-	msr = rdmsr(MTRRdefType_MSR);
-	msr.lo |= MTRRdefTypeEn | MTRRdefTypeFixEn;
-	wrmsr(MTRRdefType_MSR, msr);
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
+	msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void enable_var_mtrr(unsigned char deftype)
 {
 	msr_t msr;
 
-	msr = rdmsr(MTRRdefType_MSR);
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
 	msr.lo &= ~0xff;
-	msr.lo |= MTRRdefTypeEn | deftype;
-	wrmsr(MTRRdefType_MSR, msr);
+	msr.lo |= MTRR_DEF_TYPE_EN | deftype;
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 /* fms: find most sigificant bit set, stolen from Linux Kernel Source. */
@@ -250,11 +250,11 @@ static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];
 /* Fixed MTRR descriptors. */
 static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
 	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
-	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRRfix64K_00000_MSR },
+	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
 	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
-	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRRfix16K_80000_MSR },
+	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
 	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
-	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRRfix4K_C0000_MSR },
+	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
 };
 
 static void calc_fixed_mtrrs(void)
@@ -410,9 +410,9 @@ static void clear_var_mtrr(int index)
 {
 	msr_t msr_val;
 
-	msr_val = rdmsr(MTRRphysMask_MSR(index));
-	msr_val.lo &= ~MTRRphysMaskValid;
-	wrmsr(MTRRphysMask_MSR(index), msr_val);
+	msr_val = rdmsr(MTRR_PHYS_MASK(index));
+	msr_val.lo &= ~MTRR_PHYS_MASK_VALID;
+	wrmsr(MTRR_PHYS_MASK(index), msr_val);
 }
 
 static void prep_var_mtrr(struct var_mtrr_state *var_state,
@@ -453,7 +453,7 @@ static void prep_var_mtrr(struct var_mtrr_state *var_state,
 	regs->base.hi = rbase >> 32;
 
 	regs->mask.lo = rsize;
-	regs->mask.lo |= MTRRphysMaskValid;
+	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
 	regs->mask.hi = rsize >> 32;
 }
 
@@ -772,8 +772,8 @@ static void commit_var_mtrrs(const struct var_mtrr_solution *sol)
 	/* Write out the variable MTRRs. */
 	disable_cache();
 	for (i = 0; i < sol->num_used; i++) {
-		wrmsr(MTRRphysBase_MSR(i), sol->regs[i].base);
-		wrmsr(MTRRphysMask_MSR(i), sol->regs[i].mask);
+		wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
+		wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
 	}
 	/* Clear the ones that are unused. */
 	for (; i < total_mtrrs; i++)
@@ -818,16 +818,16 @@ void x86_mtrr_check(void)
 	msr_t msr;
 	printk(BIOS_DEBUG, "\nMTRR check\n");
 
-	msr = rdmsr(MTRRdefType_MSR);
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
 
 	printk(BIOS_DEBUG, "Fixed MTRRs   : ");
-	if (msr.lo & MTRRdefTypeFixEn)
+	if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
 		printk(BIOS_DEBUG, "Enabled\n");
 	else
 		printk(BIOS_DEBUG, "Disabled\n");
 
 	printk(BIOS_DEBUG, "Variable MTRRs: ");
-	if (msr.lo & MTRRdefTypeEn)
+	if (msr.lo & MTRR_DEF_TYPE_EN)
 		printk(BIOS_DEBUG, "Enabled\n");
 	else
 		printk(BIOS_DEBUG, "Disabled\n");
diff --git a/src/drivers/intel/fsp1_1/after_raminit.S b/src/drivers/intel/fsp1_1/after_raminit.S
index fe88c9d..184fa08 100644
--- a/src/drivers/intel/fsp1_1/after_raminit.S
+++ b/src/drivers/intel/fsp1_1/after_raminit.S
@@ -84,7 +84,7 @@
 
 	/* Clear all of the variable MTRRs. */
 	popl	%ebx
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 	clr	%eax
 	clr	%edx
 
@@ -101,7 +101,7 @@
 1:
 	/* Get number of MTRRs. */
 	popl	%ebx
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 2:
 	testl	%ebx, %ebx
 	jz	2f
@@ -134,9 +134,9 @@
 	post_code(0x3a)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x3b)
diff --git a/src/include/cpu/x86/mtrr.h b/src/include/cpu/x86/mtrr.h
index bd0b603..8fd4261 100644
--- a/src/include/cpu/x86/mtrr.h
+++ b/src/include/cpu/x86/mtrr.h
@@ -2,49 +2,46 @@
 #define CPU_X86_MTRR_H
 
 /*  These are the region types  */
-#define MTRR_TYPE_UNCACHEABLE 0
-#define MTRR_TYPE_WRCOMB     1
-/*#define MTRR_TYPE_         2*/
-/*#define MTRR_TYPE_         3*/
-#define MTRR_TYPE_WRTHROUGH  4
-#define MTRR_TYPE_WRPROT     5
-#define MTRR_TYPE_WRBACK     6
-#define MTRR_NUM_TYPES       7
-
-#define MTRRcap_MSR     0x0fe
-
-#define MTRRcapSmrr		(1 << 11)
-#define MTRRcapWc		(1 << 10)
-#define MTRRcapFix		(1 << 8)
-#define MTRRcapVcnt		0xff
-
-#define MTRRdefType_MSR 0x2ff
-
-#define MTRRdefTypeEn		(1 << 11)
-#define MTRRdefTypeFixEn	(1 << 10)
-#define MTRRdefTypeType		0xff
-
-#define SMRRphysBase_MSR 0x1f2
-#define SMRRphysMask_MSR 0x1f3
-
-#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
-#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
-
-#define MTRRphysMaskValid	(1 << 11)
-
-#define NUM_FIXED_RANGES 88
-#define RANGES_PER_FIXED_MTRR 8
-#define MTRRfix64K_00000_MSR 0x250
-#define MTRRfix16K_80000_MSR 0x258
-#define MTRRfix16K_A0000_MSR 0x259
-#define MTRRfix4K_C0000_MSR 0x268
-#define MTRRfix4K_C8000_MSR 0x269
-#define MTRRfix4K_D0000_MSR 0x26a
-#define MTRRfix4K_D8000_MSR 0x26b
-#define MTRRfix4K_E0000_MSR 0x26c
-#define MTRRfix4K_E8000_MSR 0x26d
-#define MTRRfix4K_F0000_MSR 0x26e
-#define MTRRfix4K_F8000_MSR 0x26f
+#define MTRR_TYPE_UNCACHEABLE		0
+#define MTRR_TYPE_WRCOMB		1
+#define MTRR_TYPE_WRTHROUGH		4
+#define MTRR_TYPE_WRPROT		5
+#define MTRR_TYPE_WRBACK		6
+#define MTRR_NUM_TYPES			7
+
+#define MTRR_CAP_MSR			0x0fe
+
+#define MTRR_CAP_SMRR			(1 << 11)
+#define MTRR_CAP_WC			(1 << 10)
+#define MTRR_CAP_FIX			(1 << 8)
+#define MTRR_CAP_VCNT			0xff
+
+#define MTRR_DEF_TYPE_MSR		0x2ff
+#define MTRR_DEF_TYPE_MASK		0xff
+#define MTRR_DEF_TYPE_EN		(1 << 11)
+#define MTRR_DEF_TYPE_FIX_EN		(1 << 10)
+
+#define SMRR_PHYS_BASE			0x1f2
+#define SMRR_PHYS_MASK			0x1f3
+
+#define MTRR_PHYS_BASE(reg)		(0x200 + 2 * (reg))
+#define MTRR_PHYS_MASK(reg)		(MTRR_PHYS_BASE(reg) + 1)
+#define  MTRR_PHYS_MASK_VALID		(1 << 11)
+
+#define NUM_FIXED_RANGES		88
+#define RANGES_PER_FIXED_MTRR		8
+#define MTRR_FIX_64K_00000		0x250
+#define MTRR_FIX_16K_80000		0x258
+#define MTRR_FIX_16K_A0000		0x259
+#define MTRR_FIX_4K_C0000		0x268
+#define MTRR_FIX_4K_C8000		0x269
+#define MTRR_FIX_4K_D0000		0x26a
+#define MTRR_FIX_4K_D8000		0x26b
+#define MTRR_FIX_4K_E0000		0x26c
+#define MTRR_FIX_4K_E8000		0x26d
+#define MTRR_FIX_4K_F0000		0x26e
+#define MTRR_FIX_4K_F8000		0x26f
 
 #if !defined (__ASSEMBLER__) && !defined(__PRE_RAM__)
 
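
MTRR_PHYS_BASE(reg) and MTRR_PHYS_MASK(reg) expand to the
IA32_MTRR_PHYSBASEn/IA32_MTRR_PHYSMASKn MSR pair: 0x200/0x201 for register
0, 0x202/0x203 for register 1, and so on. A sketch of how the renamed
macros compose when programming one write-back range; set_wb_range() is
illustrative only, assuming size is a power of two and base is aligned to
it:

	static void set_wb_range(int reg, uint64_t base, uint64_t size,
				 int addr_bits)
	{
		msr_t m;
		/* Every address bit above the range size participates in the mask. */
		uint64_t mask = ~(size - 1) & ((1ULL << addr_bits) - 1);

		m.lo = (base & 0xffffffff) | MTRR_TYPE_WRBACK;
		m.hi = base >> 32;
		wrmsr(MTRR_PHYS_BASE(reg), m);

		m.lo = (mask & 0xffffffff) | MTRR_PHYS_MASK_VALID;
		m.hi = mask >> 32;
		wrmsr(MTRR_PHYS_MASK(reg), m);
	}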
diff --git a/src/northbridge/amd/amdk8/raminit_f_dqs.c b/src/northbridge/amd/amdk8/raminit_f_dqs.c
index 609cddf..59a5fef 100644
--- a/src/northbridge/amd/amdk8/raminit_f_dqs.c
+++ b/src/northbridge/amd/amdk8/raminit_f_dqs.c
@@ -1634,13 +1634,13 @@ static void set_var_mtrr_dqs(
 		zero.lo = zero.hi = 0;
 		/* The invalid bit is kept in the mask, so we simply clear the
 		   relevant mask register to disable a range. */
-		wrmsr (MTRRphysMask_MSR(reg), zero);
+		wrmsr (MTRR_PHYS_MASK(reg), zero);
 	} else {
 		/* Bits 32-35 of MTRRphysMask should be set to 1 */
 		base.lo |= type;
 		mask.lo |= 0x800;
-		wrmsr (MTRRphysBase_MSR(reg), base);
-		wrmsr (MTRRphysMask_MSR(reg), mask);
+		wrmsr (MTRR_PHYS_BASE(reg), base);
+		wrmsr (MTRR_PHYS_MASK(reg), mask);
 	}
 }
 
diff --git a/src/northbridge/intel/e7505/raminit.c b/src/northbridge/intel/e7505/raminit.c
index fc715bc..b48328f 100644
--- a/src/northbridge/intel/e7505/raminit.c
+++ b/src/northbridge/intel/e7505/raminit.c
@@ -986,10 +986,10 @@ static inline void __attribute__((always_inline))
 		 */
 
 		/* Disable and invalidate all cache. */
-		msr_t xip_mtrr = rdmsr(MTRRphysMask_MSR(1));
-		xip_mtrr.lo &= ~MTRRphysMaskValid;
+		msr_t xip_mtrr = rdmsr(MTRR_PHYS_MASK(1));
+		xip_mtrr.lo &= ~MTRR_PHYS_MASK_VALID;
 		invd();
-		wrmsr(MTRRphysMask_MSR(1), xip_mtrr);
+		wrmsr(MTRR_PHYS_MASK(1), xip_mtrr);
 		invd();
 
 		RAM_DEBUG_MESSAGE("ECC state initialized.\n");
diff --git a/src/northbridge/intel/nehalem/raminit.c b/src/northbridge/intel/nehalem/raminit.c
index c41310a..232a15a 100644
--- a/src/northbridge/intel/nehalem/raminit.c
+++ b/src/northbridge/intel/nehalem/raminit.c
@@ -2066,8 +2066,8 @@ static void disable_cache(void)
 {
 	msr_t msr = {.lo = 0, .hi = 0 };
 
-	wrmsr(MTRRphysBase_MSR(3), msr);
-	wrmsr(MTRRphysMask_MSR(3), msr);
+	wrmsr(MTRR_PHYS_BASE(3), msr);
+	wrmsr(MTRR_PHYS_MASK(3), msr);
 }
 
 static void enable_cache(unsigned int base, unsigned int size)
@@ -2075,11 +2075,11 @@ static void enable_cache(unsigned int base, unsigned int size)
 	msr_t msr;
 	msr.lo = base | MTRR_TYPE_WRPROT;
 	msr.hi = 0;
-	wrmsr(MTRRphysBase_MSR(3), msr);
-	msr.lo = ((~(ALIGN_DOWN(size + 4096, 4096) - 1) | MTRRdefTypeEn)
+	wrmsr(MTRR_PHYS_BASE(3), msr);
+	msr.lo = ((~(ALIGN_DOWN(size + 4096, 4096) - 1) | MTRR_DEF_TYPE_EN)
 		  & 0xffffffff);
 	msr.hi = 0x0000000f;
-	wrmsr(MTRRphysMask_MSR(3), msr);
+	wrmsr(MTRR_PHYS_MASK(3), msr);
 }
 
 static void flush_cache(u32 start, u32 size)
@@ -4017,7 +4017,7 @@ void raminit(const int s3resume, const u8 *spd_addrmap)
 
 	write_mchbar8(0x2ca8, read_mchbar8(0x2ca8) & 0xfc);
 #if !REAL
-	rdmsr (MTRRphysMask_MSR (3));
+	rdmsr (MTRR_PHYS_MASK (3));
 #endif
 
 	collect_system_info(&info);
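
The `~(size - 1) | MTRR_PHYS_MASK_VALID` idiom that recurs in these files
is easiest to see with a concrete value. For a hypothetical 8 MiB range:

	size              = 0x00800000   /* 8 MiB */
	size - 1          = 0x007fffff
	~(size - 1)       = 0xff800000
	       | (1 << 11)               /* MTRR_PHYS_MASK_VALID */
	                  = 0xff800800   /* low dword of the PHYSMASK MSR */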
diff --git a/src/soc/intel/baytrail/bootblock/bootblock.c b/src/soc/intel/baytrail/bootblock/bootblock.c
index 6d31add..f73ac46 100644
--- a/src/soc/intel/baytrail/bootblock/bootblock.c
+++ b/src/soc/intel/baytrail/bootblock/bootblock.c
@@ -29,10 +29,10 @@ static void set_var_mtrr(int reg, uint32_t base, uint32_t size, int type)
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -47,7 +47,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void setup_mmconfig(void)
diff --git a/src/soc/intel/baytrail/cpu.c b/src/soc/intel/baytrail/cpu.c
index 6b84c59..81e04ba 100644
--- a/src/soc/intel/baytrail/cpu.c
+++ b/src/soc/intel/baytrail/cpu.c
@@ -197,10 +197,10 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	/* Set up SMRR. */
 	smrr.lo = relo_attrs.smrr_base;
 	smrr.hi = 0;
-	wrmsr(SMRRphysBase_MSR, smrr);
+	wrmsr(SMRR_PHYS_BASE, smrr);
 	smrr.lo = relo_attrs.smrr_mask;
 	smrr.hi = 0;
-	wrmsr(SMRRphysMask_MSR, smrr);
+	wrmsr(SMRR_PHYS_MASK, smrr);
 
 	/* The relocated handler runs with all CPUs concurrently. Therefore
 	 * stagger the entry points adjusting SMBASE downwards by save state
@@ -264,7 +264,7 @@ static int smm_load_handlers(void)
 	relo_attrs.smbase = (uint32_t)smm_region_start();
 	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
 	relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask;
-	relo_attrs.smrr_mask |= MTRRphysMaskValid;
+	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;
 
 	/* Install handlers. */
 	if (install_relocation_handler(pattrs->num_cpus) < 0) {
diff --git a/src/soc/intel/baytrail/romstage/cache_as_ram.inc b/src/soc/intel/baytrail/romstage/cache_as_ram.inc
index 583ec58..2106029 100644
--- a/src/soc/intel/baytrail/romstage/cache_as_ram.inc
+++ b/src/soc/intel/baytrail/romstage/cache_as_ram.inc
@@ -60,7 +60,7 @@ wait_for_sipi:
 	post_code(0x21)
 	/* Configure the default memory type to uncacheable as well as disable
 	 * fixed and variable range mtrrs. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
 	andl	$(~0x00000cff), %eax
 	wrmsr
@@ -95,34 +95,34 @@ wait_for_sipi:
 
 	post_code(0x23)
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	post_code(0x24)
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	post_code(0x25)
 	/* Set code caching up for romstage. */
-	movl	$(MTRRphysBase_MSR(1)), %ecx
+	movl	$(MTRR_PHYS_BASE(1)), %ecx
 	movl	$(CODE_CACHE_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
 
-	movl	$(MTRRphysMask_MSR(1)), %ecx
-	movl	$(CODE_CACHE_MASK | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(1)), %ecx
+	movl	$(CODE_CACHE_MASK | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x26)
@@ -198,9 +198,9 @@ before_romstage:
 	post_code(0x2c)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	invd
@@ -225,7 +225,7 @@ before_romstage:
 
 	/* Get number of MTRRs. */
 	popl	%ebx
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 1:
 	testl	%ebx, %ebx
 	jz	1f
@@ -258,9 +258,9 @@ before_romstage:
 	post_code(0x30)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x31)
diff --git a/src/soc/intel/baytrail/romstage/romstage.c b/src/soc/intel/baytrail/romstage/romstage.c
index c7f66bb..81978b4 100644
--- a/src/soc/intel/baytrail/romstage/romstage.c
+++ b/src/soc/intel/baytrail/romstage/romstage.c
@@ -311,14 +311,14 @@ static void *setup_stack_and_mttrs(void)
 
 	/* Cache the ROM as WP just below 4GiB. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
 	num_mtrrs++;
 
 	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -329,7 +329,7 @@ static void *setup_stack_and_mttrs(void)
 	 * this area as cacheable so it can be used later for ramstage before
 	 * setting up the entire RAM as cacheable. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -340,7 +340,7 @@ static void *setup_stack_and_mttrs(void)
 	 * provides faster access when relocating the SMM handler as well
 	 * as using the TSEG region for other purposes. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
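
setup_stack_and_mttrs() and the romstage assembly share an informal stack
protocol: for every range, four 32-bit words are pushed (upper mask, lower
mask, upper base, lower base), and the consumer pops the MTRR count first
(see the "popl %ebx" / MTRR_PHYS_BASE(0) loop in cache_as_ram.inc above).
A sketch of the layout, deduced from the pushes and pops visible in this
patch:

	/*
	 * Top of stack as the romstage assembly sees it:
	 *   [ number of MTRRs ]
	 *   [ base.lo | cache type           ]  \
	 *   [ base.hi                        ]   } one variable MTRR,
	 *   [ mask.lo | MTRR_PHYS_MASK_VALID ]   } repeated per range
	 *   [ mask.hi                        ]  /
	 */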
diff --git a/src/soc/intel/braswell/bootblock/bootblock.c b/src/soc/intel/braswell/bootblock/bootblock.c
index f98f694..ca19210 100644
--- a/src/soc/intel/braswell/bootblock/bootblock.c
+++ b/src/soc/intel/braswell/bootblock/bootblock.c
@@ -30,10 +30,10 @@ static void set_var_mtrr(int reg, uint32_t base, uint32_t size, int type)
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -48,7 +48,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void setup_mmconfig(void)
diff --git a/src/soc/intel/braswell/cpu.c b/src/soc/intel/braswell/cpu.c
index 2ab8725..41a43ee 100644
--- a/src/soc/intel/braswell/cpu.c
+++ b/src/soc/intel/braswell/cpu.c
@@ -206,10 +206,10 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	/* Set up SMRR. */
 	smrr.lo = relo_attrs.smrr_base;
 	smrr.hi = 0;
-	wrmsr(SMRRphysBase_MSR, smrr);
+	wrmsr(SMRR_PHYS_BASE, smrr);
 	smrr.lo = relo_attrs.smrr_mask;
 	smrr.hi = 0;
-	wrmsr(SMRRphysMask_MSR, smrr);
+	wrmsr(SMRR_PHYS_MASK, smrr);
 
 	/*
 	 * The relocated handler runs with all CPUs concurrently. Therefore
@@ -284,7 +284,7 @@ static int smm_load_handlers(void)
 	relo_attrs.smbase = (uint32_t)smm_base;
 	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
 	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
-	relo_attrs.smrr_mask |= MTRRphysMaskValid;
+	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;
 
 	/* Install handlers. */
 	if (install_relocation_handler(pattrs->num_cpus) < 0) {
diff --git a/src/soc/intel/broadwell/bootblock/cpu.c b/src/soc/intel/broadwell/bootblock/cpu.c
index 83bd873..6e9d3a5 100644
--- a/src/soc/intel/broadwell/bootblock/cpu.c
+++ b/src/soc/intel/broadwell/bootblock/cpu.c
@@ -36,10 +36,10 @@ static void set_var_mtrr(
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -54,7 +54,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void bootblock_mdelay(int ms)
@@ -120,12 +120,12 @@ static void set_flex_ratio_to_tdp_nominal(void)
 static void check_for_clean_reset(void)
 {
 	msr_t msr;
-	msr = rdmsr(MTRRdefType_MSR);
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
 
 	/* Use the MTRR default type MSR as a proxy for detecting INIT#.
 	 * Reset the system if any known bits are set in that MSR. That is
 	 * an indication of the CPU not being properly reset. */
-	if (msr.lo & (MTRRdefTypeEn | MTRRdefTypeFixEn)) {
+	if (msr.lo & (MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN)) {
 		outb(0x0, 0xcf9);
 		outb(0x6, 0xcf9);
 		halt();
diff --git a/src/soc/intel/broadwell/include/soc/msr.h b/src/soc/intel/broadwell/include/soc/msr.h
index 914a11f..7ed61f4 100644
--- a/src/soc/intel/broadwell/include/soc/msr.h
+++ b/src/soc/intel/broadwell/include/soc/msr.h
@@ -102,7 +102,7 @@
 #define SMBASE_MSR			0xc20
 #define IEDBASE_MSR			0xc22
 
-/* MTRRcap_MSR bits */
+/* MTRR_CAP_MSR bits */
 #define SMRR_SUPPORTED (1<<11)
 #define EMRR_SUPPORTED (1<<12)
 
diff --git a/src/soc/intel/broadwell/romstage/cache_as_ram.inc b/src/soc/intel/broadwell/romstage/cache_as_ram.inc
index 3f1b12a..8359e4a 100644
--- a/src/soc/intel/broadwell/romstage/cache_as_ram.inc
+++ b/src/soc/intel/broadwell/romstage/cache_as_ram.inc
@@ -76,31 +76,31 @@ clear_mtrrs:
 
 	post_code(0x22)
 	/* Configure the default memory type to uncacheable. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
 	andl	$(~0x00000cff), %eax
 	wrmsr
 
 	post_code(0x23)
 	/* Set Cache-as-RAM base address. */
-	movl	$(MTRRphysBase_MSR(0)), %ecx
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
 	movl	$(CACHE_AS_RAM_BASE | MTRR_TYPE_WRBACK), %eax
 	xorl	%edx, %edx
 	wrmsr
 
 	post_code(0x24)
 	/* Set Cache-as-RAM mask. */
-	movl	$(MTRRphysMask_MSR(0)), %ecx
-	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(MTRR_PHYS_MASK(0)), %ecx
+	movl	$(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
 	post_code(0x25)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	/* Enable cache (CR0.CD = 0, CR0.NW = 0). */
@@ -136,7 +136,7 @@ clear_mtrrs:
 	movl	%eax, %cr0
 
 	/* Enable cache for our code in Flash because we do XIP here */
-	movl	$MTRRphysBase_MSR(1), %ecx
+	movl	$MTRR_PHYS_BASE(1), %ecx
 	xorl	%edx, %edx
 	/*
 	 * IMPORTANT: The following calculation _must_ be done at runtime. See
@@ -147,19 +147,19 @@ clear_mtrrs:
 	orl	$MTRR_TYPE_WRPROT, %eax
 	wrmsr
 
-	movl	$MTRRphysMask_MSR(1), %ecx
+	movl	$MTRR_PHYS_MASK(1), %ecx
 	movl	$CPU_PHYSMASK_HI, %edx
-	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+	movl	$(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
 	wrmsr
 
 	post_code(0x27)
 	/* Enable caching for ram init code to run faster */
-	movl	$MTRRphysBase_MSR(2), %ecx
+	movl	$MTRR_PHYS_BASE(2), %ecx
 	movl	$(CACHE_MRC_BASE | MTRR_TYPE_WRPROT), %eax
 	xorl	%edx, %edx
 	wrmsr
-	movl	$MTRRphysMask_MSR(2), %ecx
-	movl	$(CACHE_MRC_MASK | MTRRphysMaskValid), %eax
+	movl	$MTRR_PHYS_MASK(2), %ecx
+	movl	$(CACHE_MRC_MASK | MTRR_PHYS_MASK_VALID), %eax
 	movl	$CPU_PHYSMASK_HI, %edx
 	wrmsr
 
@@ -217,9 +217,9 @@ before_romstage:
 	post_code(0x31)
 
 	/* Disable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	andl	$(~MTRRdefTypeEn), %eax
+	andl	$(~MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	post_code(0x31)
@@ -240,9 +240,9 @@ before_romstage:
 	/* Clear MTRR that was used to cache MRC */
 	xorl	%eax, %eax
 	xorl	%edx, %edx
-	movl	$MTRRphysBase_MSR(2), %ecx
+	movl	$MTRR_PHYS_BASE(2), %ecx
 	wrmsr
-	movl	$MTRRphysMask_MSR(2), %ecx
+	movl	$MTRR_PHYS_MASK(2), %ecx
 	wrmsr
 
 	post_code(0x33)
@@ -266,7 +266,7 @@ before_romstage:
 
 	/* Get number of MTRRs. */
 	popl	%ebx
-	movl	$MTRRphysBase_MSR(0), %ecx
+	movl	$MTRR_PHYS_BASE(0), %ecx
 1:
 	testl	%ebx, %ebx
 	jz	1f
@@ -299,9 +299,9 @@ before_romstage:
 	post_code(0x3a)
 
 	/* Enable MTRR. */
-	movl	$MTRRdefType_MSR, %ecx
+	movl	$MTRR_DEF_TYPE_MSR, %ecx
 	rdmsr
-	orl	$MTRRdefTypeEn, %eax
+	orl	$MTRR_DEF_TYPE_EN, %eax
 	wrmsr
 
 	post_code(0x3b)
diff --git a/src/soc/intel/broadwell/romstage/stack.c b/src/soc/intel/broadwell/romstage/stack.c
index ed8e9c3..e66ce75 100644
--- a/src/soc/intel/broadwell/romstage/stack.c
+++ b/src/soc/intel/broadwell/romstage/stack.c
@@ -82,14 +82,14 @@ void *setup_stack_and_mttrs(void)
 
 	/* Cache the ROM as WP just below 4GiB. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
 	num_mtrrs++;
 
 	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -100,7 +100,7 @@ void *setup_stack_and_mttrs(void)
 	 * this area as cacheable so it can be used later for ramstage before
 	 * setting up the entire RAM as cacheable. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -111,7 +111,7 @@ void *setup_stack_and_mttrs(void)
 	 * provides faster access when relocating the SMM handler as well
 	 * as using the TSEG region for other purposes. */
 	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push(slot, 0); /* upper base */
 	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
diff --git a/src/soc/intel/broadwell/smmrelocate.c b/src/soc/intel/broadwell/smmrelocate.c
index f4525da..4d595f1 100644
--- a/src/soc/intel/broadwell/smmrelocate.c
+++ b/src/soc/intel/broadwell/smmrelocate.c
@@ -42,8 +42,8 @@ static inline void write_smrr(struct smm_relocation_params *relo_params)
 {
 	printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
 	       relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
-	wrmsr(SMRRphysBase_MSR, relo_params->smrr_base);
-	wrmsr(SMRRphysMask_MSR, relo_params->smrr_mask);
+	wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
+	wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
 }
 
 static inline void write_emrr(struct smm_relocation_params *relo_params)
@@ -183,7 +183,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	update_save_state(cpu, relo_params, runtime);
 
 	/* Write EMRR and SMRR MSRs based on indicated support. */
-	mtrr_cap = rdmsr(MTRRcap_MSR);
+	mtrr_cap = rdmsr(MTRR_CAP_MSR);
 	if (mtrr_cap.lo & SMRR_SUPPORTED)
 		write_smrr(relo_params);
 
@@ -241,7 +241,7 @@ static void fill_in_relocation_params(device_t dev,
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRRphysMaskValid;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 
 	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
@@ -252,14 +252,14 @@ static void fill_in_relocation_params(device_t dev,
 	 * on the number of physical address bits supported. */
 	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
 	params->emrr_base.hi = 0;
-	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRRphysMaskValid;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
 
 	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
 	params->uncore_emrr_base.lo = emrr_base;
 	params->uncore_emrr_base.hi = 0;
 	params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) |
-	                              MTRRphysMaskValid;
+	                              MTRR_PHYS_MASK_VALID;
 	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
 }
 
diff --git a/src/soc/intel/common/stack.c b/src/soc/intel/common/stack.c
index 45e61f9..6cf03f2 100644
--- a/src/soc/intel/common/stack.c
+++ b/src/soc/intel/common/stack.c
@@ -95,7 +95,7 @@ void *setup_stack_and_mtrrs(void)
 
 	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
 	slot = stack_push32(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push32(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
+	slot = stack_push32(slot, ~(CONFIG_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push32(slot, 0); /* upper base */
 	slot = stack_push32(slot, 0 | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -133,7 +133,7 @@ void *setup_stack_and_mtrrs(void)
 	 * of the FSP reserved memory region.
 	 */
 	slot = stack_push32(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push32(slot, ~(alignment - 1) | MTRRphysMaskValid);
+	slot = stack_push32(slot, ~(alignment - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push32(slot, 0); /* upper base */
 	slot = stack_push32(slot, aligned_ram | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -152,7 +152,7 @@ void *setup_stack_and_mtrrs(void)
 	smm_region(&smm_base, &smm_size);
 	tseg_base = (uint32_t)smm_base;
 	slot = stack_push32(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push32(slot, ~(alignment - 1) | MTRRphysMaskValid);
+	slot = stack_push32(slot, ~(alignment - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push32(slot, 0); /* upper base */
 	slot = stack_push32(slot, tseg_base | MTRR_TYPE_WRBACK);
 	num_mtrrs++;
@@ -160,7 +160,7 @@ void *setup_stack_and_mtrrs(void)
 
 	/* Cache the ROM as WP just below 4GiB. */
 	slot = stack_push32(slot, mtrr_mask_upper); /* upper mask */
-	slot = stack_push32(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
+	slot = stack_push32(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
 	slot = stack_push32(slot, 0); /* upper base */
 	slot = stack_push32(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
 	num_mtrrs++;
diff --git a/src/soc/intel/common/util.c b/src/soc/intel/common/util.c
index a6adfaf..591c100 100644
--- a/src/soc/intel/common/util.c
+++ b/src/soc/intel/common/util.c
@@ -77,10 +77,10 @@ uint32_t soc_get_variable_mtrr_count(uint64_t *msr)
 		msr_t s;
 	} mttrcap;
 
-	mttrcap.s = rdmsr(MTRRcap_MSR);
+	mttrcap.s = rdmsr(MTRR_CAP_MSR);
 	if (msr != NULL)
 		*msr = mttrcap.u64;
-	return mttrcap.u64 & MTRRcapVcnt;
+	return mttrcap.u64 & MTRR_CAP_VCNT;
 }
 
 static const char *soc_display_mtrr_type(uint32_t type)
@@ -105,13 +105,13 @@ static void soc_display_mtrr_fixed_types(uint64_t msr,
 	uint32_t next_type;
 	uint32_t type;
 
-	type = msr & MTRRdefTypeType;
+	type = msr & MTRR_DEF_TYPE_MASK;
 	base_address = starting_address;
 	next_address = base_address;
 	for (index = 0; index < 64; index += 8) {
 		next_address = starting_address + (memory_size *
 			((index >> 3) + 1));
-		next_type = (msr >> index) & MTRRdefTypeType;
+		next_type = (msr >> index) & MTRR_DEF_TYPE_MASK;
 		if (next_type != type) {
 			printk(BIOS_DEBUG, "    0x%08x - 0x%08x: %s\n",
 				base_address, next_address - 1,
@@ -159,7 +159,7 @@ static void soc_display_64k_mtrr(void)
 		msr_t s;
 	} msr;
 
-	msr.s = rdmsr(MTRRfix64K_00000_MSR);
+	msr.s = rdmsr(MTRR_FIX_64K_00000);
 	printk(BIOS_DEBUG, "0x%016llx: IA32_MTRR_FIX64K_00000\n", msr.u64);
 	soc_display_mtrr_fixed_types(msr.u64, 0, 0x10000);
 }
@@ -173,9 +173,9 @@ static uint32_t soc_display_mtrrcap(void)
 	printk(BIOS_DEBUG,
 		"0x%016llx: IA32_MTRRCAP: %s%s%s%d variable MTRRs\n",
 		msr,
-		(msr & MTRRcapSmrr) ? "SMRR, " : "",
-		(msr & MTRRcapWc) ? "WC, " : "",
-		(msr & MTRRcapFix) ? "FIX, " : "",
+		(msr & MTRR_CAP_SMRR) ? "SMRR, " : "",
+		(msr & MTRR_CAP_WC) ? "WC, " : "",
+		(msr & MTRR_CAP_FIX) ? "FIX, " : "",
 		variable_mtrrs);
 	return variable_mtrrs;
 }
@@ -187,12 +187,12 @@ static void soc_display_mtrr_def_type(void)
 		msr_t s;
 	} msr;
 
-	msr.s = rdmsr(MTRRdefType_MSR);
+	msr.s = rdmsr(MTRR_DEF_TYPE_MSR);
 	printk(BIOS_DEBUG, "0x%016llx: IA32_MTRR_DEF_TYPE:%s%s %s\n",
 		msr.u64,
-		(msr.u64 & MTRRdefTypeEn) ? " E," : "",
-		(msr.u64 & MTRRdefTypeFixEn) ? " FE," : "",
-		soc_display_mtrr_type((uint32_t)(msr.u64 & MTRRdefTypeType)));
+		(msr.u64 & MTRR_DEF_TYPE_EN) ? " E," : "",
+		(msr.u64 & MTRR_DEF_TYPE_FIX_EN) ? " FE," : "",
+		soc_display_mtrr_type((uint32_t)(msr.u64 & MTRR_DEF_TYPE_MASK)));
 }
 
 static void soc_display_variable_mtrr(uint32_t msr_reg, int index,
@@ -212,13 +212,13 @@ static void soc_display_variable_mtrr(uint32_t msr_reg, int index,
 
 	msr_a.s = rdmsr(msr_reg);
 	msr_m.s = rdmsr(msr_reg + 1);
-	if (msr_m.u64 & MTRRphysMaskValid) {
+	if (msr_m.u64 & MTRR_PHYS_MASK_VALID) {
 		base_address = (msr_a.u64 & 0xfffffffffffff000ULL)
 			& address_mask;
 		printk(BIOS_DEBUG,
 			"0x%016llx: PHYBASE%d: Address = 0x%016llx, %s\n",
 			msr_a.u64, index, base_address,
-			soc_display_mtrr_type(msr_a.u64 & MTRRdefTypeType));
+			soc_display_mtrr_type(msr_a.u64 & MTRR_DEF_TYPE_MASK));
 		mask = (msr_m.u64 & 0xfffffffffffff000ULL) & address_mask;
 		length = (~mask & address_mask) + 1;
 		printk(BIOS_DEBUG,
@@ -243,32 +243,32 @@ asmlinkage void soc_display_mtrrs(void)
 		variable_mtrrs = soc_display_mtrrcap();
 		soc_display_mtrr_def_type();
 		soc_display_64k_mtrr();
-		soc_display_16k_mtrr(MTRRfix16K_80000_MSR, 0x80000,
+		soc_display_16k_mtrr(MTRR_FIX_16K_80000, 0x80000,
 			"IA32_MTRR_FIX16K_80000");
-		soc_display_16k_mtrr(MTRRfix16K_A0000_MSR, 0xa0000,
+		soc_display_16k_mtrr(MTRR_FIX_16K_A0000, 0xa0000,
 			"IA32_MTRR_FIX16K_A0000");
-		soc_display_4k_mtrr(MTRRfix4K_C0000_MSR, 0xc0000,
+		soc_display_4k_mtrr(MTRR_FIX_4K_C0000, 0xc0000,
 			"IA32_MTRR_FIX4K_C0000");
-		soc_display_4k_mtrr(MTRRfix4K_C8000_MSR, 0xc8000,
+		soc_display_4k_mtrr(MTRR_FIX_4K_C8000, 0xc8000,
 			"IA32_MTRR_FIX4K_C8000");
-		soc_display_4k_mtrr(MTRRfix4K_D0000_MSR, 0xd0000,
+		soc_display_4k_mtrr(MTRR_FIX_4K_D0000, 0xd0000,
 			"IA32_MTRR_FIX4K_D0000");
-		soc_display_4k_mtrr(MTRRfix4K_D8000_MSR, 0xd8000,
+		soc_display_4k_mtrr(MTRR_FIX_4K_D8000, 0xd8000,
 			"IA32_MTRR_FIX4K_D8000");
-		soc_display_4k_mtrr(MTRRfix4K_E0000_MSR, 0xe0000,
+		soc_display_4k_mtrr(MTRR_FIX_4K_E0000, 0xe0000,
 			"IA32_MTRR_FIX4K_E0000");
-		soc_display_4k_mtrr(MTRRfix4K_E8000_MSR, 0xe8000,
+		soc_display_4k_mtrr(MTRR_FIX_4K_E8000, 0xe8000,
 			"IA32_MTRR_FIX4K_E8000");
-		soc_display_4k_mtrr(MTRRfix4K_F0000_MSR, 0xf0000,
+		soc_display_4k_mtrr(MTRR_FIX_4K_F0000, 0xf0000,
 			"IA32_MTRR_FIX4K_F0000");
-		soc_display_4k_mtrr(MTRRfix4K_F8000_MSR, 0xf8000,
+		soc_display_4k_mtrr(MTRR_FIX_4K_F8000, 0xf8000,
 			"IA32_MTRR_FIX4K_F8000");
 		address_bits = cpu_phys_address_size();
 		address_mask = (1ULL << address_bits) - 1;
 
 		/* Display the variable MTRRs */
 		for (i = 0; i < variable_mtrrs; i++)
-			soc_display_variable_mtrr(MTRRphysBase_MSR(i), i,
+			soc_display_variable_mtrr(MTRR_PHYS_BASE(i), i,
 				address_mask);
 	}
 }
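
soc_display_variable_mtrr() above inverts the mask encoding: the range
length is recovered as (~mask & address_mask) + 1. A worked decode with
hypothetical register values (36 physical address bits, PHYSMASK hi:lo =
0xf:ff800800):

	uint64_t address_mask = (1ULL << 36) - 1;	/* 0xfffffffff */
	uint64_t mask = (0xfff800800ULL & ~0xfffULL) & address_mask;
							/* 0xfff800000 */
	uint64_t length = (~mask & address_mask) + 1;	/* 0x00800000 = 8 MiB */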
diff --git a/src/soc/intel/fsp_baytrail/bootblock/bootblock.c b/src/soc/intel/fsp_baytrail/bootblock/bootblock.c
index 50d321b..41b911e 100644
--- a/src/soc/intel/fsp_baytrail/bootblock/bootblock.c
+++ b/src/soc/intel/fsp_baytrail/bootblock/bootblock.c
@@ -52,10 +52,10 @@ static void set_var_mtrr(int reg, uint32_t base, uint32_t size, int type)
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 /*
@@ -80,7 +80,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void setup_mmconfig(void)
diff --git a/src/soc/intel/fsp_baytrail/cpu.c b/src/soc/intel/fsp_baytrail/cpu.c
index c7ee582..8fe1df3 100644
--- a/src/soc/intel/fsp_baytrail/cpu.c
+++ b/src/soc/intel/fsp_baytrail/cpu.c
@@ -173,10 +173,10 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	/* Set up SMRR. */
 	smrr.lo = relo_attrs.smrr_base;
 	smrr.hi = 0;
-	wrmsr(SMRRphysBase_MSR, smrr);
+	wrmsr(SMRR_PHYS_BASE, smrr);
 	smrr.lo = relo_attrs.smrr_mask;
 	smrr.hi = 0;
-	wrmsr(SMRRphysMask_MSR, smrr);
+	wrmsr(SMRR_PHYS_MASK, smrr);
 
 	/* The relocated handler runs with all CPUs concurrently. Therefore
 	 * stagger the entry points adjusting SMBASE downwards by save state
@@ -243,7 +243,7 @@ static int smm_load_handlers(void)
 	relo_attrs.smbase = (uint32_t)smm_region_start();
 	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
 	relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask;
-	relo_attrs.smrr_mask |= MTRRphysMaskValid;
+	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;
 
 	/* Install handlers. */
 	if (install_relocation_handler(pattrs->num_cpus) < 0) {
diff --git a/src/soc/intel/skylake/bootblock/cpu.c b/src/soc/intel/skylake/bootblock/cpu.c
index 3a29972..70bf928 100644
--- a/src/soc/intel/skylake/bootblock/cpu.c
+++ b/src/soc/intel/skylake/bootblock/cpu.c
@@ -45,10 +45,10 @@ static void set_var_mtrr(
 	msr_t basem, maskm;
 	basem.lo = base | type;
 	basem.hi = 0;
-	wrmsr(MTRRphysBase_MSR(reg), basem);
-	maskm.lo = ~(size - 1) | MTRRphysMaskValid;
+	wrmsr(MTRR_PHYS_BASE(reg), basem);
+	maskm.lo = ~(size - 1) | MTRR_PHYS_MASK_VALID;
 	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
-	wrmsr(MTRRphysMask_MSR(reg), maskm);
+	wrmsr(MTRR_PHYS_MASK(reg), maskm);
 }
 
 static void enable_rom_caching(void)
@@ -62,7 +62,7 @@ static void enable_rom_caching(void)
 	/* Enable Variable MTRRs */
 	msr.hi = 0x00000000;
 	msr.lo = 0x00000800;
-	wrmsr(MTRRdefType_MSR, msr);
+	wrmsr(MTRR_DEF_TYPE_MSR, msr);
 }
 
 static void bootblock_mdelay(int ms)
@@ -164,14 +164,14 @@ static void set_flex_ratio_to_tdp_nominal(void)
 static void check_for_clean_reset(void)
 {
 	msr_t msr;
-	msr = rdmsr(MTRRdefType_MSR);
+	msr = rdmsr(MTRR_DEF_TYPE_MSR);
 
 	/*
 	 * Use the MTRR default type MSR as a proxy for detecting INIT#.
 	 * Reset the system if any known bits are set in that MSR. That is
 	 * an indication of the CPU not being properly reset.
 	 */
-	if (msr.lo & (MTRRdefTypeEn | MTRRdefTypeFixEn))
+	if (msr.lo & (MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN))
 		soft_reset();
 }
 
@@ -191,7 +191,7 @@ static void patch_microcode(void)
 	 * MTRRCAP[12]. Check for this feature and avoid reloading the
 	 * same microcode during early cpu initialization.
 	 */
-	msr = rdmsr(MTRRcap_MSR);
+	msr = rdmsr(MTRR_CAP_MSR);
 	if ((msr.lo & PRMRR_SUPPORTED) && (current_rev != patch->rev - 1))
 		intel_update_microcode_from_cbfs();
 }
diff --git a/src/soc/intel/skylake/cpu.c b/src/soc/intel/skylake/cpu.c
index 0cc9bca..ba1a96c 100644
--- a/src/soc/intel/skylake/cpu.c
+++ b/src/soc/intel/skylake/cpu.c
@@ -467,6 +467,6 @@ int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
 	 * MTRRCAP[12]. Check for this feature and avoid reloading the
 	 * same microcode during cpu initialization.
 	 */
-	msr = rdmsr(MTRRcap_MSR);
+	msr = rdmsr(MTRR_CAP_MSR);
 	return (msr.lo & PRMRR_SUPPORTED) && (current_patch_id == new_patch_id - 1);
 }
diff --git a/src/soc/intel/skylake/include/soc/msr.h b/src/soc/intel/skylake/include/soc/msr.h
index 4239b36..d514231 100644
--- a/src/soc/intel/skylake/include/soc/msr.h
+++ b/src/soc/intel/skylake/include/soc/msr.h
@@ -103,7 +103,7 @@
 #define SMBASE_MSR			0xc20
 #define IEDBASE_MSR			0xc22
 
-/* MTRRcap_MSR bits */
+/* MTRR_CAP_MSR bits */
 #define SMRR_SUPPORTED (1<<11)
 #define PRMRR_SUPPORTED (1<<12)
 
diff --git a/src/soc/intel/skylake/smmrelocate.c b/src/soc/intel/skylake/smmrelocate.c
index 4b6f1c4..8d2c545 100644
--- a/src/soc/intel/skylake/smmrelocate.c
+++ b/src/soc/intel/skylake/smmrelocate.c
@@ -44,8 +44,8 @@ static inline void write_smrr(struct smm_relocation_params *relo_params)
 {
 	printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
 	       relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
-	wrmsr(SMRRphysBase_MSR, relo_params->smrr_base);
-	wrmsr(SMRRphysMask_MSR, relo_params->smrr_mask);
+	wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
+	wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
 }
 
 static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
@@ -191,7 +191,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	update_save_state(cpu, relo_params, runtime);
 
 	/* Write EMRR and SMRR MSRs based on indicated support. */
-	mtrr_cap = rdmsr(MTRRcap_MSR);
+	mtrr_cap = rdmsr(MTRR_CAP_MSR);
 	if (mtrr_cap.lo & SMRR_SUPPORTED)
 		write_smrr(relo_params);
 }
@@ -230,7 +230,7 @@ static void fill_in_relocation_params(device_t dev,
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRRphysMaskValid;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 
 	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
@@ -243,14 +243,14 @@ static void fill_in_relocation_params(device_t dev,
 	 */
 	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
 	params->emrr_base.hi = 0;
-	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRRphysMaskValid;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
 	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
 
 	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
 	params->uncore_emrr_base.lo = emrr_base;
 	params->uncore_emrr_base.hi = 0;
 	params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) |
-					MTRRphysMaskValid;
+					MTRR_PHYS_MASK_VALID;
 	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
 }
 
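The SMRR writes in these relocation paths follow the same base/mask pattern
as the variable MTRRs, with 32 bits of valid address. A condensed sketch,
where tseg_base and tseg_size are placeholders (4 KiB aligned, size a power
of two):

	uint32_t rmask = ~((1 << 12) - 1);	/* SMRR granularity is 4 KiB */
	msr_t base, mask;

	base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	base.hi = 0;
	mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
	mask.hi = 0;
	wrmsr(SMRR_PHYS_BASE, base);
	wrmsr(SMRR_PHYS_MASK, mask);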