[coreboot-gerrit] New patch to review for coreboot: intel/nehalem post-car: Redo MTRR settings and stack selection

Kyösti Mälkki (kyosti.malkki@gmail.com) gerrit at coreboot.org
Fri Jul 22 15:50:08 CEST 2016


Kyösti Mälkki (kyosti.malkki at gmail.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/15793


commit 71e144356bacbb2649c5751bbf7b91b696703ffa
Author: Kyösti Mälkki <kyosti.malkki at gmail.com>
Date:   Fri Jul 22 09:58:54 2016 +0300

    intel/nehalem post-car: Redo MTRR settings and stack selection
    
    Adapt the implementation from haswell to prepare for the removal of
    HIGH_MEMORY_SAVE and the move to RELOCATABLE_RAMSTAGE. With this change,
    the CBMEM and SMM regions are set to WRBACK with MTRRs and the romstage
    RAM stack is moved to CBMEM.
    
    Change-Id: I84f6fa6f37a7348b2d4ad9f08a18bebe4b1e34e2
    Signed-off-by: Kyösti Mälkki <kyosti.malkki at gmail.com>
---
 src/cpu/intel/model_2065x/cache_as_ram.inc | 61 +++++++++++-----------
 src/cpu/intel/model_2065x/postcar.c        | 84 +++++++++++++++++++++++++++++-
 2 files changed, 112 insertions(+), 33 deletions(-)
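
The heart of the change is a hand-off between romstage C code and the cache-as-ram exit assembly: setup_romstage_stack_after_car() builds a small descriptor at the top of the future romstage stack (now in CBMEM), romstage_main() returns a pointer to it, and the assembly switches %esp to that pointer and pops the MTRR programming straight off the stack. As a rough C-level sketch of the layout being handed over (the struct and its name are only an illustration, not part of the patch):

    #include <stdint.h>

    /* Illustration only: the patch builds this layout with stack_push(),
     * growing downward from romstage_ram_stack_top(), and returns a
     * pointer to num_mtrrs so the assembly can pop entries in order. */
    struct postcar_mtrr_stack {
            uint32_t num_mtrrs;             /*  +0: variable MTRRs to program */
            struct {
                    uint32_t base_lo;       /* MTRR_PHYS_BASE(n), bits 31:0  */
                    uint32_t base_hi;       /* MTRR_PHYS_BASE(n), bits 63:32 */
                    uint32_t mask_lo;       /* MTRR_PHYS_MASK(n), bits 31:0  */
                    uint32_t mask_hi;       /* MTRR_PHYS_MASK(n), bits 63:32 */
            } mtrr[4];                      /* four entries pushed below */
    };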

diff --git a/src/cpu/intel/model_2065x/cache_as_ram.inc b/src/cpu/intel/model_2065x/cache_as_ram.inc
index 29ff01a..c117b33 100644
--- a/src/cpu/intel/model_2065x/cache_as_ram.inc
+++ b/src/cpu/intel/model_2065x/cache_as_ram.inc
@@ -156,8 +156,8 @@ clear_var_mtrrs:
 	andl	$(~(CR0_CacheDisable | CR0_NoWriteThrough)), %eax
 	movl	%eax, %cr0
 
-	/* Set up the stack pointer below the end of CAR. */
-	movl	$(CACHE_AS_RAM_SIZE + CACHE_AS_RAM_BASE - 4), %eax
+	/* Set up the stack. */
+	movl	$(CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE), %eax
 	movl	%eax, %esp
 
 	/* Restore the BIST result. */
@@ -169,10 +169,9 @@ before_romstage:
 	post_code(0x29)
 	/* Call romstage.c main function. */
 	call	romstage_main
-
 	/* Save return value from romstage_main. It contains the stack to use
-	 * after cache-as-ram is torn down.
-	 */
+	 * after cache-as-ram is torn down. It also contains the information
+	 * for setting up MTRRs. */
 	movl	%eax, %ebx
 
 	post_code(0x30)
@@ -221,32 +220,34 @@ before_romstage:
 
 	post_code(0x38)
 
-	/* Enable Write Back and Speculative Reads for the first MB
-	 * and ramstage.
-	 */
-	movl	$MTRR_PHYS_BASE(0), %ecx
-	movl	$(0x00000000 | MTRR_TYPE_WRBACK), %eax
-	xorl	%edx, %edx
-	wrmsr
-	movl	$MTRR_PHYS_MASK(0), %ecx
-	movl	$(~(CACHE_TMP_RAMTOP - 1) | MTRR_PHYS_MASK_VALID), %eax
-	movl	$CPU_PHYSMASK_HI, %edx	// 36bit address space
-	wrmsr
+	/* Set up stack as indicated by return value from romstage_main(). */
+	movl	%ebx, %esp
 
-#if CACHE_ROM_SIZE
-	/* Enable Caching and speculative Reads for the
-	 * complete ROM now that we actually have RAM.
-	 */
-	movl	$MTRR_PHYS_BASE(1), %ecx
-	movl	$(CACHE_ROM_BASE | MTRR_TYPE_WRPROT), %eax
-	xorl	%edx, %edx
+	/* Get number of MTRRs. */
+	popl	%ebx
+	movl	$MTRR_PHYS_BASE(0), %ecx
+1:
+	testl	%ebx, %ebx
+	jz	1f
+
+	/* Low 32 bits of MTRR base. */
+	popl	%eax
+	/* Upper 32 bits of MTRR base. */
+	popl	%edx
+	/* Write MTRR base. */
 	wrmsr
-	movl	$MTRR_PHYS_MASK(1), %ecx
-	movl	$(~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
-	movl	$CPU_PHYSMASK_HI, %edx
+	inc	%ecx
+	/* Low 32 bits of MTRR mask. */
+	popl	%eax
+	/* Upper 32 bits of MTRR mask. */
+	popl	%edx
+	/* Write MTRR mask. */
 	wrmsr
-#endif
+	inc	%ecx
 
+	dec	%ebx
+	jmp	1b
+1:
 	post_code(0x39)
 
 	/* And enable cache again after setting MTRRs. */
@@ -272,11 +273,7 @@ before_romstage:
 __main:
 	post_code(POST_PREPARE_RAMSTAGE)
 	cld			/* Clear direction flag. */
-
-	/* Setup stack as indicated by return value from romstage_main(). */
-	movl	%ebx, %esp
-	movl	%esp, %ebp
-	call	copy_and_run
+	call	romstage_after_car
 
 .Lhlt:
 	post_code(POST_DEAD_CODE)
diff --git a/src/cpu/intel/model_2065x/postcar.c b/src/cpu/intel/model_2065x/postcar.c
index 062a509..04692ac 100644
--- a/src/cpu/intel/model_2065x/postcar.c
+++ b/src/cpu/intel/model_2065x/postcar.c
@@ -13,9 +13,91 @@
  * GNU General Public License for more details.
  */
 
+#include <arch/cpu.h>
+#include <cbmem.h>
+#include <cpu/x86/mtrr.h>
 #include <cpu/intel/romstage.h>
+#include <program_loading.h>
 
+static inline u32 *stack_push(u32 *stack, u32 value)
+{
+	stack = &stack[-1];
+	*stack = value;
+	return stack;
+}
+
+/* setup_romstage_stack_after_car() determines the stack to use after
+ * cache-as-ram is torn down as well as the MTRR settings to use. */
 void *setup_romstage_stack_after_car(void)
 {
-	return (void*)CONFIG_RAMTOP;
+	uintptr_t top_of_stack;
+	int num_mtrrs;
+	u32 *slot;
+	u32 mtrr_mask_upper;
+	u32 top_of_ram;
+
+	/* Top of stack needs to be aligned to a 4-byte boundary. */
+	top_of_stack = romstage_ram_stack_top() & ~3;
+	slot = (void *)top_of_stack;
+	num_mtrrs = 0;
+
+	/* The upper bits of the MTRR mask need to be set according to the
+	 * number of physical address bits. */
+	mtrr_mask_upper = (1 << (cpu_phys_address_size() - 32)) - 1;
+
+	/* The order for each MTRR is mask then base, with the upper 32 bits of
+	 * each pushed before the lower 32 bits. The reasoning for
+	 * this ordering is to create a stack layout like the following:
+	 *   +0: Number of MTRRs
+	 *   +4: MTRR base 0 31:0
+	 *   +8: MTRR base 0 63:32
+	 *  +12: MTRR mask 0 31:0
+	 *  +16: MTRR mask 0 63:32
+	 *  +20: MTRR base 1 31:0
+	 *  +24: MTRR base 1 63:32
+	 *  +28: MTRR mask 1 31:0
+	 *  +32: MTRR mask 1 63:32
+	 */
+
+	/* Cache the ROM as WP just below 4GiB. */
+	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
+	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
+	slot = stack_push(slot, 0); /* upper base */
+	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
+	num_mtrrs++;
+
+	/* Cache RAM as WB from 0 -> CACHE_TMP_RAMTOP. */
+	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
+	slot = stack_push(slot, ~(CACHE_TMP_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
+	slot = stack_push(slot, 0); /* upper base */
+	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
+	num_mtrrs++;
+
+	top_of_ram = (uint32_t)cbmem_top();
+	/* Cache 8MiB below the top of ram. On these systems the top of
+	 * ram under 4GiB is the start of the TSEG region. It is required to
+	 * be 8MiB aligned. Set this area as cacheable so it can be used later
+	 * for ramstage before setting up the entire RAM as cacheable. */
+	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
+	slot = stack_push(slot, 0); /* upper base */
+	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
+	num_mtrrs++;
+
+	/* Cache 8MiB at the top of ram. Top of ram on these systems
+	 * is where the TSEG region resides. However, it is not restricted
+	 * to SMM mode until SMM has been relocated. By setting the region
+	 * to cacheable it provides faster access when relocating the SMM
+	 * handler as well as using the TSEG region for other purposes. */
+	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
+	slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
+	slot = stack_push(slot, 0); /* upper base */
+	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
+	num_mtrrs++;
+
+	/* Save the number of MTRRs to set up. Return the stack location
+	 * pointing to the number of MTRRs. */
+	slot = stack_push(slot, num_mtrrs);
+
+	return slot;
 }
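
For reference, the popl/wrmsr loop added to cache_as_ram.inc consumes that layout entry by entry before re-enabling the cache; with a 36-bit physical address space, mtrr_mask_upper above works out to (1 << 4) - 1 = 0xf. A host-side sketch of the same loop, with a hypothetical wrmsr32() standing in for the real MSR write (the names and the printf harness are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the wrmsr instruction so the sketch runs as a host
     * program; real firmware writes %edx:%eax to the MSR in %ecx. */
    static void wrmsr32(uint32_t msr, uint32_t lo, uint32_t hi)
    {
            printf("wrmsr 0x%03x <- 0x%08x%08x\n", msr, hi, lo);
    }

    /* Walk the prepared stack: pop the MTRR count, then for each entry
     * pop base lo/hi and mask lo/hi and write the base/mask MSR pair. */
    static void program_mtrrs_from_stack(uint32_t *slot)
    {
            uint32_t msr = 0x200;           /* MTRR_PHYS_BASE(0) */
            uint32_t count = *slot++;       /* popl %ebx */

            while (count--) {
                    uint32_t base_lo = *slot++;
                    uint32_t base_hi = *slot++;
                    wrmsr32(msr++, base_lo, base_hi);   /* MTRR_PHYS_BASE(n) */

                    uint32_t mask_lo = *slot++;
                    uint32_t mask_hi = *slot++;
                    wrmsr32(msr++, mask_lo, mask_hi);   /* MTRR_PHYS_MASK(n) */
            }
    }

With the four regions pushed in postcar.c (the ROM as WRPROT just below 4GiB, 0 through CACHE_TMP_RAMTOP as WRBACK, and 8MiB on either side of cbmem_top() as WRBACK), the loop runs four iterations, after which %esp ends up at the romstage stack top computed by romstage_ram_stack_top().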


