[coreboot-gerrit] Patch set updated for coreboot: 4888484 arm64: Decouple MMU functions from memranges

Patrick Georgi (pgeorgi@google.com) gerrit at coreboot.org
Mon Jun 1 10:56:09 CEST 2015


Patrick Georgi (pgeorgi at google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/10304

-gerrit

commit 4888484c0d1ee5ab622c3d936a9f5bdcc56fd557
Author: Julius Werner <jwerner at chromium.org>
Date:   Mon May 18 13:11:12 2015 -0700

    arm64: Decouple MMU functions from memranges
    
    The current arm64 MMU interface is difficult to use in pre-RAM
    environments. It is based on the memranges API, which makes use of
    malloc(), and early stages usually don't have a heap. It is also built
    as a one-shot interface that requires all memory ranges to be laid out
    beforehand, which is a problem when existing areas need to change (e.g.
    after initializing DRAM).
    
    The long-term goal of this patch is to completely switch to a
    configure-as-you-go interface based on the mmu_config_range() function,
    similar to what ARM32 does. As a first step, this feature is added
    alongside the existing interface so that existing SoC implementations
    continue to work and can be slowly ported over one by one. Like the
    ARM32 version, it does not garbage collect page tables that
    become unused, so repeated mapping at different granularities will
    exhaust the available table space (this is presumed to be a reasonable
    limitation for a firmware environment and keeps the code much simpler).
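
    For illustration only (not part of this change), a minimal sketch of
    the intended configure-as-you-go usage with the libpayload tags from
    this patch; dram_base, dram_size, dma_buf and dma_buf_size are
    hypothetical placeholders:

        /* Hypothetical caller, e.g. after bringing up DRAM at runtime. */
        static void example_remap_dram(void *dram_base, size_t dram_size,
                                       void *dma_buf, size_t dma_buf_size)
        {
                /* Remap the freshly initialized DRAM as cacheable memory. */
                mmu_config_range(dram_base, dram_size, TYPE_NORMAL_MEM);
                /* Carve an uncached window out of it for DMA buffers. */
                mmu_config_range(dma_buf, dma_buf_size, TYPE_DMA_MEM);
        }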
    
    Also do some cleanup, align comments between coreboot and libpayload for
    easier diffing, and change all error cases to assert()s. Right now the
    code just propagates error codes up the stack until it eventually
    reaches a function that doesn't check them anymore. MMU configuration
    errors (essentially just misaligned requests and running out of table
    space) should always be compile-time programming errors, so failing hard
    and fast seems like the best way to deal with them.
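
    For example (hypothetical call, not in this patch), a request that is
    not aligned to the 64 KiB granule now trips the assert() in
    sanity_check() right away instead of returning an error code that the
    caller would ignore:

        /* 0x1000 is not GRANULE_SIZE (64 KiB) aligned -> assertion fails. */
        mmu_config_range((void *)0x1000, 0x200000, TYPE_NORMAL_MEM);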
    
    BRANCH=None
    BUG=None
    TEST=Compile-tested rush_ryu. Booted on Oak and hacked MMU init to use
    mmu_config_range() instead of memranges. Confirmed that CRCs over all page
    tables before and after the change are equal.
    
    Change-Id: I93585b44a277c1d96d31ee9c3dd2522b5e10085b
    Signed-off-by: Patrick Georgi <pgeorgi at chromium.org>
    Original-Commit-Id: f10fcba107aba1f3ea239471cb5a4f9239809539
    Original-Change-Id: I6a2a11e3b94e6ae9e1553871f0cccd3b556b3e65
    Original-Signed-off-by: Julius Werner <jwerner at chromium.org>
    Original-Reviewed-on: https://chromium-review.googlesource.com/271991
    Original-Reviewed-by: Aaron Durbin <adurbin at chromium.org>
---
 payloads/libpayload/arch/arm64/mmu.c           | 193 ++++++++++---------------
 payloads/libpayload/include/arm64/arch/cache.h |  16 --
 payloads/libpayload/include/arm64/arch/mmu.h   |   3 +
 src/arch/arm64/armv8/mmu.c                     | 169 ++++++++++------------
 src/arch/arm64/include/armv8/arch/mmu.h        |   2 +
 5 files changed, 153 insertions(+), 230 deletions(-)

diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
index f0781f5..e2a0cb1 100644
--- a/payloads/libpayload/arch/arm64/mmu.c
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -44,6 +44,12 @@ static uint64_t *xlat_addr;
 static int free_idx;
 static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __attribute__((aligned(GRANULE_SIZE)));
 
+static const char * const tag_to_string[] = {
+	[TYPE_NORMAL_MEM] = "normal",
+	[TYPE_DEV_MEM] = "device",
+	[TYPE_DMA_MEM] = "uncached",
+};
+
 /*
  * The usedmem_ranges is used to describe all the memory ranges that are
  * actually used by payload i.e. _start -> _end in linker script and the
@@ -56,25 +62,12 @@ static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __attribute__((aligned(GRANULE_SIZE)
  */
 static struct mmu_ranges usedmem_ranges;
 
-static const uint64_t level_to_addr_mask[] = {
-	L1_ADDR_MASK,
-	L2_ADDR_MASK,
-	L3_ADDR_MASK,
-};
-
-static const uint64_t level_to_addr_shift[] = {
-	L1_ADDR_SHIFT,
-	L2_ADDR_SHIFT,
-	L3_ADDR_SHIFT,
-};
-
 static void __attribute__((noreturn)) mmu_error(void)
 {
 	halt();
 }
 
-/*
- * Func : get_block_attr
+/* Func : get_block_attr
  * Desc : Get block descriptor attributes based on the value of tag in memrange
  * region
  */
@@ -107,21 +100,7 @@ static uint64_t get_block_attr(unsigned long tag)
 	return attr;
 }
 
-/*
- * Func : get_index_from_addr
- * Desc : Get index into table at a given level using appropriate bits from the
- * base address
- */
-static uint64_t get_index_from_addr(uint64_t addr, uint8_t level)
-{
-	uint64_t mask = level_to_addr_mask[level-1];
-	uint8_t shift = level_to_addr_shift[level-1];
-
-	return ((addr & mask) >> shift);
-}
-
-/*
- * Func : table_desc_valid
+/* Func : table_desc_valid
  * Desc : Check if a table entry contains valid desc
  */
 static uint64_t table_desc_valid(uint64_t desc)
@@ -129,29 +108,34 @@ static uint64_t table_desc_valid(uint64_t desc)
 	return((desc & TABLE_DESC) == TABLE_DESC);
 }
 
-/*
- * Func : get_new_table
- * Desc : Return the next free XLAT table from ttb buffer
+/* Func : setup_new_table
+ * Desc : Get next free table from TTB and set it up to match old parent entry.
  */
-static uint64_t *get_new_table(void)
+static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
 {
-	uint64_t *new;
+	uint64_t *new, *entry;
 
-	if (free_idx >= max_tables) {
-		printf("ARM64 MMU: No free table\n");
-		return NULL;
-	}
+	assert(free_idx < max_tables);
 
 	new = (uint64_t*)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
 	free_idx++;
 
-	memset(new, 0, GRANULE_SIZE);
+	if (!desc) {
+		memset(new, 0, GRANULE_SIZE);
+	} else {
+		/* Can reuse old parent entry, but may need to adjust type. */
+		if (xlat_size == L3_XLAT_SIZE)
+			desc |= PAGE_DESC;
+
+		for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
+		     entry++, desc += xlat_size)
+			*entry = desc;
+	}
 
 	return new;
 }
 
-/*
- * Func : get_table_from_desc
+/* Func : get_table_from_desc
  * Desc : Get next level table address from table descriptor
  */
 static uint64_t *get_table_from_desc(uint64_t desc)
@@ -160,27 +144,23 @@ static uint64_t *get_table_from_desc(uint64_t desc)
 	return ptr;
 }
 
-/*
- * Func: get_next_level_table
- * Desc: Check if the table entry is a valid descriptor. If not, allocate new
+/* Func: get_next_level_table
+ * Desc: Check if the table entry is a valid descriptor. If not, initialize new
  * table, update the entry and return the table addr. If valid, return the addr.
  */
-static uint64_t *get_next_level_table(uint64_t *ptr)
+static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
 {
 	uint64_t desc = *ptr;
 
 	if (!table_desc_valid(desc)) {
-		uint64_t *new_table = get_new_table();
-		if (new_table == NULL)
-			return NULL;
+		uint64_t *new_table = setup_new_table(desc, xlat_size);
 		desc = ((uint64_t)new_table) | TABLE_DESC;
 		*ptr = desc;
 	}
 	return get_table_from_desc(desc);
 }
 
-/*
- * Func : init_xlat_table
+/* Func : init_xlat_table
  * Desc : Given a base address and size, it identifies the indices within
  * different level XLAT tables which map the given base addr. Similar to table
  * walk, except that all invalid entries during the walk are updated
@@ -191,18 +171,16 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 				uint64_t size,
 				uint64_t tag)
 {
-	uint64_t l1_index = get_index_from_addr(base_addr,1);
-	uint64_t l2_index = get_index_from_addr(base_addr,2);
-	uint64_t l3_index = get_index_from_addr(base_addr,3);
+	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
+	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
+	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
 	uint64_t *table = xlat_addr;
 	uint64_t desc;
 	uint64_t attr = get_block_attr(tag);
 
-	/*
-	 * L1 table lookup
+	/* L1 table lookup
 	 * If VA has bits more than L2 can resolve, lookup starts at L1
-	 * Assumption: we don't need L0 table in coreboot
-	 */
+	 * Assumption: we don't need L0 table in coreboot */
 	if (BITS_PER_VA > L1_ADDR_SHIFT) {
 		if ((size >= L1_XLAT_SIZE) &&
 		    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
@@ -213,94 +191,71 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 			table[l1_index] = desc;
 			/* L2 lookup is not required */
 			return L1_XLAT_SIZE;
-		} else {
-			table = get_next_level_table(&table[l1_index]);
-			if (!table)
-				return 0;
 		}
+		table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
 	}
 
-	/*
-	 * L2 table lookup
+	/* L2 table lookup
 	 * If lookup was performed at L1, L2 table addr is obtained from L1 desc
-	 * else, lookup starts at ttbr address
-	 */
+	 * else, lookup starts at ttbr address */
 	if ((size >= L2_XLAT_SIZE) &&
 	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
-		/*
-		 * If block address is aligned and size is greater than or equal
-		 * to size addressed by each L2 entry, we can
-		 * directly store a block desc
-		 */
+		/* If block address is aligned and size is greater than
+		 * or equal to size addressed by each L2 entry, we can
+		 * directly store a block desc */
 		desc = base_addr | BLOCK_DESC | attr;
 		table[l2_index] = desc;
 		/* L3 lookup is not required */
 		return L2_XLAT_SIZE;
-	} else {
-		/* L2 entry stores a table descriptor */
-		table = get_next_level_table(&table[l2_index]);
-		if (!table)
-			return 0;
 	}
 
+	/* L2 entry stores a table descriptor */
+	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);
+
 	/* L3 table lookup */
 	desc = base_addr | PAGE_DESC | attr;
 	table[l3_index] = desc;
 	return L3_XLAT_SIZE;
 }
 
-/*
- * Func : sanity_check
- * Desc : Check if the address is aligned and size is atleast the granule size
+/* Func : sanity_check
+ * Desc : Check address/size alignment of a table or page.
  */
-static uint64_t sanity_check(uint64_t addr,
-			     uint64_t size)
+static void sanity_check(uint64_t addr, uint64_t size)
 {
-	/* Address should be atleast 64 KiB aligned */
-	if (addr & GRANULE_SIZE_MASK)
-		return 1;
-
-	/* Size should be atleast granule size */
-	if (size < GRANULE_SIZE)
-		return 1;
-
-	return 0;
+	assert(!(addr & GRANULE_SIZE_MASK) &&
+	       !(size & GRANULE_SIZE_MASK) &&
+	       size >= GRANULE_SIZE);
 }
 
-/*
- * Func : init_mmap_entry
- * Desc : For each mmap entry, this function calls init_xlat_table with the base
+/* Func : mmu_config_range
+ * Desc : This function repeatedly calls init_xlat_table with the base
  * address. Based on size returned from init_xlat_table, base_addr is updated
  * and subsequent calls are made for initializing the xlat table until the whole
  * region is initialized.
  */
-static void init_mmap_entry(struct mmu_memrange *r)
+void mmu_config_range(void *start, size_t size, uint64_t tag)
 {
-	uint64_t base_addr = r->base;
-	uint64_t size	   = r->size;
-	uint64_t tag	   = r->type;
+	uint64_t base_addr = (uintptr_t)start;
 	uint64_t temp_size = size;
 
-	while (temp_size) {
-		uint64_t ret;
-
-		if (sanity_check(base_addr,temp_size)) {
-			printf("Libpayload: ARM64 MMU: sanity check failed\n");
-			return;
-		}
-
-		ret = init_xlat_table(base_addr + (size - temp_size),
-				      temp_size, tag);
+	assert(tag < ARRAY_SIZE(tag_to_string));
+	printf("Libpayload: ARM64 MMU: Mapping address range [%p:%p) as %s\n",
+	       start, start + size, tag_to_string[tag]);
+	sanity_check(base_addr, temp_size);
 
-		if (ret == 0)
-			return;
+	while (temp_size)
+		temp_size -= init_xlat_table(base_addr + (size - temp_size),
+					     temp_size, tag);
 
-		temp_size -= ret;
-	}
+	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
+	dsb();
+	tlbiall_current();
+	dsb();
+	isb();
 }
 
-/*
- * Func : mmu_init
+/* Func : mmu_init
  * Desc : Initialize mmu based on the mmu_memrange passed. ttb_buffer is used as
  * the base address for xlat tables. TTB_DEFAULT_SIZE defines the max number of
  * tables that can be used
@@ -308,8 +263,6 @@ static void init_mmap_entry(struct mmu_memrange *r)
  */
 uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
 {
-	struct mmu_memrange devrange = { 0, 0x80000000, TYPE_DEV_MEM };
-
 	int i = 0;
 
 	xlat_addr = (uint64_t *)&ttb_buffer;
@@ -321,11 +274,12 @@ uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
 	printf("Libpayload ARM64: TTB_BUFFER: 0x%p Max Tables: %d\n",
 	       (void*)xlat_addr, max_tables);
 
-	init_mmap_entry(&devrange);
+	mmu_config_range(NULL, 0x80000000, TYPE_DEV_MEM);
 
-	for (; i < mmu_ranges->used; i++) {
-		init_mmap_entry(&mmu_ranges->entries[i]);
-	}
+	for (; i < mmu_ranges->used; i++)
+		mmu_config_range((void *)mmu_ranges->entries[i].base,
+				 mmu_ranges->entries[i].size,
+				 mmu_ranges->entries[i].type);
 
 	printf("Libpayload ARM64: MMU init done\n");
 	return 0;
@@ -388,8 +342,7 @@ void mmu_enable(void)
 	/* Initialize TTBR */
 	raw_write_ttbr0_current((uintptr_t)xlat_addr);
 
-	/* Ensure all translation table writes are committed before enabling MMU */
-	dsb();
+	/* Ensure system register writes are committed before enabling MMU */
 	isb();
 
 	/* Enable MMU */
diff --git a/payloads/libpayload/include/arm64/arch/cache.h b/payloads/libpayload/include/arm64/arch/cache.h
index 7248869..de68cee 100644
--- a/payloads/libpayload/include/arm64/arch/cache.h
+++ b/payloads/libpayload/include/arm64/arch/cache.h
@@ -117,20 +117,4 @@ static inline void icache_invalidate_all(void)
 	: : : "memory");
 }
 
-/*
- * Generalized setup/init functions
- */
-
-enum dcache_policy {
-	DCACHE_OFF,
-	DCACHE_WRITEBACK,
-	DCACHE_WRITETHROUGH,
-};
-
-/* disable the mmu for a range. Primarily useful to lock out address 0. */
-void mmu_disable_range(unsigned long start_mb, unsigned long size_mb);
-/* mmu range configuration (set dcache policy) */
-void mmu_config_range(unsigned long start_mb, unsigned long size_mb,
-						enum dcache_policy policy);
-
 #endif /* ARM64_CACHE_H */
diff --git a/payloads/libpayload/include/arm64/arch/mmu.h b/payloads/libpayload/include/arm64/arch/mmu.h
index 79bc783..30a1973 100644
--- a/payloads/libpayload/include/arm64/arch/mmu.h
+++ b/payloads/libpayload/include/arm64/arch/mmu.h
@@ -175,6 +175,9 @@ void mmu_enable(void);
 /* Disable mmu */
 void mmu_disable(void);
 
+/* Change a memory type for a range of bytes at runtime. */
+void mmu_config_range(void *start, size_t size, uint64_t tag);
+
 /*
  * Based on the memory ranges provided in coreboot tables,
  * initialize the mmu_memranges used for mmu initialization
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index 99c97fb..0bd678a 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -27,10 +27,12 @@
  * SUCH DAMAGE.
  */
 
+#include <assert.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <string.h>
 
+#include <console/console.h>
 #include <memrange.h>
 #include <arch/mmu.h>
 #include <arch/lib_helpers.h>
@@ -42,17 +44,17 @@ static unsigned int max_tables;
 static uint64_t *xlat_addr;
 static int free_idx;
 
-static const uint64_t level_to_addr_mask[] = {
-	L1_ADDR_MASK,
-	L2_ADDR_MASK,
-	L3_ADDR_MASK,
-};
-
-static const uint64_t level_to_addr_shift[] = {
-	L1_ADDR_SHIFT,
-	L2_ADDR_SHIFT,
-	L3_ADDR_SHIFT,
-};
+static void print_tag(int level, uint64_t tag)
+{
+	printk(level, tag & MA_MEM_NC ? "    cacheable | " :
+					"non-cacheable | ");
+	printk(level, tag & MA_RO ?	"read-only  | " :
+					"read-write | ");
+	printk(level, tag & MA_NS ?	"non-secure | " :
+					"    secure | ");
+	printk(level, tag & MA_MEM ?	"normal\n" :
+					"device\n");
+}
 
 /* Func : get_block_attr
  * Desc : Get block descriptor attributes based on the value of tag in memrange
@@ -79,18 +81,6 @@ static uint64_t get_block_attr(unsigned long tag)
 	return attr;
 }
 
-/* Func : get_index_from_addr
- * Desc : Get index into table at a given level using appropriate bits from the
- * base address
- */
-static uint64_t get_index_from_addr(uint64_t addr, uint8_t level)
-{
-	uint64_t mask = level_to_addr_mask[level-1];
-	uint8_t shift = level_to_addr_shift[level-1];
-
-	return ((addr & mask) >> shift);
-}
-
 /* Func : table_desc_valid
  * Desc : Check if a table entry contains valid desc
  */
@@ -99,21 +89,29 @@ static uint64_t table_desc_valid(uint64_t desc)
 	return((desc & TABLE_DESC) == TABLE_DESC);
 }
 
-/* Func : get_new_table
- * Desc : Return the next free XLAT table from ttb buffer
+/* Func : setup_new_table
+ * Desc : Get next free table from TTB and set it up to match old parent entry.
  */
-static uint64_t *get_new_table(void)
+static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
 {
-	uint64_t *new;
+	uint64_t *new, *entry;
 
-	if (free_idx >= max_tables) {
-		return NULL;
-	}
+	assert(free_idx < max_tables);
 
 	new = (uint64_t*)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
 	free_idx++;
 
-	memset(new, 0, GRANULE_SIZE);
+	if (!desc) {
+		memset(new, 0, GRANULE_SIZE);
+	} else {
+		/* Can reuse old parent entry, but may need to adjust type. */
+		if (xlat_size == L3_XLAT_SIZE)
+			desc |= PAGE_DESC;
+
+		for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
+		     entry++, desc += xlat_size)
+			*entry = desc;
+	}
 
 	return new;
 }
@@ -128,17 +126,15 @@ static uint64_t *get_table_from_desc(uint64_t desc)
 }
 
 /* Func: get_next_level_table
- * Desc: Check if the table entry is a valid descriptor. If not, allocate new
+ * Desc: Check if the table entry is a valid descriptor. If not, initialize new
  * table, update the entry and return the table addr. If valid, return the addr
  */
-static uint64_t *get_next_level_table(uint64_t *ptr)
+static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
 {
 	uint64_t desc = *ptr;
 
 	if (!table_desc_valid(desc)) {
-		uint64_t *new_table = get_new_table();
-		if (new_table == NULL)
-			return NULL;
+		uint64_t *new_table = setup_new_table(desc, xlat_size);
 		desc = ((uint64_t)new_table) | TABLE_DESC;
 		*ptr = desc;
 	}
@@ -150,22 +146,22 @@ static uint64_t *get_next_level_table(uint64_t *ptr)
  * different level XLAT tables which map the given base addr. Similar to table
  * walk, except that all invalid entries during the walk are updated
  * accordingly. On success, it returns the size of the block/page addressed by
- * the final table
+ * the final table.
  */
 static uint64_t init_xlat_table(uint64_t base_addr,
 				uint64_t size,
 				uint64_t tag)
 {
-	uint64_t l1_index = get_index_from_addr(base_addr,1);
-	uint64_t l2_index = get_index_from_addr(base_addr,2);
-	uint64_t l3_index = get_index_from_addr(base_addr,3);
+	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
+	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
+	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
 	uint64_t *table = xlat_addr;
 	uint64_t desc;
 	uint64_t attr = get_block_attr(tag);
 
-	/* L1 table lookup */
-	/* If VA has bits more than L2 can resolve, lookup starts at L1
-	   Assumption: we don't need L0 table in coreboot */
+	/* L1 table lookup
+	 * If VA has bits more than L2 can resolve, lookup starts at L1
+	 * Assumption: we don't need L0 table in coreboot */
 	if (BITS_PER_VA > L1_ADDR_SHIFT) {
 		if ((size >= L1_XLAT_SIZE) &&
 		    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
@@ -176,16 +172,13 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 			table[l1_index] = desc;
 			/* L2 lookup is not required */
 			return L1_XLAT_SIZE;
-		} else {
-			table = get_next_level_table(&table[l1_index]);
-			if (!table)
-				return 0;
 		}
+		table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
 	}
 
-	/* L2 table lookup */
-	/* If lookup was performed at L1, L2 table addr is obtained from L1 desc
-	   else, lookup starts at ttbr address */
+	/* L2 table lookup
+	 * If lookup was performed at L1, L2 table addr is obtained from L1 desc
+	 * else, lookup starts at ttbr address */
 	if ((size >= L2_XLAT_SIZE) &&
 	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
 		/* If block address is aligned and size is greater than
@@ -195,13 +188,11 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 		table[l2_index] = desc;
 		/* L3 lookup is not required */
 		return L2_XLAT_SIZE;
-	} else {
-		/* L2 entry stores a table descriptor */
-		table = get_next_level_table(&table[l2_index]);
-		if (!table)
-			return 0;
 	}
 
+	/* L2 entry stores a table descriptor */
+	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);
+
 	/* L3 table lookup */
 	desc = base_addr | PAGE_DESC | attr;
 	table[l3_index] = desc;
@@ -209,50 +200,40 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 }
 
 /* Func : sanity_check
- * Desc : Check if the address is aligned and size is atleast the granule size
+ * Desc : Check address/size alignment of a table or page.
  */
-static uint64_t sanity_check(uint64_t addr,
-			     uint64_t size)
+static void sanity_check(uint64_t addr, uint64_t size)
 {
-	/* Address should be atleast 64 KiB aligned */
-	if (addr & GRANULE_SIZE_MASK)
-		return 1;
-
-	/* Size should be atleast granule size */
-	if (size < GRANULE_SIZE)
-		return 1;
-
-	return 0;
+	assert(!(addr & GRANULE_SIZE_MASK) &&
+	       !(size & GRANULE_SIZE_MASK) &&
+	       size >= GRANULE_SIZE);
 }
 
-/* Func : init_mmap_entry
- * Desc : For each mmap entry, this function calls init_xlat_table with the base
+/* Func : mmu_config_range
+ * Desc : This function repeatedly calls init_xlat_table with the base
  * address. Based on size returned from init_xlat_table, base_addr is updated
  * and subsequent calls are made for initializing the xlat table until the whole
  * region is initialized.
  */
-static void init_mmap_entry(struct range_entry *r)
+void mmu_config_range(void *start, size_t size, uint64_t tag)
 {
-	uint64_t base_addr = range_entry_base(r);
-	uint64_t size      = range_entry_size(r);
-	uint64_t tag       = range_entry_tag(r);
+	uint64_t base_addr = (uintptr_t)start;
 	uint64_t temp_size = size;
 
-	while (temp_size) {
-		uint64_t ret;
-
-		if (sanity_check(base_addr,temp_size)) {
-			return;
-		}
+	printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
+	       start, start + size);
+	print_tag(BIOS_INFO, tag);
+	sanity_check(base_addr, temp_size);
 
-		ret = init_xlat_table(base_addr + (size - temp_size),
-				      temp_size,tag);
+	while (temp_size)
+		temp_size -= init_xlat_table(base_addr + (size - temp_size),
+					     temp_size, tag);
 
-		if (ret == 0)
-			return;
-
-		temp_size -= ret;
-	}
+	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
+	dsb();
+	tlbiall_current();
+	dsb();
+	isb();
 }
 
 /* Func : mmu_init
@@ -266,18 +247,19 @@ void mmu_init(struct memranges *mmap_ranges,
 {
 	struct range_entry *mmap_entry;
 
-	if (sanity_check((uint64_t)ttb_buffer, ttb_size)) {
-		return;
-	}
+	sanity_check((uint64_t)ttb_buffer, ttb_size);
 
 	memset((void*)ttb_buffer, 0, GRANULE_SIZE);
 	max_tables = (ttb_size >> GRANULE_SIZE_SHIFT);
 	xlat_addr = ttb_buffer;
 	free_idx = 1;
 
-	memranges_each_entry(mmap_entry, mmap_ranges) {
-		init_mmap_entry(mmap_entry);
-	}
+	if (mmap_ranges)
+		memranges_each_entry(mmap_entry, mmap_ranges) {
+			mmu_config_range((void *)range_entry_base(mmap_entry),
+					 range_entry_size(mmap_entry),
+					 range_entry_tag(mmap_entry));
+		}
 }
 
 void mmu_enable(void)
@@ -298,8 +280,7 @@ void mmu_enable(void)
 	/* Initialize TTBR */
 	raw_write_ttbr0_el3((uintptr_t)xlat_addr);
 
-	/* Ensure all translation table writes are committed before enabling MMU */
-	dsb();
+	/* Ensure system register writes are committed before enabling MMU */
 	isb();
 
 	/* Enable MMU */
diff --git a/src/arch/arm64/include/armv8/arch/mmu.h b/src/arch/arm64/include/armv8/arch/mmu.h
index c487c1f..f5111ae 100644
--- a/src/arch/arm64/include/armv8/arch/mmu.h
+++ b/src/arch/arm64/include/armv8/arch/mmu.h
@@ -148,5 +148,7 @@
 void mmu_init(struct memranges *ranges, uint64_t *ttb, uint64_t ttb_size);
 /* Enable the mmu based on previous mmu_init(). */
 void mmu_enable(void);
+/* Change a memory type for a range of bytes at runtime. */
+void mmu_config_range(void *start, size_t size, uint64_t tag);
 
 #endif // __ARCH_ARM64_MMU_H__


