Patrick Georgi (pgeorgi@google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/10009
-gerrit
commit 2f452b4492df58f7e7f1ca556fed9b1a6c0b88c4
Author: Jimmy Huang <jimmy.huang@mediatek.com>
Date:   Mon Apr 13 20:28:38 2015 +0800
arch/arm64: update mmu translation table granule size, logic and macros
1. change mmu granule size from 64KB to 4KB
2. correct level 1 translation table creation logic
3. automatically calculate granule size related macros
BRANCH=none
BUG=none
TEST=boot to kernel on oak board
Change-Id: I9e99a3017033f6870b1735ac8faabb267c7be0a4
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Original-Commit-Id: 2f18c4d5d9902f2830db82720c5543af270a7e3c
Original-Change-Id: Ia27a414ab7578d70b00c36f9c063983397ba7927
Original-Signed-off-by: Jimmy Huang <jimmy.huang@mediatek.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/265603
Original-Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Original-Commit-Queue: Yidi Lin <yidi.lin@mediatek.com>
Original-Tested-by: Yidi Lin <yidi.lin@mediatek.com>
---
 src/arch/arm64/armv8/mmu.c              | 33 +++++++++++++------
 src/arch/arm64/include/armv8/arch/mmu.h | 58 ++++++++++-----------------
 2 files changed, 40 insertions(+), 51 deletions(-)
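For readers following the macro changes below: the new mmu.h derives every granule-dependent constant from GRANULE_SIZE_SHIFT instead of hard-coding values for one granule size. The following standalone C sketch is not part of the patch; it simply replays that arithmetic for the 4KB granule and the 33-bit VA space used here, so the resulting shifts and block sizes can be sanity-checked.

/* Standalone sketch (not part of this patch): reproduces the macro
 * arithmetic from mmu.h for a 4KB granule and a 33-bit VA space. */
#include <stdio.h>
#include <stdint.h>

#define BITS_PER_VA           33
#define GRANULE_SIZE_SHIFT    12                        /* 4KB granule */
#define BITS_RESOLVED_PER_LVL (GRANULE_SIZE_SHIFT - 3)  /* 512 8-byte entries per table -> 9 bits */
#define L1_ADDR_SHIFT         (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
#define L2_ADDR_SHIFT         (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)
#define L3_ADDR_SHIFT         (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)

int main(void)
{
	/* Size covered by one entry at each translation level. */
	uint64_t l1_xlat_size = 1UL << L1_ADDR_SHIFT;  /* 1GiB */
	uint64_t l2_xlat_size = 1UL << L2_ADDR_SHIFT;  /* 2MiB */
	uint64_t l3_xlat_size = 1UL << L3_ADDR_SHIFT;  /* 4KiB */

	printf("bits resolved per level: %d\n", BITS_RESOLVED_PER_LVL);
	printf("L1/L2/L3 entry sizes: %llu MiB / %llu KiB / %llu KiB\n",
	       (unsigned long long)(l1_xlat_size >> 20),
	       (unsigned long long)(l2_xlat_size >> 10),
	       (unsigned long long)(l3_xlat_size >> 10));

	/* Lookup starts at L1 because a 33-bit VA exceeds what L2 can
	 * resolve (L2_ADDR_SHIFT + BITS_RESOLVED_PER_LVL = 30 bits), but
	 * no L0 table is needed because 33 <= L1_ADDR_SHIFT +
	 * BITS_RESOLVED_PER_LVL = 39. */
	printf("L1 lookup needed: %s, L0 table needed: %s\n",
	       BITS_PER_VA > L1_ADDR_SHIFT ? "yes" : "no",
	       BITS_PER_VA > L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL ? "yes" : "no");

	return 0;
}

With these values, a region that is 1GiB-aligned and at least 1GiB large can be mapped by a single L1 block descriptor, and a 2MiB-aligned region by an L2 block descriptor, which is the early-return logic added to init_xlat_table() in mmu.c below; the switch from TCR_TG0_64KB to TCR_TG0_4KB keeps the hardware granule consistent with the new GRANULE_SIZE_SHIFT.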
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index 84b9935..99c97fb 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -164,20 +164,33 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 	uint64_t attr = get_block_attr(tag);
 
 	/* L1 table lookup */
-	/* If VA has bits more than 41, lookup starts at L1 */
-	if (l1_index) {
-		table = get_next_level_table(&table[l1_index]);
-		if (!table)
-			return 0;
+	/* If VA has bits more than L2 can resolve, lookup starts at L1
+	   Assumption: we don't need L0 table in coreboot */
+	if (BITS_PER_VA > L1_ADDR_SHIFT) {
+		if ((size >= L1_XLAT_SIZE) &&
+		    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
+			/* If block address is aligned and size is greater than
+			 * or equal to size addressed by each L1 entry, we can
+			 * directly store a block desc */
+			desc = base_addr | BLOCK_DESC | attr;
+			table[l1_index] = desc;
+			/* L2 lookup is not required */
+			return L1_XLAT_SIZE;
+		} else {
+			table = get_next_level_table(&table[l1_index]);
+			if (!table)
+				return 0;
+		}
 	}
 
 	/* L2 table lookup */
 	/* If lookup was performed at L1, L2 table addr is obtained from L1 desc
 	   else, lookup starts at ttbr address */
-	if (!l3_index && (size >= L2_XLAT_SIZE)) {
-		/* If block address is aligned and size is greater than or equal
-		   to 512MiB i.e. size addressed by each L2 entry, we can
-		   directly store a block desc */
+	if ((size >= L2_XLAT_SIZE) &&
+	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
+		/* If block address is aligned and size is greater than
+		 * or equal to size addressed by each L2 entry, we can
+		 * directly store a block desc */
 		desc = base_addr | BLOCK_DESC | attr;
 		table[l2_index] = desc;
 		/* L3 lookup is not required */
@@ -279,7 +292,7 @@ void mmu_enable(void)
 
 	/* Initialize TCR flags */
 	raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
-			  TCR_SH0_IS | TCR_TG0_64KB | TCR_PS_64GB |
+			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB |
 			  TCR_TBI_USED);
 	/* Initialize TTBR */
diff --git a/src/arch/arm64/include/armv8/arch/mmu.h b/src/arch/arm64/include/armv8/arch/mmu.h
index 564d6af..59d2158 100644
--- a/src/arch/arm64/include/armv8/arch/mmu.h
+++ b/src/arch/arm64/include/armv8/arch/mmu.h
@@ -22,15 +22,6 @@
 
 #include <memrange.h>
 
-/* IMPORTANT!!!!!!!
- * Assumptions made:
- * Granule size is 64KiB
- * BITS per Virtual address is 33
- * All the calculations for tables L1,L2 and L3 are based on these assumptions
- * If these values are changed, recalculate the other macros as well
- */
-
-
 /* Memory attributes for mmap regions
  * These attributes act as tag values for memrange regions
  */
@@ -74,46 +65,31 @@
 /* XLAT Table Init Attributes */
 #define VA_START 0x0
-/* If BITS_PER_VA or GRANULE_SIZE are changed, recalculate and change the
-   macros following them */
 #define BITS_PER_VA 33
-/* Granule size of 64KB is being used */
-#define GRANULE_SIZE_SHIFT 16
+/* Granule size of 4KB is being used */
+#define GRANULE_SIZE_SHIFT 12
 #define GRANULE_SIZE (1 << GRANULE_SIZE_SHIFT)
-#define XLAT_TABLE_MASK ~(0xffffUL)
-#define GRANULE_SIZE_MASK ((1 << 16) - 1)
-
-#define L1_ADDR_SHIFT 42
-#define L2_ADDR_SHIFT 29
-#define L3_ADDR_SHIFT 16
-
-#define L1_ADDR_MASK (0UL << L1_ADDR_SHIFT)
-#define L2_ADDR_MASK (0xfUL << L2_ADDR_SHIFT)
-#define L3_ADDR_MASK (0x1fffUL << L3_ADDR_SHIFT)
-
-/* Dependent on BITS_PER_VA and GRANULE_SIZE */
-#define INIT_LEVEL 2
-#define XLAT_MAX_LEVEL 3
-
-/* Each entry in XLAT table is 8 bytes */
-#define XLAT_ENTRY_SHIFT 3
-#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SHIFT)
+#define XLAT_TABLE_MASK (~(0UL) << GRANULE_SIZE_SHIFT)
+#define GRANULE_SIZE_MASK ((1 << GRANULE_SIZE_SHIFT) - 1)
 
-#define XLAT_TABLE_SHIFT GRANULE_SIZE_SHIFT
-#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SHIFT)
+#define BITS_RESOLVED_PER_LVL (GRANULE_SIZE_SHIFT - 3)
+#define L1_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
+#define L2_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)
+#define L3_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)
 
-#define XLAT_NUM_ENTRIES_SHIFT (XLAT_TABLE_SHIFT - XLAT_ENTRY_SHIFT)
-#define XLAT_NUM_ENTRIES (1 << XLAT_NUM_ENTRIES_SHIFT)
+#if BITS_PER_VA > L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL
+  #error "BITS_PER_VA too large (we don't have L0 table support)"
+#endif
 
-#define L3_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT)
-#define L2_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT)
-#define L1_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT)
+#define L1_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L1_ADDR_SHIFT)
+#define L2_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L2_ADDR_SHIFT)
+#define L3_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L3_ADDR_SHIFT)
 
 /* These macros give the size of the region addressed by each entry of a xlat
    table at any given level */
-#define L3_XLAT_SIZE (1 << L3_XLAT_SIZE_SHIFT)
-#define L2_XLAT_SIZE (1 << L2_XLAT_SIZE_SHIFT)
-#define L1_XLAT_SIZE (1 << L1_XLAT_SIZE_SHIFT)
+#define L3_XLAT_SIZE (1UL << L3_ADDR_SHIFT)
+#define L2_XLAT_SIZE (1UL << L2_ADDR_SHIFT)
+#define L1_XLAT_SIZE (1UL << L1_ADDR_SHIFT)
 /* Block indices required for MAIR */
 #define BLOCK_INDEX_MEM_DEV_NGNRNE 0