Evan Green has uploaded this change for review. ( https://review.coreboot.org/25639 )

Change subject: Revert "arch/arm64/armv8/mmu: Add support for 48bit VA"
......................................................................
Revert "arch/arm64/armv8/mmu: Add support for 48bit VA"
This reverts commit 57afc5e0f2309ba9f7fbd171642f04c6da9d9976.
Change-Id: I7c3a04d7d55b10736ed68ba96f892e2aaa1e3e2d
---
M payloads/libpayload/arch/arm64/mmu.c
M payloads/libpayload/include/arm64/arch/mmu.h
M src/arch/arm64/armv8/mmu.c
M src/arch/arm64/include/armv8/arch/mmu.h
4 files changed, 37 insertions(+), 37 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/39/25639/1
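For reviewers skimming the diff below: the revert drops the fourth translation level (L0) and returns BITS_PER_VA to 33, so the table walk is again rooted at L1, with a 4KB granule resolving 9 bits per level (L1 at bit 30, L2 at bit 21, L3 at bit 12). The following standalone sketch is illustrative only and not part of this change; it mirrors the mask-and-shift indexing init_xlat_table performs with the restored macros. main() and the sample address are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Constants as restored by this revert (4KB granule, no L0 level). */
#define GRANULE_SIZE_SHIFT	12
#define BITS_RESOLVED_PER_LVL	(GRANULE_SIZE_SHIFT - 3)	/* 9 */
#define L1_ADDR_SHIFT	(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2) /* 30 */
#define L2_ADDR_SHIFT	(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1) /* 21 */
#define L3_ADDR_SHIFT	(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0) /* 12 */
/* 1ULL rather than the headers' 1UL only so the sketch also runs on
 * 32-bit hosts. */
#define ADDR_MASK(shift) (((1ULL << BITS_RESOLVED_PER_LVL) - 1) << (shift))

int main(void)
{
	uint64_t va = 0x123456789ULL;	/* any VA below 1ULL << 33 */

	/* Same index extraction init_xlat_table uses, rooted at L1. */
	printf("L1=%llu L2=%llu L3=%llu\n",
	       (unsigned long long)((va & ADDR_MASK(L1_ADDR_SHIFT)) >> L1_ADDR_SHIFT),
	       (unsigned long long)((va & ADDR_MASK(L2_ADDR_SHIFT)) >> L2_ADDR_SHIFT),
	       (unsigned long long)((va & ADDR_MASK(L3_ADDR_SHIFT)) >> L3_ADDR_SHIFT));
	return 0;
}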
diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
index c860ee0..d84f969 100644
--- a/payloads/libpayload/arch/arm64/mmu.c
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -172,7 +172,6 @@
 		uint64_t size,
 		uint64_t tag)
 {
-	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
 	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
 	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
 	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
@@ -180,12 +179,12 @@
 	uint64_t desc;
 	uint64_t attr = get_block_attr(tag);
 
-	/* L0 entry stores a table descriptor (doesn't support blocks) */
-	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);
-
-	/* L1 table lookup */
-	if ((size >= L1_XLAT_SIZE) &&
-	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
+	/* L1 table lookup
+	 * If VA has bits more than L2 can resolve, lookup starts at L1
+	 * Assumption: we don't need L0 table in coreboot */
+	if (BITS_PER_VA > L1_ADDR_SHIFT) {
+		if ((size >= L1_XLAT_SIZE) &&
+		    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
 			/* If block address is aligned and size is greater than
 			 * or equal to size addressed by each L1 entry, we can
 			 * directly store a block desc */
@@ -193,12 +192,13 @@
 			table[l1_index] = desc;
 			/* L2 lookup is not required */
 			return L1_XLAT_SIZE;
+		}
+		table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
 	}
 
-	/* L1 entry stores a table descriptor */
-	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
-
-	/* L2 table lookup */
+	/* L2 table lookup
+	 * If lookup was performed at L1, L2 table addr is obtained from L1 desc
+	 * else, lookup starts at ttbr address */
 	if ((size >= L2_XLAT_SIZE) &&
 	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
 		/* If block address is aligned and size is greater than
@@ -226,7 +226,6 @@
 {
 	assert(!(addr & GRANULE_SIZE_MASK) &&
 	       !(size & GRANULE_SIZE_MASK) &&
-	       (addr + size < (1UL << BITS_PER_VA)) &&
 	       size >= GRANULE_SIZE);
 }
@@ -345,7 +344,7 @@
 	/* Initialize TCR flags */
 	raw_write_tcr_current(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
-			      TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
+			      TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB |
 			      TCR_TBI_USED);
 	/* Initialize TTBR */

diff --git a/payloads/libpayload/include/arm64/arch/mmu.h b/payloads/libpayload/include/arm64/arch/mmu.h
index 3cea696..2f87d09 100644
--- a/payloads/libpayload/include/arm64/arch/mmu.h
+++ b/payloads/libpayload/include/arm64/arch/mmu.h
@@ -83,7 +83,7 @@
 /* XLAT Table Init Attributes */
 
 #define VA_START 0x0
-#define BITS_PER_VA 48
+#define BITS_PER_VA 33
 #define MIN_64_BIT_ADDR (1UL << 32)
 /* Granule size of 4KB is being used */
 #define GRANULE_SIZE_SHIFT 12
@@ -92,12 +92,14 @@
 #define GRANULE_SIZE_MASK ((1 << GRANULE_SIZE_SHIFT) - 1)
 
 #define BITS_RESOLVED_PER_LVL (GRANULE_SIZE_SHIFT - 3)
-#define L0_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 3)
 #define L1_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
 #define L2_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)
 #define L3_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)
 
-#define L0_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L0_ADDR_SHIFT)
+#if BITS_PER_VA > L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL
+  #error "BITS_PER_VA too large (we don't have L0 table support)"
+#endif
+
 #define L1_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L1_ADDR_SHIFT)
 #define L2_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L2_ADDR_SHIFT)
 #define L3_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L3_ADDR_SHIFT)
@@ -107,7 +109,6 @@
 #define L3_XLAT_SIZE (1UL << L3_ADDR_SHIFT)
 #define L2_XLAT_SIZE (1UL << L2_ADDR_SHIFT)
 #define L1_XLAT_SIZE (1UL << L1_ADDR_SHIFT)
-#define L0_XLAT_SIZE (1UL << L0_ADDR_SHIFT)
 
 /* Block indices required for MAIR */
 #define BLOCK_INDEX_MEM_DEV_NGNRNE 0

diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index a24e7c6..55bd703 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -141,7 +141,6 @@
 		uint64_t size,
 		uint64_t tag)
 {
-	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
 	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
 	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
 	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
@@ -149,12 +148,12 @@
 	uint64_t desc;
 	uint64_t attr = get_block_attr(tag);
 
-	/* L0 entry stores a table descriptor (doesn't support blocks) */
-	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);
-
-	/* L1 table lookup */
-	if ((size >= L1_XLAT_SIZE) &&
-	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
+	/* L1 table lookup
+	 * If VA has bits more than L2 can resolve, lookup starts at L1
+	 * Assumption: we don't need L0 table in coreboot */
+	if (BITS_PER_VA > L1_ADDR_SHIFT) {
+		if ((size >= L1_XLAT_SIZE) &&
+		    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
 			/* If block address is aligned and size is greater than
 			 * or equal to size addressed by each L1 entry, we can
 			 * directly store a block desc */
@@ -162,12 +161,13 @@
 			table[l1_index] = desc;
 			/* L2 lookup is not required */
 			return L1_XLAT_SIZE;
+		}
+		table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
 	}
 
-	/* L1 entry stores a table descriptor */
-	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);
-
-	/* L2 table lookup */
+	/* L2 table lookup
+	 * If lookup was performed at L1, L2 table addr is obtained from L1 desc
+	 * else, lookup starts at ttbr address */
 	if ((size >= L2_XLAT_SIZE) &&
 	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
 		/* If block address is aligned and size is greater than
@@ -195,7 +195,6 @@
 {
 	assert(!(addr & GRANULE_SIZE_MASK) &&
 	       !(size & GRANULE_SIZE_MASK) &&
-	       (addr + size < (1UL << BITS_PER_VA)) &&
 	       size >= GRANULE_SIZE);
 }
@@ -203,7 +202,7 @@ * Desc : Returns the page table entry governing a specific address.
  */
 static uint64_t get_pte(void *addr)
 {
-	int shift = L0_ADDR_SHIFT;
+	int shift = BITS_PER_VA > L1_ADDR_SHIFT ? L1_ADDR_SHIFT : L2_ADDR_SHIFT;
 	uint64_t *pte = (uint64_t *)_ttb;
 
 	while (1) {
@@ -258,8 +257,8 @@
 	for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
 		table[0] = UNUSED_DESC;
 
-	/* Initialize the root table (L0) to be completely unmapped. */
-	uint64_t *root = setup_new_table(INVALID_DESC, L0_XLAT_SIZE);
+	/* Initialize the root table (L1) to be completely unmapped. */
+	uint64_t *root = setup_new_table(INVALID_DESC, L1_XLAT_SIZE);
 	assert((u8 *)root == _ttb);
 
 	/* Initialize TTBR */
@@ -270,7 +269,7 @@
 	/* Initialize TCR flags */
 	raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
-			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
+			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB |
 			  TCR_TBI_USED);
 }
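A note on the two TCR hunks (here and in the libpayload copy above): TCR.T0SZ encodes the input VA size as 64 - T0SZ, and these headers derive it from BITS_PER_VA, so dropping back to 33 bits yields T0SZ = 31; the physical-size field likewise falls from TCR_PS_256TB (48-bit PA) back to TCR_PS_64GB (36-bit PA), which still covers the restored 8GB VA window. A minimal sketch of that arithmetic, assuming TCR_TOSZ is defined as (64 - BITS_PER_VA) as elsewhere in these headers; nothing below comes from the patch itself:

#include <stdio.h>

#define BITS_PER_VA	33			/* value this revert restores */
#define TCR_TOSZ	(64 - BITS_PER_VA)	/* T0SZ field of TCR_ELx */

int main(void)
{
	/* 64 - T0SZ gives back the VA width the MMU will accept. */
	printf("T0SZ=%d -> %d-bit VA (%llu GiB)\n", TCR_TOSZ, 64 - TCR_TOSZ,
	       (1ULL << (64 - TCR_TOSZ)) >> 30);
	return 0;
}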
diff --git a/src/arch/arm64/include/armv8/arch/mmu.h b/src/arch/arm64/include/armv8/arch/mmu.h
index f0e551e..a812073 100644
--- a/src/arch/arm64/include/armv8/arch/mmu.h
+++ b/src/arch/arm64/include/armv8/arch/mmu.h
@@ -69,7 +69,7 @@
 /* XLAT Table Init Attributes */
 
 #define VA_START 0x0
-#define BITS_PER_VA 48
+#define BITS_PER_VA 33
 /* Granule size of 4KB is being used */
 #define GRANULE_SIZE_SHIFT 12
 #define GRANULE_SIZE (1 << GRANULE_SIZE_SHIFT)
@@ -77,12 +77,14 @@
 #define GRANULE_SIZE_MASK ((1 << GRANULE_SIZE_SHIFT) - 1)
 
 #define BITS_RESOLVED_PER_LVL (GRANULE_SIZE_SHIFT - 3)
-#define L0_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 3)
 #define L1_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
 #define L2_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)
 #define L3_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)
 
-#define L0_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L0_ADDR_SHIFT)
+#if BITS_PER_VA > L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL
+  #error "BITS_PER_VA too large (we don't have L0 table support)"
+#endif
+
 #define L1_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L1_ADDR_SHIFT)
 #define L2_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L2_ADDR_SHIFT)
 #define L3_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L3_ADDR_SHIFT)
@@ -92,7 +94,6 @@
 #define L3_XLAT_SIZE (1UL << L3_ADDR_SHIFT)
 #define L2_XLAT_SIZE (1UL << L2_ADDR_SHIFT)
 #define L1_XLAT_SIZE (1UL << L1_ADDR_SHIFT)
-#define L0_XLAT_SIZE (1UL << L0_ADDR_SHIFT)
 
 /* Block indices required for MAIR */
 #define BLOCK_INDEX_MEM_DEV_NGNRNE 0
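Why the restored #if guard is sufficient: with a 4KB granule each table level resolves GRANULE_SIZE_SHIFT - 3 = 9 bits, so a three-level walk rooted at L1 reaches at most L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL = 30 + 9 = 39 VA bits. The restored BITS_PER_VA of 33 fits comfortably, while the 48-bit configuration being reverted is exactly what required the extra L0 level. The same bound restated as compile-time checks; this is an illustrative sketch, not code from the patch:

/* Mirrors the "#if BITS_PER_VA > L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL"
 * guard that this revert puts back, using C11 _Static_assert. */
#define GRANULE_SIZE_SHIFT	12
#define BITS_RESOLVED_PER_LVL	(GRANULE_SIZE_SHIFT - 3)			 /* 9 */
#define L1_ADDR_SHIFT		(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2) /* 30 */

_Static_assert(33 <= L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL,
	       "a 33-bit VA fits the 39-bit reach of an L1-rooted walk");
_Static_assert(48 > L1_ADDR_SHIFT + BITS_RESOLVED_PER_LVL,
	       "a 48-bit VA exceeds it and would need the L0 level");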