Patrick Georgi (pgeorgi(a)google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/8792
-gerrit
commit 307e31fabbeefd5671eada9d3d31691aa2ff956b
Author: Furquan Shaikh <furquan(a)google.com>
Date: Thu Sep 4 15:33:00 2014 -0700
libpayload arm64: Initialize and enable MMU
What this change does:
1) Initialize limited page tables as soon as we jump into libpayload. Two
ranges are initialized: one for the BASE_ADDRESS and one for the
coreboot_tables. With these page tables initialized and the MMU enabled, we
jump into the code that parses the coreboot tables.
2) Once the coreboot tables are parsed and we have a complete picture of the
memory map, we perform a complete page table initialization, enable the MMU
and then jump to the payload.
Additionally, we also:
1) Initialize DMA memory on our own based on the memory map. This ensures
that the DMA buffer is placed in 32-bit addressable memory.
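As a side note on the coreboot_tables range above: the hunk below reserves
2 * GRANULE_SIZE for the coreboot table header because a structure smaller
than one granule can still straddle a granule boundary. A minimal worked
example (illustration only, not part of the patch; the addresses are made up):

    #include <stdint.h>

    #define GRANULE_SIZE (64 * 1024)

    /* Number of 64KiB granules touched by a [base, base + size) region. */
    static uint64_t granules_spanned(uint64_t base, uint64_t size)
    {
            uint64_t first = base / GRANULE_SIZE;
            uint64_t last  = (base + size - 1) / GRANULE_SIZE;

            return last - first + 1;
    }

    /*
     * granules_spanned(0x8000F000, 0x2000) == 2 (crosses a granule boundary),
     * granules_spanned(0x80000000, 0x2000) == 1 (does not), so mapping two
     * granules is always enough for a header smaller than GRANULE_SIZE.
     */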
CQ-DEPEND=CL:216826
BUG=chrome-os-partner:31634
BRANCH=None
TEST=Compiles successfully and we are able to start execution of libpayload in
EL2 and reach kernel login prompt
Change-Id: I8a6203e465868bc2a3e5cc377e108f36cc58e2fa
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Original-Commit-Id: 7695bb7afe34ea460282125a0be440e8994b01e4
Original-Change-Id: Ie0f47b7759d4ac65a6920f7f2f7502b889afda6d
Original-Signed-off-by: Furquan Shaikh <furquan(a)google.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/216824
Original-Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
Original-Tested-by: Furquan Shaikh <furquan(a)chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan(a)chromium.org>
---
payloads/libpayload/arch/arm64/main.c | 64 +++++++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/payloads/libpayload/arch/arm64/main.c b/payloads/libpayload/arch/arm64/main.c
index 6b45a01..4561e2e 100644
--- a/payloads/libpayload/arch/arm64/main.c
+++ b/payloads/libpayload/arch/arm64/main.c
@@ -29,6 +29,7 @@
#include <exception.h>
#include <libpayload.h>
+#include <arch/mmu.h>
unsigned int main_argc; /**< The argc value to pass to main() */
@@ -48,6 +49,64 @@ static int test_exception(void)
return 0;
}
+/*
+ * Func: pre_sysinfo_scan_mmu_setup
+ * Desc: We need to setup and enable MMU before we can go to scan coreboot
+ * tables. However, we are not sure what all memory regions to map. Thus,
+ * initializing minimum required memory ranges
+ */
+static void pre_sysinfo_scan_mmu_setup(void)
+{
+ uint64_t start = (uint64_t)&_start;
+ uint64_t end = (uint64_t)&_end;
+
+ /* Memory range 1: Covers the area occupied by payload */
+ mmu_presysinfo_memory_used(start, end - start);
+
+ /*
+ * Memory range 2: Coreboot tables
+ *
+ * Maximum size is assumed 2 pages in case it crosses the GRANULE_SIZE
+ * boundary
+ */
+ mmu_presysinfo_memory_used((uint64_t)get_cb_header_ptr(),
+ 2 * GRANULE_SIZE);
+
+ mmu_presysinfo_enable();
+}
+
+/*
+ * Func: post_sysinfo_scan_mmu_setup
+ * Desc: Once we have scanned coreboot tables, we have complete information
+ * about different memory ranges. Thus, we can perform a complete mmu
+ * initialization. Also, this takes care of DMA area setup
+ */
+static void post_sysinfo_scan_mmu_setup(void)
+{
+ struct memrange *ranges;
+ uint64_t nranges;
+ struct mmu_ranges mmu_ranges;
+ struct mmu_memrange *dma_range;
+
+ /* Get memrange info from lib_sysinfo */
+ lib_sysinfo_get_memranges(&ranges, &nranges);
+
+ /* Get memory ranges for mmu init from lib_sysinfo memrange */
+ dma_range = mmu_init_ranges_from_sysinfo(ranges, nranges, &mmu_ranges);
+
+ /* Disable mmu */
+ mmu_disable();
+
+ /* Init mmu */
+ mmu_init(&mmu_ranges);
+
+ /* Enable mmu */
+ mmu_enable();
+
+ /* Init dma memory */
+ init_dma_memory((void *)dma_range->base, dma_range->size);
+}
+
/**
* This is our C entry function - set up the system
* and jump into the payload entry point.
@@ -57,12 +116,17 @@ void start_main(void)
{
extern int main(int argc, char **argv);
+ pre_sysinfo_scan_mmu_setup();
+
/* Gather system information. */
lib_get_sysinfo();
#ifndef CONFIG_LP_SKIP_CONSOLE_INIT
console_init();
#endif
+
+ post_sysinfo_scan_mmu_setup();
+
printf("ARM64: Libpayload %s\n",__func__);
exception_init();
Patrick Georgi (pgeorgi(a)google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/8793
-gerrit
commit 0a76a846018a32f66bde3f44f8d962ed8f89866b
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Tue Oct 7 23:36:55 2014 -0500
libpayload arm64: fix mmu bugs
1. keep functions and objects used entirely within mmu.c as static.
2. DMA region finding needs to terminate. Therefore, the next address
to be attempted needs to be less than the current end address.
3. Ensure mmu_ranges passed to mmu_init_ranges_from_sysinfo() has
0 entries marked as used.
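For point 2, a minimal sketch (not the patch itself) of why the reworked
search loop in mmu_add_dma_range() now terminates: after a candidate window is
rejected, end_addr is decremented before being re-aligned, so every iteration
strictly lowers end_addr until base_addr drops below the range base. Here
ALIGN_DOWN and the is_free callback stand in for the real helpers:

    #include <stdint.h>

    #define ALIGN_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))

    /* Returns the chosen base address, or 0 if the range has no free window. */
    static uint64_t find_free_window(uint64_t range_base, uint64_t end_addr,
                                     uint64_t size, uint64_t align,
                                     int (*is_free)(uint64_t, uint64_t))
    {
            uint64_t base_addr;

            while (1) {
                    end_addr = ALIGN_DOWN(end_addr, align);
                    base_addr = end_addr - size;

                    /* Ran out of room in this memrange: give up on it. */
                    if (base_addr < range_base)
                            return 0;

                    if (is_free(base_addr, end_addr))
                            return base_addr;

                    /*
                     * Step below the current window before re-aligning; this
                     * is what guarantees forward progress (the old do/while
                     * could retry the same end_addr forever).
                     */
                    end_addr -= 1;
            }
    }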
BUG=chrome-os-partner:31634
BRANCH=None
TEST=Booted ryu with RAM hole above cbmem tables below 4GiB.
Change-Id: I71a9cb89466978aa63fca5d8bee97b8af75ea206
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Original-Commit-Id: 66518fd86e676bbddf52e9d9afdd76d72c8e2222
Original-Change-Id: I5cb4e5009359cb04c4e1b5fe60845f80fbdff02c
Original-Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
Original-Reviewed-on: https://chromium-review.googlesource.com/221725
Original-Reviewed-by: Furquan Shaikh <furquan(a)chromium.org>
Original-Tested-by: Furquan Shaikh <furquan(a)chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan(a)chromium.org>
---
payloads/libpayload/arch/arm64/mmu.c | 60 ++++++++++++++++------------
payloads/libpayload/include/arm64/arch/mmu.h | 4 --
2 files changed, 35 insertions(+), 29 deletions(-)
diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
index 85f3ac9..123b9b1 100644
--- a/payloads/libpayload/arch/arm64/mmu.c
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -54,7 +54,7 @@ static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __attribute__((aligned(GRANULE_SIZE)
* the DMA buffer is being placed in a sane location and does not overlap any of
* the used mem ranges.
*/
-struct mmu_ranges usedmem_ranges;
+static struct mmu_ranges usedmem_ranges;
static const uint64_t level_to_addr_mask[] = {
L1_ADDR_MASK,
@@ -427,6 +427,29 @@ static int mmu_is_dma_range_valid(uint64_t dma_base,
}
/*
+ * Func: mmu_add_memrange
+ * Desc: Adds a new memory range
+ */
+static struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r,
+ uint64_t base, uint64_t size,
+ uint64_t type)
+{
+ struct mmu_memrange *curr = NULL;
+ int i = r->used;
+
+ if (i < ARRAY_SIZE(r->entries)) {
+ curr = &r->entries[i];
+ curr->base = base;
+ curr->size = size;
+ curr->type = type;
+
+ r->used = i + 1;
+ }
+
+ return curr;
+}
+
+/*
* Func: mmu_add_dma_range
* Desc: Add a memrange for dma operations. This is special because we want to
* initialize this memory as non-cacheable. We have a constraint that the DMA
@@ -458,7 +481,7 @@ static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
* We need to ensure that we do not step over payload regions or
* the coreboot_table
*/
- do {
+ while (1) {
/*
* If end_addr is aligned to GRANULE_SIZE,
* then base_addr will be too.
@@ -472,7 +495,13 @@ static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
if (base_addr < r[i].base)
break;
- } while (mmu_is_dma_range_valid(base_addr, end_addr) == 0);
+
+ if (mmu_is_dma_range_valid(base_addr, end_addr))
+ break;
+
+ /* Drop to the next address. */
+ end_addr -= 1;
+ }
if (base_addr < r[i].base)
continue;
@@ -557,6 +586,9 @@ struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
{
struct mmu_memrange *dma_range;
+ /* Initialize mmu_ranges to contain no entries. */
+ mmu_ranges->used = 0;
+
/* Extract ranges from memrange in lib_sysinfo */
mmu_extract_ranges(cb_ranges, ncb, mmu_ranges);
@@ -570,28 +602,6 @@ struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
}
/*
- * Func: mmu_add_memrange
- * Desc: Adds a new memory range
- */
-struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r, uint64_t base,
- uint64_t size, uint64_t type)
-{
- struct mmu_memrange *curr = NULL;
- int i = r->used;
-
- if (i < ARRAY_SIZE(r->entries)) {
- curr = &r->entries[i];
- curr->base = base;
- curr->size = size;
- curr->type = type;
-
- r->used = i + 1;
- }
-
- return curr;
-}
-
-/*
* Func: mmu_presysinfo_memory_used
* Desc: Initializes all the memory used for presysinfo page table
* initialization and enabling of MMU. All these ranges are stored in
diff --git a/payloads/libpayload/include/arm64/arch/mmu.h b/payloads/libpayload/include/arm64/arch/mmu.h
index 0937f4b..fdb1cc8 100644
--- a/payloads/libpayload/include/arm64/arch/mmu.h
+++ b/payloads/libpayload/include/arm64/arch/mmu.h
@@ -200,10 +200,6 @@ struct mmu_memrange* mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
uint64_t ncb,
struct mmu_ranges *mmu_ranges);
-/* Add a new mmu_memrange */
-struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r, uint64_t base,
- uint64_t size, uint64_t type);
-
/*
* Functions for handling the initialization of memory ranges and enabling mmu
* before coreboot tables are parsed
Patrick Georgi (pgeorgi(a)google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/8794
-gerrit
commit bba1de41b9b952369edd36e4fd860c2dc74e746a
Author: Furquan Shaikh <furquan(a)google.com>
Date: Wed Oct 8 01:04:18 2014 -0700
libpayload arm64: Add function to get new range from available memranges
Provide a function to obtain a new memrange with requested properties (type,
size, alignment, max_addr and other restrictions) from the set of available
memranges passed in the coreboot tables. One user of this function is
obtaining a memrange for dma; another would be the framebuffer.
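For the framebuffer case mentioned above, a hypothetical caller might look
like the sketch below. The field names come from the mmu_new_range_prop
struct added in this patch, and the wrapper would live in
payloads/libpayload/arch/arm64/mmu.c next to mmu_add_dma_range(), since
mmu_alloc_range() is static there; the 16MiB size and the non-cacheable
TYPE_DMA_MEM mapping are assumptions for illustration only:

    static struct mmu_memrange *mmu_add_fb_range(struct mmu_ranges *mmu_ranges)
    {
            struct mmu_new_range_prop prop;

            prop.type = TYPE_DMA_MEM;              /* map non-cacheable */
            prop.size = 16ULL * 1024 * 1024;       /* assumed framebuffer size */
            prop.lim_excl = MIN_64_BIT_ADDR;       /* keep it below 4GiB */
            prop.align = GRANULE_SIZE;
            prop.is_valid_range = NULL;            /* no extra caller-side check */
            prop.src_type = TYPE_NORMAL_MEM;       /* carve it out of normal RAM */

            return mmu_alloc_range(mmu_ranges, &prop);
    }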
BUG=chrome-os-partner:31634
BRANCH=None
TEST=Compiles successfully and boots to kernel prompt
Change-Id: Ic5a63ca2dca6c71f4ca2d77e2e2c8180d32a38e0
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Original-Commit-Id: 3cd75756e1405e044c029f2878bfcc9c8c962bdf
Original-Change-Id: I187d73a4d55d3c6f49afbe9852901672d25de8dc
Original-Signed-off-by: Furquan Shaikh <furquan(a)google.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/222110
Original-Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
Original-Tested-by: Furquan Shaikh <furquan(a)chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan(a)chromium.org>
---
payloads/libpayload/arch/arm64/mmu.c | 210 ++++++++++++++++++++++++-----------
1 file changed, 144 insertions(+), 66 deletions(-)
diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
index 123b9b1..b69dac2 100644
--- a/payloads/libpayload/arch/arm64/mmu.c
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -391,8 +391,54 @@ void mmu_enable(void)
}
/*
- * Func: mmu_is_dma_range_valid
- * Desc: We need to ensure that the dma buffer being allocated doesnt overlap
+ * Func: mmu_add_memrange
+ * Desc: Adds a new memory range
+ */
+static struct mmu_memrange *mmu_add_memrange(struct mmu_ranges *r,
+ uint64_t base, uint64_t size,
+ uint64_t type)
+{
+ struct mmu_memrange *curr = NULL;
+ int i = r->used;
+
+ if (i < ARRAY_SIZE(r->entries)) {
+ curr = &r->entries[i];
+ curr->base = base;
+ curr->size = size;
+ curr->type = type;
+
+ r->used = i + 1;
+ }
+
+ return curr;
+}
+
+/* Structure to define properties of new memrange request */
+struct mmu_new_range_prop {
+ /* Type of memrange */
+ uint64_t type;
+ /* Size of the range */
+ uint64_t size;
+ /*
+ * If any restrictions on the max addr limit(This addr is exclusive for
+ * the range), else 0
+ */
+ uint64_t lim_excl;
+ /* If any restrictions on alignment of the range base, else 0 */
+ uint64_t align;
+ /*
+ * Function to test whether selected range is fine.
+ * NULL=any range is fine
+ * Return value 1=valid range, 0=otherwise
+ */
+ int (*is_valid_range)(uint64_t, uint64_t);
+ /* From what type of source range should this range be extracted */
+ uint64_t src_type;
+};
+
+/*
+ * Func: mmu_is_range_free
+ * Desc: We need to ensure that the new range being allocated doesnt overlap
* with any used memory range. Basically:
* 1. Memory ranges used by the payload (usedmem_ranges)
* 2. Any area that falls below _end symbol in linker script (Kernel needs to be
@@ -404,22 +450,23 @@ void mmu_enable(void)
* proper. If there is any memory used above the _end symbol, then it should be
* marked as used memory in usedmem_ranges during the presysinfo_scan.
*/
-static int mmu_is_dma_range_valid(uint64_t dma_base,
- uint64_t dma_end)
+static int mmu_is_range_free(uint64_t r_base,
+ uint64_t r_end)
{
uint64_t payload_end = (uint64_t)&_end;
- uint64_t i = 0;
+ uint64_t i;
struct mmu_memrange *r = &usedmem_ranges.entries[0];
- if ((dma_base <= payload_end) || (dma_end <= payload_end))
+ /* Allocate memranges only above payload */
+ if ((r_base <= payload_end) || (r_end <= payload_end))
return 0;
- for (; i < usedmem_ranges.used; i++) {
+ for (i = 0; i < usedmem_ranges.used; i++) {
uint64_t start = r[i].base;
uint64_t end = start + r[i].size;
- if (((dma_base >= start) && (dma_base <= end)) ||
- ((dma_end >= start) && (dma_end <= end)))
+ if (((r_base >= start) && (r_base <= end)) ||
+ ((r_end >= start) && (r_end <= end)))
return 0;
}
@@ -427,76 +474,58 @@ static int mmu_is_dma_range_valid(uint64_t dma_base,
}
/*
- * Func: mmu_add_memrange
- * Desc: Adds a new memory range
- */
-static struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r,
- uint64_t base, uint64_t size,
- uint64_t type)
-{
- struct mmu_memrange *curr = NULL;
- int i = r->used;
-
- if (i < ARRAY_SIZE(r->entries)) {
- curr = &r->entries[i];
- curr->base = base;
- curr->size = size;
- curr->type = type;
-
- r->used = i + 1;
- }
-
- return curr;
-}
-
-/*
- * Func: mmu_add_dma_range
- * Desc: Add a memrange for dma operations. This is special because we want to
- * initialize this memory as non-cacheable. We have a constraint that the DMA
- * buffer should be below 4GiB(32-bit only). So, we lookup a TYPE_NORMAL_MEM
- * from the lowest available addresses and align it to page size i.e. 64KiB.
+ * Func: mmu_get_new_range
+ * Desc: Add a requested new memrange. We take as input set of all memranges and
+ * a structure to define the new memrange properties i.e. its type, size,
+ * max_addr it can grow upto, alignment restrictions, source type to take range
+ * from and finally a function pointer to check if the chosen range is valid.
*/
-static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
+static struct mmu_memrange *mmu_get_new_range(struct mmu_ranges *mmu_ranges,
+ struct mmu_new_range_prop *new)
{
int i = 0;
struct mmu_memrange *r = &mmu_ranges->entries[0];
+ if (new->size == 0) {
+ printf("MMU Error: Invalid range size\n");
+ return NULL;
+ }
+
for (; i < mmu_ranges->used; i++) {
- if ((r[i].type != TYPE_NORMAL_MEM) ||
- (r[i].size < DMA_DEFAULT_SIZE) ||
- (r[i].base >= MIN_64_BIT_ADDR))
+ if ((r[i].type != new->src_type) ||
+ (r[i].size < new->size) ||
+ (new->lim_excl && (r[i].base >= new->lim_excl)))
continue;
uint64_t base_addr;
uint64_t range_end_addr = r[i].base + r[i].size;
- uint64_t size;
uint64_t end_addr = range_end_addr;
- /* Make sure we choose only 32-bit address range for DMA */
- if (end_addr > MIN_64_BIT_ADDR)
- end_addr = MIN_64_BIT_ADDR;
+ /* Make sure we do not go above max if it is non-zero */
+ if (new->lim_excl && (end_addr >= new->lim_excl))
+ end_addr = new->lim_excl;
- /*
- * We need to ensure that we do not step over payload regions or
- * the coreboot_table
- */
while (1) {
/*
- * If end_addr is aligned to GRANULE_SIZE,
- * then base_addr will be too.
- * (DMA_DEFAULT_SIZE is multiple of GRANULE_SIZE)
+ * In case of alignment requirement,
+ * if end_addr is aligned, then base_addr will be too.
*/
- assert((DMA_DEFAULT_SIZE % GRANULE_SIZE) == 0);
- end_addr = ALIGN_DOWN(end_addr, GRANULE_SIZE);
+ if (new->align)
+ end_addr = ALIGN_DOWN(end_addr, new->align);
- base_addr = end_addr - DMA_DEFAULT_SIZE;
- size = end_addr - base_addr;
+ base_addr = end_addr - new->size;
if (base_addr < r[i].base)
break;
- if (mmu_is_dma_range_valid(base_addr, end_addr))
+ /*
+ * If the selected range is not used and valid for the
+ * user, move ahead with it
+ */
+ if (mmu_is_range_free(base_addr, end_addr) &&
+ ((new->is_valid_range == NULL) ||
+ new->is_valid_range(base_addr, end_addr)))
break;
/* Drop to the next address. */
@@ -506,11 +535,6 @@ static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
if (base_addr < r[i].base)
continue;
- if (r[i].size == size) {
- r[i].type = TYPE_DMA_MEM;
- return &r[i];
- }
-
if (end_addr != range_end_addr) {
/* Add a new memrange since we split up one
* range crossing the 4GiB boundary or doing an
@@ -519,13 +543,19 @@ static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
r[i].size -= (range_end_addr - end_addr);
if (mmu_add_memrange(mmu_ranges, end_addr,
range_end_addr - end_addr,
- TYPE_NORMAL_MEM) == NULL)
+ r[i].type) == NULL)
mmu_error();
}
- r[i].size -= size;
+ if (r[i].size == new->size) {
+ r[i].type = new->type;
+ return &r[i];
+ }
- r = mmu_add_memrange(mmu_ranges, base_addr, size, TYPE_DMA_MEM);
+ r[i].size -= new->size;
+
+ r = mmu_add_memrange(mmu_ranges, base_addr, new->size,
+ new->type);
if (r == NULL)
mmu_error();
@@ -534,11 +564,59 @@ static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
}
/* Should never reach here if everything went fine */
- printf("ARM64 ERROR: No DMA region allocated\n");
+ printf("ARM64 ERROR: No region allocated\n");
return NULL;
}
/*
+ * Func: mmu_alloc_range
+ * Desc: Call get_new_range to get a new memrange which is unused and mark it as
+ * used to avoid same range being allocated for different purposes.
+ */
+static struct mmu_memrange *mmu_alloc_range(struct mmu_ranges *mmu_ranges,
+ struct mmu_new_range_prop *p)
+{
+ struct mmu_memrange *r = mmu_get_new_range(mmu_ranges, p);
+
+ if (r == NULL)
+ return NULL;
+
+ /*
+ * Mark this memrange as used memory. Important since function
+ * can be called multiple times and we do not want to reuse some
+ * range already allocated.
+ */
+ if (mmu_add_memrange(&usedmem_ranges, r->base, r->size, r->type)
+ == NULL)
+ mmu_error();
+
+ return r;
+}
+
+/*
+ * Func: mmu_add_dma_range
+ * Desc: Add a memrange for dma operations. This is special because we want to
+ * initialize this memory as non-cacheable. We have a constraint that the DMA
+ * buffer should be below 4GiB(32-bit only). So, we lookup a TYPE_NORMAL_MEM
+ * from the lowest available addresses and align it to page size i.e. 64KiB.
+ */
+static struct mmu_memrange *mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
+{
+ struct mmu_new_range_prop prop;
+
+ prop.type = TYPE_DMA_MEM;
+ /* DMA_DEFAULT_SIZE is multiple of GRANULE_SIZE */
+ assert((DMA_DEFAULT_SIZE % GRANULE_SIZE) == 0);
+ prop.size = DMA_DEFAULT_SIZE;
+ prop.lim_excl = MIN_64_BIT_ADDR;
+ prop.align = GRANULE_SIZE;
+ prop.is_valid_range = NULL;
+ prop.src_type = TYPE_NORMAL_MEM;
+
+ return mmu_alloc_range(mmu_ranges, &prop);
+}
+
+/*
* Func: mmu_extract_ranges
* Desc: Assumption is that coreboot tables have memranges in sorted
* order. So, if there is an opportunity to combine ranges, we do that as
Patrick Georgi (pgeorgi(a)google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/8795
-gerrit
commit 7a415c9ab1ac3ad419cc88b7c83c0a9233e4cbd5
Author: Furquan Shaikh <furquan(a)google.com>
Date: Thu Oct 9 15:59:12 2014 -0700
libpayload arm64: Move console_init after post_sysinfo_mmu_setup call
This is important since the mmu is disabled during the post_sysinfo_mmu_setup
call, and calling printf then can cause unaligned accesses.
BUG=None
BRANCH=None
TEST=Compiles successfully and boots to kernel prompt with console_init
Change-Id: I5ef72ee449fdcf30186f97485cc532d6c56b2c5d
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Original-Commit-Id: 688ef3856d0502d057c9543ee7763601156e6385
Original-Change-Id: Ie376e394d084edd6c999fc9edde79f15a0264e7b
Original-Signed-off-by: Furquan Shaikh <furquan(a)google.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/222664
Original-Reviewed-by: Jimmy Zhang <jimmzhang(a)nvidia.com>
Original-Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
Original-Tested-by: Furquan Shaikh <furquan(a)chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan(a)chromium.org>
---
payloads/libpayload/arch/arm64/main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/payloads/libpayload/arch/arm64/main.c b/payloads/libpayload/arch/arm64/main.c
index 4561e2e..aa02ff7 100644
--- a/payloads/libpayload/arch/arm64/main.c
+++ b/payloads/libpayload/arch/arm64/main.c
@@ -121,12 +121,12 @@ void start_main(void)
/* Gather system information. */
lib_get_sysinfo();
+ post_sysinfo_scan_mmu_setup();
+
#ifndef CONFIG_LP_SKIP_CONSOLE_INIT
console_init();
#endif
- post_sysinfo_scan_mmu_setup();
-
printf("ARM64: Libpayload %s\n",__func__);
exception_init();
Patrick Georgi (pgeorgi(a)google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/8789
-gerrit
commit d375eb0c33fdeea7b2936601a21a8ed2a083375c
Author: Furquan Shaikh <furquan(a)google.com>
Date: Thu Sep 4 15:22:53 2014 -0700
libpayload arm64: Remove dependency on coreboot tables for dma areas
Libpayload should be able to set up its own dma areas and not depend on coreboot
tables for passing this information. This patch and the next allow libpayload to
set up dma areas while performing mmu_init.
BUG=chrome-os-partner:31634
BRANCH=None
TEST=Compiles successfully and dma areas are set up properly with the mmu init patch
Change-Id: I5f6fd19a957c7626a2bbe6b826c8987e64ed248f
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Original-Commit-Id: 4f3552b8d3439a8b12d1e0b15ef67dcb14b8c96a
Original-Change-Id: I44d9f394fa349abd7182c4ba10f1eaefd6e4fdaa
Original-Signed-off-by: Furquan Shaikh <furquan(a)google.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/216822
Original-Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
Original-Tested-by: Furquan Shaikh <furquan(a)chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan(a)chromium.org>
---
payloads/libpayload/arch/arm64/coreboot.c | 12 +-----------
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/payloads/libpayload/arch/arm64/coreboot.c b/payloads/libpayload/arch/arm64/coreboot.c
index 056d63e..59f84c0 100644
--- a/payloads/libpayload/arch/arm64/coreboot.c
+++ b/payloads/libpayload/arch/arm64/coreboot.c
@@ -35,27 +35,17 @@
/* This pointer gets set in head.S and is passed in from coreboot. */
void *cb_header_ptr;
-static void cb_parse_dma(void *ptr)
-{
- struct lb_range *dma = (struct lb_range *)ptr;
- init_dma_memory(phys_to_virt(dma->range_start), dma->range_size);
-}
+/* == Architecture specific == */
int cb_parse_arch_specific(struct cb_record *rec, struct sysinfo_t *info)
{
switch(rec->tag) {
- case CB_TAG_DMA:
- cb_parse_dma(rec);
- break;
default:
return 0;
}
return 1;
}
-/* == Architecture specific == */
-/* FIXME put in actual address range */
-
int get_coreboot_info(struct sysinfo_t *info)
{
return cb_parse_header(cb_header_ptr, 1, info);
Patrick Georgi (pgeorgi(a)google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/8791
-gerrit
commit ce2b02709460bbfacdbd2b83a3b692cce89cca86
Author: Furquan Shaikh <furquan(a)google.com>
Date: Thu Sep 4 15:32:17 2014 -0700
libpayload arm64: Add support for mmu
Adds support for initializing the mmu, setting up dma areas and enabling the mmu
based on the memranges passed in the coreboot tables.
CQ-DEPEND=CL:216826
BUG=chrome-os-partner:31634
BRANCH=None
TEST=Compiles successfully
Change-Id: Id41a4255f1cd45a9455840f1eaa53503bd6fef3f
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Original-Commit-Id: f2c6676bf51fcd85b61e9e08a261634a78137c4c
Original-Change-Id: I217bc5a5aff6a1fc0809c769822d820316d5c434
Original-Signed-off-by: Furquan Shaikh <furquan(a)google.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/216823
Original-Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
Original-Tested-by: Furquan Shaikh <furquan(a)chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan(a)chromium.org>
---
payloads/libpayload/arch/arm64/Makefile.inc | 1 +
payloads/libpayload/arch/arm64/cache.c | 24 -
payloads/libpayload/arch/arm64/mmu.c | 618 +++++++++++++++++++++++++
payloads/libpayload/include/arm64/arch/cache.h | 3 -
payloads/libpayload/include/arm64/arch/mmu.h | 213 +++++++++
5 files changed, 832 insertions(+), 27 deletions(-)
diff --git a/payloads/libpayload/arch/arm64/Makefile.inc b/payloads/libpayload/arch/arm64/Makefile.inc
index 1c23a47..ddf0550 100644
--- a/payloads/libpayload/arch/arm64/Makefile.inc
+++ b/payloads/libpayload/arch/arm64/Makefile.inc
@@ -40,6 +40,7 @@ libc-y += memcpy.S memset.S memmove.S
libc-y += exception_asm.S exception.c
libc-y += cache.c cpu.S
libc-y += selfboot.c
+libc-y += mmu.c
libcbfs-$(CONFIG_LP_CBFS) += dummy_media.c
libgdb-y += gdb.c
diff --git a/payloads/libpayload/arch/arm64/cache.c b/payloads/libpayload/arch/arm64/cache.c
index 1a9e7a6..2ce1cc4 100644
--- a/payloads/libpayload/arch/arm64/cache.c
+++ b/payloads/libpayload/arch/arm64/cache.c
@@ -117,30 +117,6 @@ void dcache_invalidate_by_mva(void const *addr, size_t len)
dcache_op_va(addr, len, OP_DCIVAC);
}
-/*
- * CAUTION: This implementation assumes that coreboot never uses non-identity
- * page tables for pages containing executed code. If you ever want to violate
- * this assumption, have fun figuring out the associated problems on your own.
- */
-void dcache_mmu_disable(void)
-{
- uint32_t sctlr;
-
- dcache_clean_invalidate_all();
- sctlr = raw_read_sctlr_current();
- sctlr &= ~(SCTLR_C | SCTLR_M);
- raw_write_sctlr_current(sctlr);
-}
-
-void dcache_mmu_enable(void)
-{
- uint32_t sctlr;
-
- sctlr = raw_read_sctlr_current();
- sctlr |= SCTLR_C | SCTLR_M;
- raw_write_sctlr_current(sctlr);
-}
-
void cache_sync_instructions(void)
{
dcache_clean_all(); /* includes trailing DSB (in assembly) */
diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
new file mode 100644
index 0000000..85f3ac9
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -0,0 +1,618 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch/mmu.h>
+#include <arch/lib_helpers.h>
+#include <arch/cache.h>
+
+/* Maximum number of XLAT Tables available based on ttb buffer size */
+static unsigned int max_tables;
+/* Address of ttb buffer */
+static uint64_t *xlat_addr;
+
+static int free_idx;
+static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __attribute__((aligned(GRANULE_SIZE)));
+
+/*
+ * The usedmem_ranges is used to describe all the memory ranges that are
+ * actually used by payload i.e. _start -> _end in linker script and the
+ * coreboot tables. This is required for two purposes:
+ * 1) During the pre_sysinfo_scan_mmu_setup, these are the only ranges
+ * initialized in the page table as we do not know the entire memory map.
+ * 2) During the post_sysinfo_scan_mmu_setup, these ranges are used to check if
+ * the DMA buffer is being placed in a sane location and does not overlap any of
+ * the used mem ranges.
+ */
+struct mmu_ranges usedmem_ranges;
+
+static const uint64_t level_to_addr_mask[] = {
+ L1_ADDR_MASK,
+ L2_ADDR_MASK,
+ L3_ADDR_MASK,
+};
+
+static const uint64_t level_to_addr_shift[] = {
+ L1_ADDR_SHIFT,
+ L2_ADDR_SHIFT,
+ L3_ADDR_SHIFT,
+};
+
+static void __attribute__((noreturn)) mmu_error(void)
+{
+ halt();
+}
+
+/*
+ * Func : get_block_attr
+ * Desc : Get block descriptor attributes based on the value of tag in memrange
+ * region
+ */
+static uint64_t get_block_attr(unsigned long tag)
+{
+ uint64_t attr;
+
+ /* We should be in EL2(which is non-secure only) or EL1(non-secure) */
+ attr = BLOCK_NS;
+
+ /* Assuming whole memory is read-write */
+ attr |= BLOCK_AP_RW;
+
+ attr |= BLOCK_ACCESS;
+
+ switch (tag) {
+
+ case TYPE_NORMAL_MEM:
+ attr |= (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT);
+ break;
+ case TYPE_DEV_MEM:
+ attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
+ break;
+ case TYPE_DMA_MEM:
+ attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
+ break;
+ }
+
+ return attr;
+}
+
+/*
+ * Func : get_index_from_addr
+ * Desc : Get index into table at a given level using appropriate bits from the
+ * base address
+ */
+static uint64_t get_index_from_addr(uint64_t addr, uint8_t level)
+{
+ uint64_t mask = level_to_addr_mask[level-1];
+ uint8_t shift = level_to_addr_shift[level-1];
+
+ return ((addr & mask) >> shift);
+}
+
+/*
+ * Func : table_desc_valid
+ * Desc : Check if a table entry contains valid desc
+ */
+static uint64_t table_desc_valid(uint64_t desc)
+{
+ return((desc & TABLE_DESC) == TABLE_DESC);
+}
+
+/*
+ * Func : get_new_table
+ * Desc : Return the next free XLAT table from ttb buffer
+ */
+static uint64_t *get_new_table(void)
+{
+ uint64_t *new;
+
+ if (free_idx >= max_tables) {
+ printf("ARM64 MMU: No free table\n");
+ return NULL;
+ }
+
+ new = (uint64_t*)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
+ free_idx++;
+
+ memset(new, 0, GRANULE_SIZE);
+
+ return new;
+}
+
+/*
+ * Func : get_table_from_desc
+ * Desc : Get next level table address from table descriptor
+ */
+static uint64_t *get_table_from_desc(uint64_t desc)
+{
+ uint64_t *ptr = (uint64_t*)(desc & XLAT_TABLE_MASK);
+ return ptr;
+}
+
+/*
+ * Func: get_next_level_table
+ * Desc: Check if the table entry is a valid descriptor. If not, allocate new
+ * table, update the entry and return the table addr. If valid, return the addr.
+ */
+static uint64_t *get_next_level_table(uint64_t *ptr)
+{
+ uint64_t desc = *ptr;
+
+ if (!table_desc_valid(desc)) {
+ uint64_t *new_table = get_new_table();
+ if (new_table == NULL)
+ return NULL;
+ desc = ((uint64_t)new_table) | TABLE_DESC;
+ *ptr = desc;
+ }
+ return get_table_from_desc(desc);
+}
+
+/*
+ * Func : init_xlat_table
+ * Desc : Given a base address and size, it identifies the indices within
+ * different level XLAT tables which map the given base addr. Similar to table
+ * walk, except that all invalid entries during the walk are updated
+ * accordingly. On success, it returns the size of the block/page addressed by
+ * the final table.
+ */
+static uint64_t init_xlat_table(uint64_t base_addr,
+ uint64_t size,
+ uint64_t tag)
+{
+ uint64_t l1_index = get_index_from_addr(base_addr,1);
+ uint64_t l2_index = get_index_from_addr(base_addr,2);
+ uint64_t l3_index = get_index_from_addr(base_addr,3);
+ uint64_t *table = xlat_addr;
+ uint64_t desc;
+ uint64_t attr = get_block_attr(tag);
+
+ /*
+ * L1 table lookup
+ * If VA has bits more than 41, lookup starts at L1
+ */
+ if (l1_index) {
+ table = get_next_level_table(&table[l1_index]);
+ if (!table)
+ return 0;
+ }
+
+ /*
+ * L2 table lookup
+ * If lookup was performed at L1, L2 table addr is obtained from L1 desc
+ * else, lookup starts at ttbr address
+ */
+ if (!l3_index && (size >= L2_XLAT_SIZE)) {
+ /*
+ * If block address is aligned and size is greater than or equal
+ * to 512MiB i.e. size addressed by each L2 entry, we can
+ * directly store a block desc
+ */
+ desc = base_addr | BLOCK_DESC | attr;
+ table[l2_index] = desc;
+ /* L3 lookup is not required */
+ return L2_XLAT_SIZE;
+ } else {
+ /* L2 entry stores a table descriptor */
+ table = get_next_level_table(&table[l2_index]);
+ if (!table)
+ return 0;
+ }
+
+ /* L3 table lookup */
+ desc = base_addr | PAGE_DESC | attr;
+ table[l3_index] = desc;
+ return L3_XLAT_SIZE;
+}
+
+/*
+ * Func : sanity_check
+ * Desc : Check if the address is aligned and size is atleast the granule size
+ */
+static uint64_t sanity_check(uint64_t addr,
+ uint64_t size)
+{
+ /* Address should be atleast 64 KiB aligned */
+ if (addr & GRANULE_SIZE_MASK)
+ return 1;
+
+ /* Size should be atleast granule size */
+ if (size < GRANULE_SIZE)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Func : init_mmap_entry
+ * Desc : For each mmap entry, this function calls init_xlat_table with the base
+ * address. Based on size returned from init_xlat_table, base_addr is updated
+ * and subsequent calls are made for initializing the xlat table until the whole
+ * region is initialized.
+ */
+static void init_mmap_entry(struct mmu_memrange *r)
+{
+ uint64_t base_addr = r->base;
+ uint64_t size = r->size;
+ uint64_t tag = r->type;
+ uint64_t temp_size = size;
+
+ while (temp_size) {
+ uint64_t ret;
+
+ if (sanity_check(base_addr,temp_size)) {
+ printf("Libpayload: ARM64 MMU: sanity check failed\n");
+ return;
+ }
+
+ ret = init_xlat_table(base_addr + (size - temp_size),
+ temp_size, tag);
+
+ if (ret == 0)
+ return;
+
+ temp_size -= ret;
+ }
+}
+
+/*
+ * Func : mmu_init
+ * Desc : Initialize mmu based on the mmu_memrange passed. ttb_buffer is used as
+ * the base address for xlat tables. TTB_DEFAULT_SIZE defines the max number of
+ * tables that can be used
+ * Assuming that memory 0-2GiB is device memory.
+ */
+uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
+{
+ struct mmu_memrange devrange = { 0, 0x80000000, TYPE_DEV_MEM };
+
+ int i = 0;
+
+ xlat_addr = (uint64_t *)&ttb_buffer;
+
+ memset((void*)xlat_addr, 0, GRANULE_SIZE);
+ max_tables = (TTB_DEFAULT_SIZE >> GRANULE_SIZE_SHIFT);
+ free_idx = 1;
+
+ printf("Libpayload ARM64: TTB_BUFFER: 0x%p Max Tables: %d\n",
+ (void*)xlat_addr, max_tables);
+
+ init_mmap_entry(&devrange);
+
+ for (; i < mmu_ranges->used; i++) {
+ init_mmap_entry(&mmu_ranges->entries[i]);
+ }
+
+ printf("Libpayload ARM64: MMU init done\n");
+ return 0;
+}
+
+static uint32_t is_mmu_enabled(void)
+{
+ uint32_t sctlr;
+
+ sctlr = raw_read_sctlr_current();
+
+ return (sctlr & SCTLR_M);
+}
+
+/*
+ * Func: mmu_disable
+ * Desc: Invalidate caches and disable mmu
+ */
+void mmu_disable(void)
+{
+ uint32_t sctlr;
+
+ sctlr = raw_read_sctlr_current();
+ sctlr &= ~(SCTLR_C | SCTLR_M | SCTLR_I);
+
+ tlbiall_current();
+ dcache_clean_invalidate_all();
+
+ dsb();
+ isb();
+
+ raw_write_sctlr_current(sctlr);
+
+ dcache_clean_invalidate_all();
+ dsb();
+ isb();
+}
+
+/*
+ * Func: mmu_enable
+ * Desc: Initialize MAIR, TCR, TTBR and enable MMU by setting appropriate bits
+ * in SCTLR
+ */
+void mmu_enable(void)
+{
+ uint32_t sctlr;
+
+ /* Initialize MAIR indices */
+ raw_write_mair_current(MAIR_ATTRIBUTES);
+
+ /* Invalidate TLBs */
+ tlbiall_current();
+
+ /* Initialize TCR flags */
+ raw_write_tcr_current(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
+ TCR_SH0_IS | TCR_TG0_64KB | TCR_PS_64GB |
+ TCR_TBI_USED);
+
+ /* Initialize TTBR */
+ raw_write_ttbr0_current((uintptr_t)xlat_addr);
+
+ /* Ensure all translation table writes are committed before enabling MMU */
+ dsb();
+ isb();
+
+ /* Enable MMU */
+ sctlr = raw_read_sctlr_current();
+ sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
+ raw_write_sctlr_current(sctlr);
+
+ isb();
+
+ if(is_mmu_enabled())
+ printf("ARM64: MMU enable done\n");
+ else
+ printf("ARM64: MMU enable failed\n");
+}
+
+/*
+ * Func: mmu_is_dma_range_valid
+ * Desc: We need to ensure that the dma buffer being allocated doesnt overlap
+ * with any used memory range. Basically:
+ * 1. Memory ranges used by the payload (usedmem_ranges)
+ * 2. Any area that falls below _end symbol in linker script (Kernel needs to be
+ * loaded in lower areas of memory, So, the payload linker script can have
+ * kernel memory below _start and _end. Thus, we want to make sure we do not
+ * step in those areas as well.
+ * Returns: 1 on success, 0 on error
+ * ASSUMPTION: All the memory used by payload resides below the program
+ * proper. If there is any memory used above the _end symbol, then it should be
+ * marked as used memory in usedmem_ranges during the presysinfo_scan.
+ */
+static int mmu_is_dma_range_valid(uint64_t dma_base,
+ uint64_t dma_end)
+{
+ uint64_t payload_end = (uint64_t)&_end;
+ uint64_t i = 0;
+ struct mmu_memrange *r = &usedmem_ranges.entries[0];
+
+ if ((dma_base <= payload_end) || (dma_end <= payload_end))
+ return 0;
+
+ for (; i < usedmem_ranges.used; i++) {
+ uint64_t start = r[i].base;
+ uint64_t end = start + r[i].size;
+
+ if (((dma_base >= start) && (dma_base <= end)) ||
+ ((dma_end >= start) && (dma_end <= end)))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Func: mmu_add_dma_range
+ * Desc: Add a memrange for dma operations. This is special because we want to
+ * initialize this memory as non-cacheable. We have a constraint that the DMA
+ * buffer should be below 4GiB(32-bit only). So, we lookup a TYPE_NORMAL_MEM
+ * from the lowest available addresses and align it to page size i.e. 64KiB.
+ */
+static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
+{
+ int i = 0;
+ struct mmu_memrange *r = &mmu_ranges->entries[0];
+
+ for (; i < mmu_ranges->used; i++) {
+
+ if ((r[i].type != TYPE_NORMAL_MEM) ||
+ (r[i].size < DMA_DEFAULT_SIZE) ||
+ (r[i].base >= MIN_64_BIT_ADDR))
+ continue;
+
+ uint64_t base_addr;
+ uint64_t range_end_addr = r[i].base + r[i].size;
+ uint64_t size;
+ uint64_t end_addr = range_end_addr;
+
+ /* Make sure we choose only 32-bit address range for DMA */
+ if (end_addr > MIN_64_BIT_ADDR)
+ end_addr = MIN_64_BIT_ADDR;
+
+ /*
+ * We need to ensure that we do not step over payload regions or
+ * the coreboot_table
+ */
+ do {
+ /*
+ * If end_addr is aligned to GRANULE_SIZE,
+ * then base_addr will be too.
+ * (DMA_DEFAULT_SIZE is multiple of GRANULE_SIZE)
+ */
+ assert((DMA_DEFAULT_SIZE % GRANULE_SIZE) == 0);
+ end_addr = ALIGN_DOWN(end_addr, GRANULE_SIZE);
+
+ base_addr = end_addr - DMA_DEFAULT_SIZE;
+ size = end_addr - base_addr;
+
+ if (base_addr < r[i].base)
+ break;
+ } while (mmu_is_dma_range_valid(base_addr, end_addr) == 0);
+
+ if (base_addr < r[i].base)
+ continue;
+
+ if (r[i].size == size) {
+ r[i].type = TYPE_DMA_MEM;
+ return &r[i];
+ }
+
+ if (end_addr != range_end_addr) {
+ /* Add a new memrange since we split up one
+ * range crossing the 4GiB boundary or doing an
+ * ALIGN_DOWN on end_addr.
+ */
+ r[i].size -= (range_end_addr - end_addr);
+ if (mmu_add_memrange(mmu_ranges, end_addr,
+ range_end_addr - end_addr,
+ TYPE_NORMAL_MEM) == NULL)
+ mmu_error();
+ }
+
+ r[i].size -= size;
+
+ r = mmu_add_memrange(mmu_ranges, base_addr, size, TYPE_DMA_MEM);
+
+ if (r == NULL)
+ mmu_error();
+
+ return r;
+ }
+
+ /* Should never reach here if everything went fine */
+ printf("ARM64 ERROR: No DMA region allocated\n");
+ return NULL;
+}
+
+/*
+ * Func: mmu_extract_ranges
+ * Desc: Assumption is that coreboot tables have memranges in sorted
+ * order. So, if there is an opportunity to combine ranges, we do that as
+ * well. Memranges are initialized for both CB_MEM_RAM and CB_MEM_TABLE as
+ * TYPE_NORMAL_MEM.
+ */
+static void mmu_extract_ranges(struct memrange *cb_ranges,
+ uint64_t ncb,
+ struct mmu_ranges *mmu_ranges)
+{
+ int i = 0;
+ struct mmu_memrange *prev_range = NULL;
+
+ /* Extract memory ranges to be mapped */
+ for (; i < ncb; i++) {
+ switch (cb_ranges[i].type) {
+ case CB_MEM_RAM:
+ case CB_MEM_TABLE:
+ if (prev_range && (prev_range->base + prev_range->size
+ == cb_ranges[i].base)) {
+ prev_range->size += cb_ranges[i].size;
+ } else {
+ prev_range = mmu_add_memrange(mmu_ranges,
+ cb_ranges[i].base,
+ cb_ranges[i].size,
+ TYPE_NORMAL_MEM);
+ if (prev_range == NULL)
+ mmu_error();
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/*
+ * Func: mmu_init_ranges
+ * Desc: Initialize mmu_memranges based on the memranges obtained from coreboot
+ * tables. Also, initialize dma memrange and xlat_addr for ttb buffer.
+ */
+struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
+ uint64_t ncb,
+ struct mmu_ranges *mmu_ranges)
+{
+ struct mmu_memrange *dma_range;
+
+ /* Extract ranges from memrange in lib_sysinfo */
+ mmu_extract_ranges(cb_ranges, ncb, mmu_ranges);
+
+ /* Get a range for dma */
+ dma_range = mmu_add_dma_range(mmu_ranges);
+
+ if (dma_range == NULL)
+ mmu_error();
+
+ return dma_range;
+}
+
+/*
+ * Func: mmu_add_memrange
+ * Desc: Adds a new memory range
+ */
+struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r, uint64_t base,
+ uint64_t size, uint64_t type)
+{
+ struct mmu_memrange *curr = NULL;
+ int i = r->used;
+
+ if (i < ARRAY_SIZE(r->entries)) {
+ curr = &r->entries[i];
+ curr->base = base;
+ curr->size = size;
+ curr->type = type;
+
+ r->used = i + 1;
+ }
+
+ return curr;
+}
+
+/*
+ * Func: mmu_presysinfo_memory_used
+ * Desc: Initializes all the memory used for presysinfo page table
+ * initialization and enabling of MMU. All these ranges are stored in
+ * usedmem_ranges. usedmem_ranges plays an important role in selecting the dma
+ * buffer as well since we check the dma buffer range against the used memory
+ * ranges to prevent any overstepping.
+ */
+void mmu_presysinfo_memory_used(uint64_t base, uint64_t size)
+{
+ uint64_t range_base;
+
+ range_base = ALIGN_DOWN(base, GRANULE_SIZE);
+
+ size += (base - range_base);
+ size = ALIGN_UP(size, GRANULE_SIZE);
+
+ mmu_add_memrange(&usedmem_ranges, range_base, size, TYPE_NORMAL_MEM);
+}
+
+void mmu_presysinfo_enable(void)
+{
+ mmu_init(&usedmem_ranges);
+ mmu_enable();
+}
diff --git a/payloads/libpayload/include/arm64/arch/cache.h b/payloads/libpayload/include/arm64/arch/cache.h
index 5a0b3b0..cfd3109 100644
--- a/payloads/libpayload/include/arm64/arch/cache.h
+++ b/payloads/libpayload/include/arm64/arch/cache.h
@@ -107,9 +107,6 @@ void tlb_invalidate_all(void);
* Generalized setup/init functions
*/
-/* mmu initialization (set page table address, set permissions, etc) */
-void mmu_init(void);
-
enum dcache_policy {
DCACHE_OFF,
DCACHE_WRITEBACK,
diff --git a/payloads/libpayload/include/arm64/arch/mmu.h b/payloads/libpayload/include/arm64/arch/mmu.h
new file mode 100644
index 0000000..0937f4b
--- /dev/null
+++ b/payloads/libpayload/include/arm64/arch/mmu.h
@@ -0,0 +1,213 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __ARCH_ARM64_MMU_H__
+#define __ARCH_ARM64_MMU_H__
+
+#include <libpayload.h>
+
+struct mmu_memrange {
+ uint64_t base;
+ uint64_t size;
+ uint64_t type;
+};
+
+struct mmu_ranges {
+ struct mmu_memrange entries[SYSINFO_MAX_MEM_RANGES];
+ size_t used;
+};
+
+/*
+ * Symbols taken from linker script
+ * They mark the start and end of the region used by payload
+ */
+extern char _start[], _end[];
+
+/* IMPORTANT!!!!!!!
+ * Assumptions made:
+ * Granule size is 64KiB
+ * BITS per Virtual address is 33
+ * All the calculations for tables L1,L2 and L3 are based on these assumptions
+ * If these values are changed, recalculate the other macros as well
+ */
+
+/* Memory attributes for mmap regions
+ * These attributes act as tag values for memrange regions
+ */
+
+#define TYPE_NORMAL_MEM 1
+#define TYPE_DEV_MEM 2
+#define TYPE_DMA_MEM 3
+
+/* Descriptor attributes */
+
+#define INVALID_DESC 0x0
+#define BLOCK_DESC 0x1
+#define TABLE_DESC 0x3
+#define PAGE_DESC 0x3
+
+/* Block descriptor */
+#define BLOCK_NS (1 << 5)
+
+#define BLOCK_AP_RW (0 << 7)
+#define BLOCK_AP_RO (1 << 7)
+
+#define BLOCK_ACCESS (1 << 10)
+
+/* XLAT Table Init Attributes */
+
+#define VA_START 0x0
+/* If BITS_PER_VA or GRANULE_SIZE are changed, recalculate and change the
+ macros following them */
+#define BITS_PER_VA 33
+/* Granule size of 64KB is being used */
+#define MIN_64_BIT_ADDR (1UL << 32)
+#define XLAT_TABLE_MASK ~(0xffffUL)
+#define GRANULE_SIZE_SHIFT 16
+#define GRANULE_SIZE (1 << GRANULE_SIZE_SHIFT)
+#define GRANULE_SIZE_MASK ((1 << 16) - 1)
+
+#define L1_ADDR_SHIFT 42
+#define L2_ADDR_SHIFT 29
+#define L3_ADDR_SHIFT 16
+
+#define L1_ADDR_MASK (0UL << L1_ADDR_SHIFT)
+#define L2_ADDR_MASK (0xfUL << L2_ADDR_SHIFT)
+#define L3_ADDR_MASK (0x1fffUL << L3_ADDR_SHIFT)
+
+/* Dependent on BITS_PER_VA and GRANULE_SIZE */
+#define INIT_LEVEL 2
+#define XLAT_MAX_LEVEL 3
+
+/* Each entry in XLAT table is 8 bytes */
+#define XLAT_ENTRY_SHIFT 3
+#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SHIFT)
+
+#define XLAT_TABLE_SHIFT GRANULE_SIZE_SHIFT
+#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SHIFT)
+
+#define XLAT_NUM_ENTRIES_SHIFT (XLAT_TABLE_SHIFT - XLAT_ENTRY_SHIFT)
+#define XLAT_NUM_ENTRIES (1 << XLAT_NUM_ENTRIES_SHIFT)
+
+#define L3_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT)
+#define L2_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT)
+#define L1_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT)
+
+/* These macros give the size of the region addressed by each entry of a xlat
+ table at any given level */
+#define L3_XLAT_SIZE (1 << L3_XLAT_SIZE_SHIFT)
+#define L2_XLAT_SIZE (1 << L2_XLAT_SIZE_SHIFT)
+#define L1_XLAT_SIZE (1 << L1_XLAT_SIZE_SHIFT)
+
+/* Block indices required for MAIR */
+#define BLOCK_INDEX_MEM_DEV_NGNRNE 0
+#define BLOCK_INDEX_MEM_DEV_NGNRE 1
+#define BLOCK_INDEX_MEM_DEV_GRE 2
+#define BLOCK_INDEX_MEM_NORMAL_NC 3
+#define BLOCK_INDEX_MEM_NORMAL 4
+
+#define BLOCK_INDEX_SHIFT 2
+
+/* MAIR attributes */
+#define MAIR_ATTRIBUTES ((0x00 << (BLOCK_INDEX_MEM_DEV_NGNRNE*8)) | \
+ (0x04 << (BLOCK_INDEX_MEM_DEV_NGNRE*8)) | \
+ (0x0c << (BLOCK_INDEX_MEM_DEV_GRE*8)) | \
+ (0x44 << (BLOCK_INDEX_MEM_NORMAL_NC*8)) | \
+ (0xffUL << (BLOCK_INDEX_MEM_NORMAL*8)))
+
+/* TCR attributes */
+#define TCR_TOSZ (64 - BITS_PER_VA)
+
+#define TCR_IRGN0_SHIFT 8
+#define TCR_IRGN0_NM_NC (0x00 << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NM_WBWAC (0x01 << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NM_WTC (0x02 << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NM_WBNWAC (0x03 << TCR_IRGN0_SHIFT)
+
+#define TCR_ORGN0_SHIFT 10
+#define TCR_ORGN0_NM_NC (0x00 << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NM_WBWAC (0x01 << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NM_WTC (0x02 << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NM_WBNWAC (0x03 << TCR_ORGN0_SHIFT)
+
+#define TCR_SH0_SHIFT 12
+#define TCR_SH0_NC (0x0 << TCR_SH0_SHIFT)
+#define TCR_SH0_OS (0x2 << TCR_SH0_SHIFT)
+#define TCR_SH0_IS (0x3 << TCR_SH0_SHIFT)
+
+#define TCR_TG0_SHIFT 14
+#define TCR_TG0_4KB (0x0 << TCR_TG0_SHIFT)
+#define TCR_TG0_64KB (0x1 << TCR_TG0_SHIFT)
+#define TCR_TG0_16KB (0x2 << TCR_TG0_SHIFT)
+
+#define TCR_PS_SHIFT 16
+#define TCR_PS_4GB (0x0 << TCR_PS_SHIFT)
+#define TCR_PS_64GB (0x1 << TCR_PS_SHIFT)
+#define TCR_PS_1TB (0x2 << TCR_PS_SHIFT)
+#define TCR_PS_4TB (0x3 << TCR_PS_SHIFT)
+#define TCR_PS_16TB (0x4 << TCR_PS_SHIFT)
+#define TCR_PS_256TB (0x5 << TCR_PS_SHIFT)
+
+#define TCR_TBI_SHIFT 20
+#define TCR_TBI_USED (0x0 << TCR_TBI_SHIFT)
+#define TCR_TBI_IGNORED (0x1 << TCR_TBI_SHIFT)
+
+#define DMA_DEFAULT_SIZE (0x20 * GRANULE_SIZE)
+#define TTB_DEFAULT_SIZE 0x100000
+
+/* Initialize the MMU TTB tables using the mmu_ranges */
+uint64_t mmu_init(struct mmu_ranges *mmu_ranges);
+
+/* Enable the mmu based on previous mmu_init(). */
+void mmu_enable(void);
+
+/* Disable mmu */
+void mmu_disable(void);
+
+/*
+ * Based on the memory ranges provided in coreboot tables,
+ * initialize the mmu_memranges used for mmu initialization
+ * cb_ranges -> Memory ranges present in cb tables
+ * mmu_ranges -> mmu_memranges initialized by this function
+ */
+struct mmu_memrange* mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
+ uint64_t ncb,
+ struct mmu_ranges *mmu_ranges);
+
+/* Add a new mmu_memrange */
+struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r, uint64_t base,
+ uint64_t size, uint64_t type);
+
+/*
+ * Functions for handling the initialization of memory ranges and enabling mmu
+ * before coreboot tables are parsed
+ */
+void mmu_presysinfo_memory_used(uint64_t base, uint64_t size);
+void mmu_presysinfo_enable(void);
+#endif // __ARCH_ARM64_MMU_H__
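A closing note on the address-split macros in the new mmu.h: the L1/L2/L3
shifts and masks follow directly from the stated 64KiB-granule / 33-bit-VA
assumptions. A small standalone check (illustration only; the expected values
are copied from the header above):

    #include <assert.h>
    #include <stdint.h>

    #define BITS_PER_VA             33
    #define GRANULE_SIZE_SHIFT      16                       /* 64KiB granule */
    #define ENTRIES_SHIFT           (GRANULE_SIZE_SHIFT - 3) /* 8-byte table entries */

    int main(void)
    {
            uint64_t l3_shift = GRANULE_SIZE_SHIFT;          /* 16: page offset bits */
            uint64_t l2_shift = l3_shift + ENTRIES_SHIFT;    /* 29 */
            uint64_t l1_shift = l2_shift + ENTRIES_SHIFT;    /* 42 */

            /* L3 indexes VA bits [28:16] -> 0x1fff << 16, i.e. L3_ADDR_MASK. */
            assert((((1ULL << ENTRIES_SHIFT) - 1) << l3_shift) == (0x1fffULL << 16));

            /* Only 33 - 29 = 4 VA bits remain for L2 -> 0xf << 29, i.e. L2_ADDR_MASK. */
            assert((((1ULL << (BITS_PER_VA - l2_shift)) - 1) << l2_shift) == (0xfULL << 29));

            /* No VA bits reach level 1, which is why L1_ADDR_MASK is 0. */
            assert(l1_shift >= BITS_PER_VA);

            return 0;
    }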