Attention is currently required from: Julius Werner.
Vladimir Serbinenko has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/81505?usp=email )
Change subject: Support loading legacy stage ......................................................................
Support loading legacy stage
This allows creating hybrid images that boot an old signed stage image in one of RW_A/RW_B and a newer RO with newer behaviour. This makes it possible to dual-boot between stock ChromeOS and custom coreboot
Change-Id: I4ae29a6227235c86f9f846986c18b361c3b3c78d Signed-off-by: Vladimir Serbinenko phcoder@gmail.com --- M src/commonlib/bsd/include/commonlib/bsd/cbfs_serialized.h M src/lib/cbfs.c 2 files changed, 77 insertions(+), 27 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/05/81505/1
diff --git a/src/commonlib/bsd/include/commonlib/bsd/cbfs_serialized.h b/src/commonlib/bsd/include/commonlib/bsd/cbfs_serialized.h index b6a7baa..4109f1c 100644 --- a/src/commonlib/bsd/include/commonlib/bsd/cbfs_serialized.h +++ b/src/commonlib/bsd/include/commonlib/bsd/cbfs_serialized.h @@ -178,6 +178,17 @@ /* Following are component sub-headers for the "standard" component types */
+/** This is the sub-header for legacy stage components. Stages are + loaded by coreboot during the normal boot process */ + +struct cbfs_legacy_stage { + uint32_t compression; /** Compression type */ + uint64_t entry; /** entry point */ + uint64_t load; /** Where to load in memory */ + uint32_t len; /** length of data to load */ + uint32_t memlen; /** total length of object in memory */ +} __packed; + /** this is the sub-header for payload components. Payloads are loaded by coreboot at the end of the boot process */
diff --git a/src/lib/cbfs.c b/src/lib/cbfs.c index 03624bf..19df07f 100644 --- a/src/lib/cbfs.c +++ b/src/lib/cbfs.c @@ -206,11 +206,11 @@ return false; }
-static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *buffer, - size_t buffer_size, uint32_t compression, - const union cbfs_mdata *mdata, bool skip_verification) + +static size_t cbfs_load_and_decompress_offset(const struct region_device *rdev, void *buffer, + size_t buffer_size, uint32_t compression, + const union cbfs_mdata *mdata, bool skip_verification, size_t in_offset, size_t in_size) { - size_t in_size = region_device_sz(rdev); size_t out_size = 0; void *map;
@@ -228,7 +228,7 @@ case CBFS_COMPRESS_NONE: if (buffer_size < in_size) return 0; - if (rdev_readat(rdev, buffer, 0, in_size) != in_size) + if (rdev_readat(rdev, buffer, in_offset, in_size) != in_size) return 0; if (cbfs_file_hash_mismatch(buffer, in_size, mdata, skip_verification)) return 0; @@ -240,7 +240,7 @@
/* cbfs_prog_stage_load() takes care of in-place LZ4 decompression by setting up the rdev to be in memory. */ - map = rdev_mmap_full(rdev); + map = rdev_mmap(rdev, in_offset, in_size); if (map == NULL) return 0;
@@ -257,7 +257,7 @@ case CBFS_COMPRESS_LZMA: if (!cbfs_lzma_enabled()) return 0; - map = rdev_mmap_full(rdev); + map = rdev_mmap(rdev, in_offset, in_size); if (map == NULL) return 0;
@@ -277,6 +277,13 @@ } }
+static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *buffer, + size_t buffer_size, uint32_t compression, + const union cbfs_mdata *mdata, bool skip_verification) +{ + return cbfs_load_and_decompress_offset(rdev, buffer, buffer_size, compression, mdata, skip_verification, 0, region_device_sz(rdev)); +} + struct cbfs_preload_context { struct region_device rdev; struct thread_handle handle; @@ -560,39 +567,72 @@ if ((err = _cbfs_boot_lookup(prog_name(pstage), false, &mdata, &rdev))) return err;
- assert(be32toh(mdata.h.type) == CBFS_TYPE_STAGE); - pstage->cbfs_type = CBFS_TYPE_STAGE; + uint32_t stage_type = be32toh(mdata.h.type); + assert(stage_type == CBFS_TYPE_STAGE || stage_type == CBFS_TYPE_LEGACY_STAGE); + pstage->cbfs_type = stage_type;
enum cbfs_compression compression = CBFS_COMPRESS_NONE; - const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(&mdata, - CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr)); - if (cattr) - compression = be32toh(cattr->compression); + size_t foffset = 0; + size_t fsize = region_device_sz(&rdev); + size_t memsize; + uint8_t *load;
- const struct cbfs_file_attr_stageheader *sattr = cbfs_find_attr(&mdata, - CBFS_FILE_ATTR_TAG_STAGEHEADER, sizeof(*sattr)); - if (!sattr) - return CB_ERR; - prog_set_area(pstage, (void *)(uintptr_t)be64toh(sattr->loadaddr), - be32toh(sattr->memlen)); - prog_set_entry(pstage, prog_start(pstage) + + if (stage_type == CBFS_TYPE_LEGACY_STAGE) { + struct cbfs_legacy_stage legacy_stage; + void *entry; + if (rdev_readat(&rdev, &legacy_stage, 0, sizeof(legacy_stage)) != sizeof(legacy_stage)) + return CB_CBFS_IO; + + fsize -= sizeof(legacy_stage); + foffset += sizeof(legacy_stage); + + /* cbfs_stage fields are written in little endian despite the other + cbfs data types being encoded in big endian. */ + compression = le32toh(legacy_stage.compression); + entry = (void *)(uintptr_t)le64toh(legacy_stage.entry); + load = (void *)(uintptr_t)le64toh(legacy_stage.load); + memsize = le32toh(legacy_stage.memlen); + + assert(fsize == le32toh(legacy_stage.len)); + prog_set_area(pstage, load, memsize); + prog_set_entry(pstage, entry, NULL); + } else { + const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(&mdata, + CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr)); + if (cattr) + compression = be32toh(cattr->compression); + + const struct cbfs_file_attr_stageheader *sattr = cbfs_find_attr(&mdata, + CBFS_FILE_ATTR_TAG_STAGEHEADER, sizeof(*sattr)); + if (!sattr) + return CB_ERR; + prog_set_area(pstage, (void *)(uintptr_t)be64toh(sattr->loadaddr), + be32toh(sattr->memlen)); + prog_set_entry(pstage, prog_start(pstage) + be32toh(sattr->entry_offset), NULL); + load = prog_start(pstage); + memsize = prog_size(pstage); + }
/* Hacky way to not load programs over read only media. The stages * that would hit this path initialize themselves. */ if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) && !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) { - void *mapping = rdev_mmap_full(&rdev); + void *mapping = rdev_mmap(&rdev, foffset, fsize); rdev_munmap(&rdev, mapping); if (cbfs_file_hash_mismatch(mapping, region_device_sz(&rdev), &mdata, false)) return CB_CBFS_HASH_MISMATCH; - if (mapping == prog_start(pstage)) + if (mapping == load) { + printk(BIOS_DEBUG, "Running stage in-place at %p, entry=%p\n", mapping, prog_entry(pstage)); return CB_SUCCESS; + } }
/* LZ4 stages can be decompressed in-place to save mapping scratch space. Load the compressed data to the end of the buffer and point &rdev to that memory location. */ if (cbfs_lz4_enabled() && compression == CBFS_COMPRESS_LZ4) { + assert(stage_type == CBFS_TYPE_STAGE); + size_t in_size = region_device_sz(&rdev); void *compr_start = prog_start(pstage) + prog_size(pstage) - in_size; if (rdev_readat(&rdev, compr_start, 0, in_size) != in_size) @@ -600,16 +640,15 @@ rdev_chain_mem(&rdev, compr_start, in_size); }
- size_t fsize = cbfs_load_and_decompress(&rdev, prog_start(pstage), prog_size(pstage), - compression, &mdata, false); + fsize = cbfs_load_and_decompress_offset(&rdev, load, memsize, + compression, &mdata, false, foffset, fsize); if (!fsize) return CB_ERR;
/* Clear area not covered by file. */ - memset(prog_start(pstage) + fsize, 0, prog_size(pstage) - fsize); + memset(load + fsize, 0, memsize - fsize);
- prog_segment_loaded((uintptr_t)prog_start(pstage), prog_size(pstage), - SEG_FINAL); + prog_segment_loaded((uintptr_t)load, memsize, SEG_FINAL);
return CB_SUCCESS; }