[coreboot-gerrit] Patch set updated for coreboot: lib/selfboot: simplify bounce buffer logic

Aaron Durbin (adurbin@chromium.org) gerrit at coreboot.org
Tue Jul 12 19:08:32 CEST 2016


Aaron Durbin (adurbin at chromium.org) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/15613

-gerrit

commit 23b7059143c08dd641822d8bdd836727f28d6272
Author: Aaron Durbin <adurbin at chromium.org>
Date:   Mon Jul 11 22:07:46 2016 -0500

    lib/selfboot: simplify bounce buffer logic
    
    A lot of the bounce buffer logic was strewn throughout the
    code. However, not all platforms even use or bother with a
    bounce buffer. Therefore, consolidate the logic into one
    place.
    
    This change also fixes a bug where segments which overlap
    ramstage, but begin in the memory before ramstage were
    writing into memory not covered by the bounce buffer.
    
    Another difference is that uncompressed segments overlapping
    ramstage aren't split into separate segments. The result is
    that those segments will be written into the bounce buffer
    entirely and then subsequently copied to the destination
    address. That's the same behavior for compressed segments
    because there's no way to split a compressed segment. The
    negative effect of this simplification is that people
    using large uncompressed payloads that partially cover
    the ramstage area will end up doing 2x the number of memcpy()s.
    
    Change-Id: I9b818f61fa7ada419c587962c98e08a9a5d99d67
    Signed-off-by: Aaron Durbin <adurbin at chromium.org>
---
 src/lib/selfboot.c | 341 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 180 insertions(+), 161 deletions(-)

diff --git a/src/lib/selfboot.c b/src/lib/selfboot.c
index 8e84a68..db6ecc3 100644
--- a/src/lib/selfboot.c
+++ b/src/lib/selfboot.c
@@ -50,14 +50,6 @@ static void segment_insert_before(struct segment *seg, struct segment *new)
 	seg->prev = new;
 }
 
-static void segment_insert_after(struct segment *seg, struct segment *new)
-{
-	new->next = seg->next;
-	new->prev = seg;
-	seg->next->prev = new;
-	seg->next = new;
-}
-
 /* The problem:
  * Static executables all want to share the same addresses
  * in memory because only a few addresses are reliably present on
@@ -78,21 +70,87 @@ static void segment_insert_after(struct segment *seg, struct segment *new)
  *   and much simpler than the general case implemented in kexec.
  */
 
-static unsigned long bounce_size, bounce_buffer;
+static int overlaps_coreboot(struct segment *seg)
+{
+	unsigned long start, end;
+	start = seg->s_dstaddr;
+	end = start + seg->s_memsz;
+	return !((end <= lb_start) || (start >= lb_end));
+}
+
+/* Offset used to calculate addresses in the bounce buffer area. */
+static unsigned long translation_offet;
 
-static void get_bounce_buffer(unsigned long req_size)
+static unsigned long addr_to_bounce_buffer(unsigned long addr)
 {
-	unsigned long lb_size;
-	void *buffer;
+	return addr + translation_offet;
+}
 
-	/* When the ramstage is relocatable there is no need for a bounce
-	 * buffer. All payloads should not overlap the ramstage.
+/*
+ * Split uncompressed segments which overlap ramstage to optimize away
+ * the use of the bounce buffer since the uncompressed segments that fall
+ * just before or just after ramstage can be directly written as their
+ * own segment.
+ */
+static void split_uncompressed_segment_by_address(struct segment *seg,
+							unsigned long addr)
+{
+	struct segment *new;
+	unsigned long start;
+	unsigned long end;
+	unsigned long preceding_memsz;
+	unsigned long orig_filesz;
+	unsigned long orig_memsz;
+
+	orig_memsz = seg->s_memsz;
+	start = seg->s_dstaddr;
+	end = start + orig_memsz;
+	orig_filesz = seg->s_filesz;
+
+	/*
+	 * There's nothing to do if the segment starts at or after the split
+	 * address.
 	 */
-	if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE)) {
-		bounce_buffer = ~0UL;
-		bounce_size = 0;
+	if (start >= addr)
 		return;
-	}
+
+	new = malloc(sizeof(*new));
+	*new = *seg;
+
+	/* The segment is split into 2 segments along the split address. */
+	preceding_memsz = addr - start;
+	seg->s_dstaddr = addr;
+	seg->s_memsz -= preceding_memsz;
+	new->s_memsz = preceding_memsz;
+
+	/* Determine new file sizes and start offsets. */
+	unsigned long preceding_filesz = MIN(orig_filesz, preceding_memsz);
+	new->s_filesz = preceding_filesz;
+	seg->s_srcaddr += preceding_filesz;
+	seg->s_filesz -= preceding_filesz;
+
+	/* Place new segment before the current one as the current one is
+	 * already in the linked list. */
+	segment_insert_before(seg, new);
+}
+
+static void split_uncompressed_segment(struct segment *seg)
+{
+	split_uncompressed_segment_by_address(seg, lb_start);
+	split_uncompressed_segment_by_address(seg, lb_end);
+}
+
+/*
+ * bounce_low and bounce_high describe the bounding region of memory where
+ * segments overlap the ramstage memory.
+ */
+static unsigned long allocate_bounce_buffer(unsigned long bounce_low,
+						unsigned long bounce_high)
+{
+	unsigned long lb_size;
+	unsigned long req_size;
+	void *buffer;
+
+	req_size = bounce_high - bounce_low;
 
 	lb_size = lb_end - lb_start;
 	/* Plus coreboot size so I have somewhere
@@ -104,120 +162,125 @@ static void get_bounce_buffer(unsigned long req_size)
 
 	printk(BIOS_SPEW, "Bounce Buffer at %p, %lu bytes\n", buffer, lb_size);
 
-	bounce_buffer = (uintptr_t)buffer;
-	bounce_size = req_size;
+	return (uintptr_t)buffer;
 }
 
-static int overlaps_coreboot(struct segment *seg)
+static int bounce_buffer_init(struct segment *head)
 {
-	unsigned long start, end;
-	start = seg->s_dstaddr;
-	end = start + seg->s_memsz;
-	return !((end <= lb_start) || (start >= lb_end));
-}
+	struct segment *seg;
+	unsigned long buffer;
+	bool bb_required = false;
 
-static int relocate_segment(unsigned long buffer, struct segment *seg)
-{
-	/* Modify all segments that want to load onto coreboot
-	 * to load onto the bounce buffer instead.
-	 */
-	/* ret:  1 : A new segment is inserted before the seg.
-	 *       0 : A new segment is inserted after the seg, or no new one.
+	/*
+	 * When the ramstage is relocatable there is no need for a bounce
+	 * buffer. All payloads should not overlap the ramstage.
 	 */
-	unsigned long start, middle, end, ret = 0;
+	if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE))
+		return 0;
 
-	printk(BIOS_SPEW, "lb: [0x%016lx, 0x%016lx)\n",
-		lb_start, lb_end);
+	/* Determine if any segments overlap ramstage. */
+	for (seg = head->next; seg != head; seg = seg->next) {
+		if (overlaps_coreboot(seg))
+			bb_required = true;
+	}
 
-	/* I don't conflict with coreboot so get out of here */
-	if (!overlaps_coreboot(seg))
+	if (!bb_required)
 		return 0;
 
 	if (!arch_supports_bounce_buffer())
-		die ("bounce buffer not supported");
+		die("bounce buffer not supported");
 
-	start = seg->s_dstaddr;
-	middle = start + seg->s_filesz;
-	end = start + seg->s_memsz;
+	/*
+	 * Add segments to bootmem memory map before a bounce buffer is
+	 * allocated so that there aren't conflicts with the actual payload.
+	 */
+	for (seg = head->next; seg != head; seg = seg->next)
+		bootmem_add_range(seg->s_dstaddr, seg->s_memsz,
+					LB_MEM_UNUSABLE);
 
-	printk(BIOS_SPEW, "segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
-		start, middle, end);
+	/*
+	 * Split uncompressed segments that overlap coreboot to reduce
+	 * unnecessary bounce buffer usage since the portions of the segments
+	 * that are outside of the ramstage area can be directly written.
+	 */
+	for (seg = head->next; seg != head; seg = seg->next) {
+		if (seg->compression != CBFS_COMPRESS_NONE)
+			continue;
+		if (!overlaps_coreboot(seg))
+			continue;
+		split_uncompressed_segment(seg);
+	}
 
-	if (seg->compression == CBFS_COMPRESS_NONE) {
-		/* Slice off a piece at the beginning
-		 * that doesn't conflict with coreboot.
-		 */
-		if (start < lb_start) {
-			struct segment *new;
-			unsigned long len = lb_start - start;
-			new = malloc(sizeof(*new));
-			*new = *seg;
-			new->s_memsz = len;
-			seg->s_memsz -= len;
-			seg->s_dstaddr += len;
-			seg->s_srcaddr += len;
-			if (seg->s_filesz > len) {
-				new->s_filesz = len;
-				seg->s_filesz -= len;
-			} else {
-				seg->s_filesz = 0;
-			}
+	/*
+	 * For all segments which overlap ramstage determine the range
+	 * of memory which covers the low and high address of the segments.
+	 * There's a minimum bounce buffer size equal to the size of ramstage
+	 * once a bounce buffer is determined to be required.
+	 */
+	unsigned long bounce_low = lb_start;
+	unsigned long bounce_high = lb_end;
+	for (seg = head->next; seg != head; seg = seg->next) {
+		if (!overlaps_coreboot(seg))
+			continue;
+		if (seg->s_dstaddr + seg->s_memsz > bounce_high)
+			bounce_high = seg->s_dstaddr + seg->s_memsz;
+		if (seg->s_dstaddr < bounce_low)
+			bounce_low = seg->s_dstaddr;
+	}
 
-			/* Order by stream offset */
-			segment_insert_before(seg, new);
+	/* Allocate bounce buffer and set up translation offset. */
+	buffer = allocate_bounce_buffer(bounce_low, bounce_high);
+	translation_offet = buffer - bounce_low;
 
-			/* compute the new value of start */
-			start = seg->s_dstaddr;
+	/* Target segments within the bounce buffer that overlap. */
+	for (seg = head->next; seg != head; seg = seg->next) {
+		unsigned long new_dstaddr;
 
-			printk(BIOS_SPEW, "   early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
-				new->s_dstaddr,
-				new->s_dstaddr + new->s_filesz,
-				new->s_dstaddr + new->s_memsz);
+		if (!overlaps_coreboot(seg))
+			continue;
 
-			ret = 1;
-		}
+		new_dstaddr = addr_to_bounce_buffer(seg->s_dstaddr);
 
-		/* Slice off a piece at the end
-		 * that doesn't conflict with coreboot
-		 */
-		if (end > lb_end) {
-			unsigned long len = lb_end - start;
-			struct segment *new;
-			new = malloc(sizeof(*new));
-			*new = *seg;
-			seg->s_memsz = len;
-			new->s_memsz -= len;
-			new->s_dstaddr += len;
-			new->s_srcaddr += len;
-			if (seg->s_filesz > len) {
-				seg->s_filesz = len;
-				new->s_filesz -= len;
-			} else {
-				new->s_filesz = 0;
-			}
-			/* Order by stream offset */
-			segment_insert_after(seg, new);
+		printk(BIOS_DEBUG, "Relocation: addr: 0x%08lx memsz: 0x%08lx filesz: 0x%08lx -> addr: 0x%08lx\n",
+			seg->s_dstaddr, seg->s_memsz, seg->s_filesz,
+			new_dstaddr);
 
-			printk(BIOS_SPEW, "   late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
-				new->s_dstaddr,
-				new->s_dstaddr + new->s_filesz,
-				new->s_dstaddr + new->s_memsz);
-		}
+		seg->s_dstaddr = new_dstaddr;
 	}
 
-	/* Now retarget this segment onto the bounce buffer */
-	/* sort of explanation: the buffer is a 1:1 mapping to coreboot.
-	 * so you will make the dstaddr be this buffer, and it will get copied
-	 * later to where coreboot lives.
+	/*
+	 * Place "fake" segments at the end of the segment list covering
+	 * the partial overlap regions preceding and succeeding the
+	 * ramstage area.  These newly added segments handle the copying
+	 * of the payload that surrounds the ramstage area after the full
+	 * contents were written into the bounce buffer. These segments
+	 * only exist when compressed segments overlap the ramstage area.
+	 * If uncompressed segments targeted the ramstage area they would
+	 * have been split already.
 	 */
-	seg->s_dstaddr = buffer + (seg->s_dstaddr - lb_start);
+	if (bounce_low < lb_start) {
+		struct segment *new;
+		new = malloc(sizeof(*new));
+		new->s_dstaddr = bounce_low;
+		new->s_srcaddr = addr_to_bounce_buffer(new->s_dstaddr);
+		new->s_memsz = lb_start - bounce_low;
+		new->s_filesz = new->s_memsz;
+		new->compression = CBFS_COMPRESS_NONE;
+		segment_insert_before(head, new);
+	}
 
-	printk(BIOS_SPEW, " bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
-		seg->s_dstaddr,
-		seg->s_dstaddr + seg->s_filesz,
-		seg->s_dstaddr + seg->s_memsz);
+	if (bounce_high > lb_end) {
+		struct segment *new;
+		new = malloc(sizeof(*new));
+		new->s_dstaddr = lb_end;
+		new->s_srcaddr = addr_to_bounce_buffer(new->s_dstaddr);
+		new->s_memsz = bounce_high - lb_end;
+		new->s_filesz = new->s_memsz;
+		new->compression = CBFS_COMPRESS_NONE;
+		segment_insert_before(head, new);
+	}
 
-	return ret;
+	return 0;
 }
 
 /* Decode a serialized cbfs payload segment
@@ -328,13 +391,10 @@ static int build_self_segment_list(
 	return 1;
 }
 
-static int load_self_segments(
-	struct segment *head,
-	struct prog *payload)
+static int load_self_segments(struct segment *head, struct prog *payload)
 {
 	struct segment *ptr;
 	const unsigned long one_meg = (1UL << 20);
-	unsigned long bounce_high = lb_end;
 
 	for(ptr = head->next; ptr != head; ptr = ptr->next) {
 		if (bootmem_region_targets_usable_ram(ptr->s_dstaddr,
@@ -357,25 +417,9 @@ static int load_self_segments(
 		return 0;
 	}
 
-	for(ptr = head->next; ptr != head; ptr = ptr->next) {
-		/*
-		 * Add segments to bootmem memory map before a bounce buffer is
-		 * allocated so that there aren't conflicts with the actual
-		 * payload.
-		 */
-		bootmem_add_range(ptr->s_dstaddr, ptr->s_memsz,
-					LB_MEM_UNUSABLE);
-
-		if (!overlaps_coreboot(ptr))
-			continue;
-		if (ptr->s_dstaddr + ptr->s_memsz > bounce_high)
-			bounce_high = ptr->s_dstaddr + ptr->s_memsz;
-	}
-	get_bounce_buffer(bounce_high - lb_start);
-	if (!bounce_buffer) {
-		printk(BIOS_ERR, "Could not find a bounce buffer...\n");
+	/* Bail out if bounce buffer initialization fails. */
+	if (bounce_buffer_init(head))
 		return 0;
-	}
 
 	for(ptr = head->next; ptr != head; ptr = ptr->next) {
 		unsigned char *dest, *src, *middle, *end;
@@ -383,16 +427,6 @@ static int load_self_segments(
 		printk(BIOS_DEBUG, "Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
 			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
 
-		/* Modify the segment to load onto the bounce_buffer if necessary.
-		 */
-		if (relocate_segment(bounce_buffer, ptr)) {
-			ptr = (ptr->prev)->prev;
-			continue;
-		}
-
-		printk(BIOS_DEBUG, "Post relocation: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
-			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
-
 		/* Compute the boundaries of the segment */
 		dest = (unsigned char *)(ptr->s_dstaddr);
 		src = (unsigned char *)(ptr->s_srcaddr);
@@ -446,25 +480,6 @@ static int load_self_segments(
 			memset(middle, 0, end - middle);
 		}
 
-		/* Copy the data that's outside the area that shadows ramstage */
-		printk(BIOS_DEBUG, "dest %p, end %p, bouncebuffer %lx\n", dest, end, bounce_buffer);
-		if ((unsigned long)end > bounce_buffer) {
-			if ((unsigned long)dest < bounce_buffer) {
-				unsigned char *from = dest;
-				unsigned char *to = (unsigned char*)(lb_start-(bounce_buffer-(unsigned long)dest));
-				unsigned long amount = bounce_buffer-(unsigned long)dest;
-				printk(BIOS_DEBUG, "move prefix around: from %p, to %p, amount: %lx\n", from, to, amount);
-				memcpy(to, from, amount);
-			}
-			if ((unsigned long)end > bounce_buffer + (lb_end - lb_start)) {
-				unsigned long from = bounce_buffer + (lb_end - lb_start);
-				unsigned long to = lb_end;
-				unsigned long amount = (unsigned long)end - from;
-				printk(BIOS_DEBUG, "move suffix around: from %lx, to %lx, amount: %lx\n", from, to, amount);
-				memcpy((char*)to, (char*)from, amount);
-			}
-		}
-
 		/*
 		 * Each architecture can perform additonal operations
 		 * on the loaded segment
@@ -499,8 +514,12 @@ void *selfload(struct prog *payload)
 
 	rdev_munmap(prog_rdev(payload), data);
 
-	/* Update the payload's area with the bounce buffer information. */
-	prog_set_area(payload, (void *)(uintptr_t)bounce_buffer, bounce_size);
+	/*
+	 * Update the payload's area with the bounce buffer information. The
+	 * size of the bounce buffer is equal to ramstage size.
+	 */
+	unsigned long addr = addr_to_bounce_buffer(lb_start);
+	prog_set_area(payload, (void *)(uintptr_t)addr, lb_end - lb_start);
 
 	return (void *)entry;
 



More information about the coreboot-gerrit mailing list