HAOUAS Elyes has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/44310 )
Change subject: src/cpu/x86/smm: Convert to 96 characters line length ......................................................................
src/cpu/x86/smm: Convert to 96 characters line length
Change-Id: I0ef58342a183071b59d8c74a6220acd4a1ffb019 Signed-off-by: Elyes HAOUAS <ehaouas@noos.fr> --- M src/cpu/x86/smm/smihandler.c M src/cpu/x86/smm/smm_module_handler.c M src/cpu/x86/smm/smm_module_loader.c 3 files changed, 87 insertions(+), 119 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/10/44310/1
diff --git a/src/cpu/x86/smm/smihandler.c b/src/cpu/x86/smm/smihandler.c index 8fd95bb..b70f757 100644 --- a/src/cpu/x86/smm/smihandler.c +++ b/src/cpu/x86/smm/smihandler.c @@ -36,8 +36,7 @@ typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
/* SMI multiprocessing semaphore */ -static __attribute__((aligned(4))) volatile smi_semaphore smi_handler_status - = SMI_UNLOCKED; +static __attribute__((aligned(4))) volatile smi_semaphore smi_handler_status = SMI_UNLOCKED;
static int smi_obtain_lock(void) { @@ -74,9 +73,7 @@
void io_trap_handler(int smif) { - /* If a handler function handled a given IO trap, it - * shall return a non-zero value - */ + /* If a handler function handled a given IO trap, it shall return a non-zero value */ printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
if (southbridge_io_trap_handler(smif)) @@ -141,13 +138,12 @@
/* Are we ok to execute the handler? */ if (!smi_obtain_lock()) { - /* For security reasons we don't release the other CPUs - * until the CPU with the lock is actually done + /* For security reasons we don't release the other CPUs until the CPU with the + * lock is actually done */ while (smi_handler_status == SMI_LOCKED) { asm volatile ( - ".byte 0xf3, 0x90\n" /* hint a CPU we are in - * spinlock (PAUSE + ".byte 0xf3, 0x90\n" /* hint a CPU we are in spinlock (PAUSE * instruction, REP NOP) */ ); @@ -167,34 +163,30 @@ case 0x00030002: case 0x00030007: state_save.type = LEGACY; - state_save.legacy_state_save = - smm_save_state(smm_base, - SMM_LEGACY_ARCH_OFFSET, node); + state_save.legacy_state_save = smm_save_state(smm_base, SMM_LEGACY_ARCH_OFFSET, + node); break; case 0x00030100: state_save.type = EM64T100; - state_save.em64t100_state_save = - smm_save_state(smm_base, - SMM_EM64T100_ARCH_OFFSET, node); + state_save.em64t100_state_save = smm_save_state(smm_base, + SMM_EM64T100_ARCH_OFFSET, node); break; case 0x00030101: /* SandyBridge, IvyBridge, and Haswell */ state_save.type = EM64T101; - state_save.em64t101_state_save = - smm_save_state(smm_base, - SMM_EM64T101_ARCH_OFFSET, node); + state_save.em64t101_state_save = smm_save_state(smm_base, + SMM_EM64T101_ARCH_OFFSET, node); break; case 0x00020064: case 0x00030064: state_save.type = AMD64; - state_save.amd64_state_save = - smm_save_state(smm_base, - SMM_AMD64_ARCH_OFFSET, node); + state_save.amd64_state_save = smm_save_state(smm_base, SMM_AMD64_ARCH_OFFSET, + node); break; default: printk(BIOS_DEBUG, "smm_revision: 0x%08x\n", smm_revision); printk(BIOS_DEBUG, "SMI# not supported on your CPU\n"); - /* Don't release lock, so no further SMI will happen, - * if we don't handle it anyways. + /* Don't release lock, so no further SMI will happen, if we don't handle it + * anyways. */ return; } @@ -218,10 +210,9 @@ smi_set_eos(); }
-/* Provide a default implementation for all weak handlers so that relocation - * entries in the modules make sense. Without default implementations the - * weak relocations w/o a symbol have a 0 address which is where the modules - * are linked at. */ +/* Provide a default implementation for all weak handlers so that relocation entries in the + * modules make sense. Without default implementations the weak relocations w/o a symbol have + * a 0 address which is where the modules are linked at. */ int __weak mainboard_io_trap_handler(int smif) { return 0; } void __weak southbridge_smi_handler(void) {} void __weak mainboard_smi_gpi(u32 gpi_sts) {} diff --git a/src/cpu/x86/smm/smm_module_handler.c b/src/cpu/x86/smm/smm_module_handler.c index 02682b4..9d88245 100644 --- a/src/cpu/x86/smm/smm_module_handler.c +++ b/src/cpu/x86/smm/smm_module_handler.c @@ -47,9 +47,7 @@
void io_trap_handler(int smif) { - /* If a handler function handled a given IO trap, it - * shall return a non-zero value - */ + /* If a handler function handled a given IO trap, it shall return a non-zero value */ printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
if (southbridge_io_trap_handler(smif)) @@ -97,8 +95,8 @@ { char *base;
- /* This function assumes all save states start at top of default - * SMRAM size space and are staggered down by save state size. */ + /* This function assumes all save states start at top of default SMRAM size space and + * are staggered down by save state size. */ base = (void *)smm_runtime->smbase; base += SMM_DEFAULT_SIZE; base -= (cpu + 1) * smm_runtime->save_state_size; @@ -127,22 +125,21 @@ cpu = p->cpu; expected_canary = (uintptr_t)p->canary;
- /* Make sure to set the global runtime. It's OK to race as the value - * will be the same across CPUs as well as multiple SMIs. */ + /* Make sure to set the global runtime. It's OK to race as the value will be the same + * across CPUs as well as multiple SMIs. */ if (smm_runtime == NULL) smm_runtime = runtime;
if (cpu >= CONFIG_MAX_CPUS) { console_init(); - printk(BIOS_CRIT, - "Invalid CPU number assigned in SMM stub: %d\n", cpu); + printk(BIOS_CRIT, "Invalid CPU number assigned in SMM stub: %d\n", cpu); return; }
/* Are we ok to execute the handler? */ if (!smi_obtain_lock()) { - /* For security reasons we don't release the other CPUs - * until the CPU with the lock is actually done */ + /* For security reasons we don't release the other CPUs until the CPU with the + * lock is actually done */ while (smi_handler_status == SMI_LOCKED) { asm volatile ( ".byte 0xf3, 0x90\n" /* PAUSE */ @@ -174,8 +171,7 @@ actual_canary = *p->canary;
if (actual_canary != expected_canary) { - printk(BIOS_DEBUG, "canary 0x%lx != 0x%lx\n", actual_canary, - expected_canary); + printk(BIOS_DEBUG, "canary 0x%lx != 0x%lx\n", actual_canary, expected_canary);
// Don't die if we can't indicate an error. if (CONFIG(DEBUG_SMI)) @@ -190,10 +186,9 @@
RMODULE_ENTRY(smm_handler_start);
-/* Provide a default implementation for all weak handlers so that relocation - * entries in the modules make sense. Without default implementations the - * weak relocations w/o a symbol have a 0 address which is where the modules - * are linked at. */ +/* Provide a default implementation for all weak handlers so that relocation entries in the + * modules make sense. Without default implementations the weak relocations w/o a symbol have + * a 0 address which is where the modules are linked at. */ int __weak mainboard_io_trap_handler(int smif) { return 0; } void __weak cpu_smi_handler(void) {} void __weak northbridge_smi_handler() {} diff --git a/src/cpu/x86/smm/smm_module_loader.c b/src/cpu/x86/smm/smm_module_loader.c index fc1e1b3..15c870b 100644 --- a/src/cpu/x86/smm/smm_module_loader.c +++ b/src/cpu/x86/smm/smm_module_loader.c @@ -10,9 +10,8 @@
#define FXSAVE_SIZE 512
-/* FXSAVE area during relocation. While it may not be strictly needed the - SMM stub code relies on the FXSAVE area being non-zero to enable SSE - instructions within SMM mode. */ +/* FXSAVE area during relocation. While it may not be strictly needed the SMM stub code relies + * on the FXSAVE area being non-zero to enable SSE instructions within SMM mode. */ static uint8_t fxsave_area_relocation[CONFIG_MAX_CPUS][FXSAVE_SIZE] __attribute__((aligned(16)));
@@ -26,8 +25,8 @@ * The components are assumed to consist of one consecutive region. */
-/* These parameters are used by the SMM stub code. A pointer to the params - * is also passed to the C-base handler. */ +/* These parameters are used by the SMM stub code. A pointer to the params is also passed to the + * C-base handler. */ struct smm_stub_params { u32 stack_size; u32 stack_top; @@ -39,8 +38,8 @@ } __packed;
/* - * The stub is the entry point that sets up protected mode and stacks for each - * CPU. It then calls into the SMM handler module. It is encoded as an rmodule. + * The stub is the entry point that sets up protected mode and stacks for each CPU. It then + * calls into the SMM handler module. It is encoded as an rmodule. */ extern unsigned char _binary_smmstub_start[];
@@ -48,8 +47,8 @@ #define SMM_MINIMUM_STACK_SIZE 32
/* - * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry - * addresses across CPUs. + * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry addresses + * across CPUs. * * 0xe9 <16-bit relative target> ; jmp <relative-offset> */ @@ -59,9 +58,9 @@ } __packed;
/* - * Place the entry instructions for num entries beginning at entry_start with - * a given stride. The entry_start is the highest entry point's address. All - * other entry points are stride size below the previous. + * Place the entry instructions for num entries beginning at entry_start with a given stride. + * The entry_start is the highest entry point's address. All other entry points are stride size + * below the previous. */ static void smm_place_jmp_instructions(void *entry_start, size_t stride, size_t num, void *jmp_target) @@ -70,20 +69,18 @@ char *cur; struct smm_entry_ins entry = { .jmp_rel = 0xe9 };
- /* Each entry point has an IP value of 0x8000. The SMBASE for each - * CPU is different so the effective address of the entry instruction - * is different. Therefore, the relative displacement for each entry - * instruction needs to be updated to reflect the current effective - * IP. Additionally, the IP result from the jmp instruction is - * calculated using the next instruction's address so the size of - * the jmp instruction needs to be taken into account. */ + /* Each entry point has an IP value of 0x8000. The SMBASE for each CPU is different so + * the effective address of the entry instruction is different. Therefore, the relative + * displacement for each entry instruction needs to be updated to reflect the current + * effective IP. Additionally, the IP result from the jmp instruction is calculated + * using the next instruction's address so the size of the jmp instruction needs to be + * taken into account. */ cur = entry_start; for (i = 0; i < num; i++) { uint32_t disp = (uintptr_t)jmp_target;
disp -= sizeof(entry) + (uintptr_t)cur; - printk(BIOS_DEBUG, - "SMM Module: placing jmp sequence at %p rel16 0x%04x\n", + printk(BIOS_DEBUG, "SMM Module: placing jmp sequence at %p rel16 0x%04x\n", cur, disp); entry.rel16 = disp; memcpy(cur, &entry, sizeof(entry)); @@ -91,10 +88,9 @@ } }
-/* Place stacks in base -> base + size region, but ensure the stacks don't - * overlap the staggered entry points. */ -static void *smm_stub_place_stacks(char *base, size_t size, - struct smm_loader_params *params) +/* Place stacks in base -> base + size region, but ensure the stacks don't overlap the staggered + * entry points. */ +static void *smm_stub_place_stacks(char *base, size_t size, struct smm_loader_params *params) { size_t total_stack_size; char *stacks_top; @@ -102,10 +98,8 @@ if (params->stack_top != NULL) return params->stack_top;
- /* If stack space is requested assume the space lives in the lower - * half of SMRAM. */ - total_stack_size = params->per_cpu_stack_size * - params->num_concurrent_stacks; + /* If stack space is requested assume the space lives in the lower half of SMRAM. */ + total_stack_size = params->per_cpu_stack_size * params->num_concurrent_stacks;
/* There has to be at least one stack user. */ if (params->num_concurrent_stacks < 1) @@ -121,9 +115,8 @@ return stacks_top; }
-/* Place the staggered entry points for each CPU. The entry points are - * staggered by the per CPU SMM save state size extending down from - * SMM_ENTRY_OFFSET. */ +/* Place the staggered entry points for each CPU. The entry points are staggered by the per + * CPU SMM save state size extending down from SMM_ENTRY_OFFSET. */ static void smm_stub_place_staggered_entry_points(char *base, const struct smm_loader_params *params, const struct rmodule *smm_stub) { @@ -131,40 +124,36 @@
stub_entry_offset = rmodule_entry_offset(smm_stub);
- /* If there are staggered entry points or the stub is not located - * at the SMM entry point then jmp instructions need to be placed. */ + /* If there are staggered entry points or the stub is not located at the SMM entry point + * then jmp instructions need to be placed. */ if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) { size_t num_entries;
base += SMM_ENTRY_OFFSET; num_entries = params->num_concurrent_save_states; - /* Adjust beginning entry and number of entries down since - * the initial entry point doesn't need a jump sequence. */ + /* Adjust beginning entry and number of entries down since the initial entry + * point doesn't need a jump sequence. */ if (stub_entry_offset == 0) { base -= params->per_cpu_save_state_size; num_entries--; } - smm_place_jmp_instructions(base, - params->per_cpu_save_state_size, - num_entries, + smm_place_jmp_instructions(base, params->per_cpu_save_state_size, num_entries, rmodule_entry(smm_stub)); } }
/* - * The stub setup code assumes it is completely contained within the - * default SMRAM size (0x10000). There are potentially 3 regions to place - * within the default SMRAM size: + * The stub setup code assumes it is completely contained within the default + * SMRAM size (0x10000). There are potentially 3 regions to place within the default SMRAM size: * 1. Save state areas * 2. Stub code * 3. Stack areas * - * The save state and stack areas are treated as contiguous for the number of - * concurrent areas requested. The save state always lives at the top of SMRAM - * space, and the entry point is at offset 0x8000. + * The save state and stack areas are treated as contiguous for the number of concurrent areas + * requested. The save state always lives at the top of SMRAM space, and the entry point is at + * offset 0x8000. */ -static int smm_module_setup_stub(void *smbase, size_t smm_size, - struct smm_loader_params *params, +static int smm_module_setup_stub(void *smbase, size_t smm_size, struct smm_loader_params *params, void *fxsave_area) { size_t total_save_state_size; @@ -209,8 +198,8 @@ smm_stub_size = rmodule_memory_size(&smm_stub); stub_entry_offset = rmodule_entry_offset(&smm_stub);
- /* Assume the stub is always small enough to live within upper half of - * SMRAM region after the save state space has been allocated. */ + /* Assume the stub is always small enough to live within upper half of SMRAM region + * after the save state space has been allocated. */ smm_stub_loc = &base[SMM_ENTRY_OFFSET];
/* Adjust for jmp instruction sequence. */ @@ -229,10 +218,9 @@ /* The stacks, if requested, live in the lower half of SMRAM space. */ size = SMM_ENTRY_OFFSET;
- /* Ensure stacks don't encroach onto staggered SMM - * entry points. The staggered entry points extend - * below SMM_ENTRY_OFFSET by the number of concurrent - * save states - 1 and save state size. */ + /* Ensure stacks don't encroach onto staggered SMM entry points. The staggered entry + * points extend below SMM_ENTRY_OFFSET by the number of concurrent save states - 1 and + * save state size. */ if (params->num_concurrent_save_states > 1) { size -= total_save_state_size; size += params->per_cpu_save_state_size; @@ -278,17 +266,16 @@ }
/* - * smm_setup_relocation_handler assumes the callback is already loaded in - * memory. i.e. Another SMM module isn't chained to the stub. The other - * assumption is that the stub will be entered from the default SMRAM - * location: 0x30000 -> 0x40000. + * smm_setup_relocation_handler assumes the callback is already loaded in memory. i.e. Another + * SMM module isn't chained to the stub. The other assumption is that the stub will be entered + * from the default SMRAM location: 0x30000 -> 0x40000. */ int smm_setup_relocation_handler(struct smm_loader_params *params) { void *smram = (void *)SMM_DEFAULT_BASE;
- /* There can't be more than 1 concurrent save state for the relocation - * handler because all CPUs default to 0x30000 as SMBASE. */ + /* There can't be more than 1 concurrent save state for the relocation handler because + * all CPUs default to 0x30000 as SMBASE. */ if (params->num_concurrent_save_states > 1) return -1;
@@ -296,17 +283,15 @@ if (params->handler == NULL) return -1;
- /* Since the relocation handler always uses stack, adjust the number - * of concurrent stack users to be CONFIG_MAX_CPUS. */ + /* Since the relocation handler always uses stack, adjust the number of concurrent stack + * users to be CONFIG_MAX_CPUS. */ if (params->num_concurrent_stacks == 0) params->num_concurrent_stacks = CONFIG_MAX_CPUS;
- return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE, - params, fxsave_area_relocation); + return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE, params, fxsave_area_relocation); }
-/* The SMM module is placed within the provided region in the following - * manner: +/* The SMM module is placed within the provided region in the following manner: * +-----------------+ <- smram + size * | BIOS resource | * | list (STM) | @@ -322,10 +307,9 @@ * | stub code | * +-----------------+ <- smram * - * It should be noted that this algorithm will not work for - * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm - * expects a region large enough to encompass the handler and stacks - * as well as the SMM_DEFAULT_SIZE. + * It should be noted that this algorithm will not work for SMM_DEFAULT_SIZE SMRAM regions such + * as the A segment. This algorithm expects a region large enough to encompass the handler and + * stacks as well as the SMM_DEFAULT_SIZE. */ int smm_load_module(void *smram, size_t size, struct smm_loader_params *params) { @@ -350,8 +334,7 @@ if (CONFIG(DEBUG_SMI)) memset(smram, 0xcd, size);
- total_stack_size = params->per_cpu_stack_size * - params->num_concurrent_stacks; + total_stack_size = params->per_cpu_stack_size * params->num_concurrent_stacks;
/* Stacks start at the top of the region. */ base = smram; @@ -362,14 +345,13 @@
params->stack_top = base;
- /* SMM module starts at offset SMM_DEFAULT_SIZE with the load alignment - * taken into account. */ + /* SMM module starts at offset SMM_DEFAULT_SIZE with the load alignment taken into + * account. */ base = smram; base += SMM_DEFAULT_SIZE; handler_size = rmodule_memory_size(&smm_mod); module_alignment = rmodule_load_alignment(&smm_mod); - alignment_size = module_alignment - - ((uintptr_t)base % module_alignment); + alignment_size = module_alignment - ((uintptr_t)base % module_alignment); if (alignment_size != module_alignment) { handler_size += alignment_size; base += alignment_size;
build bot (Jenkins) has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/44310 )
Change subject: src/cpu/x86/smm: Convert to 96 characters line length ......................................................................
Patch Set 1:
(1 comment)
https://review.coreboot.org/c/coreboot/+/44310/1/src/cpu/x86/smm/smm_module_... File src/cpu/x86/smm/smm_module_loader.c:
https://review.coreboot.org/c/coreboot/+/44310/1/src/cpu/x86/smm/smm_module_... PS1, Line 156: static int smm_module_setup_stub(void *smbase, size_t smm_size, struct smm_loader_params *params, line over 96 characters
HAOUAS Elyes has abandoned this change. ( https://review.coreboot.org/c/coreboot/+/44310 )
Change subject: src/cpu/x86/smm: Convert to 96 characters line length ......................................................................
Abandoned
Merge Conflict