Arthur Heymans has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/68893 )
Change subject: [WIP]cpu/mp_init: Detect the number of CPUs at runtime ......................................................................
[WIP]cpu/mp_init: Detect the number of CPUs at runtime
On some systems the only way to find out which CPUs are present is by initializing them and seeing which ones check in.
TESTED with qemu reporting a too high number of CPUs.
Change-Id: Iafe9d3d4838dad46cd0c7b6d30b905cbd258f17f Signed-off-by: Arthur Heymans arthur@aheymans.xyz --- M src/cpu/x86/Kconfig M src/cpu/x86/mp_init.c 2 files changed, 63 insertions(+), 18 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/93/68893/1
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig index bd3be78..3ae16b6 100644 --- a/src/cpu/x86/Kconfig +++ b/src/cpu/x86/Kconfig @@ -27,6 +27,16 @@ with a stub at 0x30000. This is useful on platforms that have an alternative way to set SMBASE.
+config X86_UNKNOWN_NUMBER_OF_CPUS + bool + default n + depends on PARALLEL_MP + help + Select this on platforms where the BSP cannot know the number of + lapics that will check in except by just launching them all and + seeing who responds. + + config LEGACY_SMP_INIT bool
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c index 12f0bf6..cf699c0 100644 --- a/src/cpu/x86/mp_init.c +++ b/src/cpu/x86/mp_init.c @@ -121,7 +121,7 @@ struct mp_flight_record *records; };
-static int global_num_aps; +static volatile int global_num_aps; static struct mp_flight_plan mp_info;
static inline void barrier_wait(atomic_t *b) @@ -137,11 +137,11 @@ atomic_set(b, 1); }
-static enum cb_err wait_for_aps(atomic_t *val, int target, int total_delay, +static enum cb_err wait_for_aps(atomic_t *val, volatile int *target, int total_delay, int delay_step) { int delayed = 0; - while (atomic_read(val) != target) { + while (atomic_read(val) != *target) { udelay(delay_step); delayed += delay_step; if (delayed >= total_delay) { @@ -466,7 +466,7 @@ return CB_ERR;
/* Wait for CPUs to check in up to 200 us. */ - wait_for_aps(num_aps, ap_count, 200 /* us */, 15 /* us */); + wait_for_aps(num_aps, &ap_count, 200 /* us */, 15 /* us */); }
/* Send final SIPI */ @@ -474,10 +474,25 @@ return CB_ERR;
/* Wait for CPUs to check in. */ - if (wait_for_aps(num_aps, ap_count, 100000 /* 100 ms */, 50 /* us */) != CB_SUCCESS) { - printk(BIOS_ERR, "Not all APs checked in: %d/%d.\n", + if (wait_for_aps(num_aps, &ap_count, 100000 /* 100 ms */, 50 /* us */) != CB_SUCCESS) { + printk(CONFIG(X86_UNKNOWN_NUMBER_OF_CPUS) ? BIOS_INFO : BIOS_ERR, + "Not all APs checked in: %d/%d.\n", atomic_read(num_aps), ap_count); - return CB_ERR; + if (!CONFIG(X86_UNKNOWN_NUMBER_OF_CPUS)) + return CB_ERR; + } + + if (CONFIG(X86_UNKNOWN_NUMBER_OF_CPUS)) { + global_num_aps = atomic_read(num_aps); + /* Unlink CPUs from list */ + struct device *prev_cpu = cpu_bus->children; + for (struct device *cpu = cpu_bus->children; cpu; cpu = cpu->sibling) { + if (!cpu->enabled) { + prev_cpu->sibling = cpu->sibling; + } else { + prev_cpu = cpu; + } + } }
return CB_SUCCESS; @@ -495,7 +510,6 @@ */ const int timeout_us = MAX(1000000, 100000 * mp_params->num_cpus); const int step_us = 100; - int num_aps = mp_params->num_cpus - 1; struct stopwatch sw;
stopwatch_init(&sw); @@ -506,7 +520,7 @@ /* Wait for APs if the record is not released. */ if (atomic_read(&rec->barrier) == 0) { /* Wait for the APs to check in. */ - if (wait_for_aps(&rec->cpus_entered, num_aps, + if (wait_for_aps(&rec->cpus_entered, &global_num_aps, timeout_us, step_us) != CB_SUCCESS) { printk(BIOS_ERR, "MP record %d timeout.\n", i); ret = CB_ERR; @@ -548,6 +562,15 @@
}
+struct mp_state { + struct mp_ops ops; + int cpu_count; + uintptr_t perm_smbase; + size_t perm_smsize; + size_t smm_save_state_size; + bool do_smm; +} mp_state; + /* * mp_init() will set up the SIPI vector and bring up the APs according to * mp_params. Each flight record will be executed according to the plan. Note @@ -614,6 +637,8 @@ atomic_read(ap_count), global_num_aps); return CB_ERR; } + p->num_cpus = global_num_aps + 1; + mp_state.cpu_count = p->num_cpus;
/* Walk the flight plan for the BSP. */ return bsp_do_flight_plan(p); @@ -659,15 +684,6 @@ spin_unlock(&smm_relocation_lock); }
-struct mp_state { - struct mp_ops ops; - int cpu_count; - uintptr_t perm_smbase; - size_t perm_smsize; - size_t smm_save_state_size; - bool do_smm; -} mp_state; - static bool is_smm_enabled(void) { return CONFIG(HAVE_SMI_HANDLER) && mp_state.do_smm; @@ -1147,6 +1163,10 @@ if (ret == CB_SUCCESS && mp_state.ops.post_mp_init != NULL) mp_state.ops.post_mp_init();
+ for (struct device *cpu = cpu_bus->children; cpu; cpu = cpu->sibling) + printk(BIOS_DEBUG, "CPU: %s %s\n", + dev_path(cpu), cpu->enabled?"enabled":"disabled"); + return ret; }