Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/14561
-gerrit
commit 3465349e4fbb56e74d5a0417f001b3c783e18eb5
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Sat Apr 30 15:14:18 2016 -0500
cpu/x86: remove BACKUP_DEFAULT_SMM_REGION option
Almost all boards utilizing PARALLEL_MP were already selecting this
option. The logic in backup_default_smm.c already checks
HAVE_ACPI_RESUME to determine whether it should actually take action.
While the backup primarily benefits SMM, the MP library also places
the SIPI vector in the default SMM region. Therefore, a platform which
doesn't utilize SMM but does support ACPI resume should back up this
memory region as well.
Change-Id: I14cf1318136a17f48ba5ae119507918190e25387
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/cpu/intel/haswell/Kconfig | 1 -
src/cpu/x86/Kconfig | 6 ----
src/cpu/x86/Makefile.inc | 1 +
src/cpu/x86/backup_default_smm.c | 64 ++++++++++++++++++++++++++++++++++++
src/cpu/x86/smm/Makefile.inc | 1 -
src/cpu/x86/smm/backup_default_smm.c | 64 ------------------------------------
src/soc/intel/baytrail/Kconfig | 1 -
src/soc/intel/braswell/Kconfig | 1 -
src/soc/intel/broadwell/Kconfig | 1 -
src/soc/intel/skylake/Kconfig | 1 -
10 files changed, 65 insertions(+), 76 deletions(-)
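As a hedged illustration of the usage described above (not part of the
patch itself; the wrapper function and its caller are hypothetical), a
platform without SMM but with ACPI S3 resume could bracket its AP
bring-up with the backup/restore pair like this:

#include <console/console.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/smm.h>

static void bring_up_aps(struct bus *cpu_bus, struct mp_params *params)
{
	/* Save the default SMM region; the SIPI vector is placed there. */
	void *backup = backup_default_smm_area();

	/* Bring the APs out of SIPI state. */
	if (mp_init(cpu_bus, params) < 0)
		printk(BIOS_ERR, "MP initialization failure.\n");

	/* Restore the original contents so an S3 resume finds them intact. */
	restore_default_smm_area(backup);
}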
diff --git a/src/cpu/intel/haswell/Kconfig b/src/cpu/intel/haswell/Kconfig
index 779f1d6..ec75391 100644
--- a/src/cpu/intel/haswell/Kconfig
+++ b/src/cpu/intel/haswell/Kconfig
@@ -10,7 +10,6 @@ config CPU_SPECIFIC_OPTIONS
select ARCH_VERSTAGE_X86_32
select ARCH_ROMSTAGE_X86_32
select ARCH_RAMSTAGE_X86_32
- select BACKUP_DEFAULT_SMM_REGION
select HAVE_MONOTONIC_TIMER
select SMP
select MMX
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 6cd65cc..e80f02b 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -127,12 +127,6 @@ config PLATFORM_USES_FSP1_0
Selected for Intel processors/platform combinations that use the
Intel Firmware Support Package (FSP) 1.0 for initialization.
-config BACKUP_DEFAULT_SMM_REGION
- def_bool n
- help
- The CPU support will select this option if the default SMM region
- needs to be backed up for suspend/resume purposes.
-
config MIRROR_PAYLOAD_TO_RAM_BEFORE_LOADING
def_bool n
help
diff --git a/src/cpu/x86/Makefile.inc b/src/cpu/x86/Makefile.inc
index 0efbdd7..142b954 100644
--- a/src/cpu/x86/Makefile.inc
+++ b/src/cpu/x86/Makefile.inc
@@ -5,6 +5,7 @@ endif
subdirs-$(CONFIG_PARALLEL_MP) += name
ramstage-$(CONFIG_PARALLEL_MP) += mp_init.c
ramstage-$(CONFIG_MIRROR_PAYLOAD_TO_RAM_BEFORE_LOADING) += mirror_payload.c
+ramstage-$(CONFIG_PARALLEL_MP) += backup_default_smm.c
additional-dirs += $(obj)/cpu/x86
diff --git a/src/cpu/x86/backup_default_smm.c b/src/cpu/x86/backup_default_smm.c
new file mode 100644
index 0000000..2023aed
--- /dev/null
+++ b/src/cpu/x86/backup_default_smm.c
@@ -0,0 +1,64 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2014 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <string.h>
+#include <arch/acpi.h>
+#include <console/console.h>
+#include <cbmem.h>
+#include <cpu/x86/smm.h>
+
+void *backup_default_smm_area(void)
+{
+ void *save_area;
+ const void *default_smm = (void *)SMM_DEFAULT_BASE;
+
+ if (!IS_ENABLED(CONFIG_HAVE_ACPI_RESUME))
+ return NULL;
+
+ /*
+ * The buffer needs to be preallocated regardless. In the non-resume
+ * path it will be allocated for handling resume. Note that cbmem_add()
+ * does a find before the addition.
+ */
+ save_area = cbmem_add(CBMEM_ID_SMM_SAVE_SPACE, SMM_DEFAULT_SIZE);
+
+ if (save_area == NULL) {
+ printk(BIOS_DEBUG, "SMM save area not added.\n");
+ return NULL;
+ }
+
+ /* Only back up the area on S3 resume. */
+ if (acpi_is_wakeup_s3()) {
+ memcpy(save_area, default_smm, SMM_DEFAULT_SIZE);
+ return save_area;
+ }
+
+ /*
+ * Not the S3 resume path. No need to restore memory contents after
+ * SMM relocation.
+ */
+ return NULL;
+}
+
+void restore_default_smm_area(void *smm_save_area)
+{
+ void *default_smm = (void *)SMM_DEFAULT_BASE;
+
+ if (smm_save_area == NULL)
+ return;
+
+ memcpy(default_smm, smm_save_area, SMM_DEFAULT_SIZE);
+}
diff --git a/src/cpu/x86/smm/Makefile.inc b/src/cpu/x86/smm/Makefile.inc
index 72c9796..32f5ea7 100644
--- a/src/cpu/x86/smm/Makefile.inc
+++ b/src/cpu/x86/smm/Makefile.inc
@@ -13,7 +13,6 @@
## GNU General Public License for more details.
##
-ramstage-$(CONFIG_BACKUP_DEFAULT_SMM_REGION) += backup_default_smm.c
ramstage-y += smm_module_loader.c
ifeq ($(CONFIG_ARCH_RAMSTAGE_X86_32),y)
diff --git a/src/cpu/x86/smm/backup_default_smm.c b/src/cpu/x86/smm/backup_default_smm.c
deleted file mode 100644
index 2023aed..0000000
--- a/src/cpu/x86/smm/backup_default_smm.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2014 Google Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of
- * the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <string.h>
-#include <arch/acpi.h>
-#include <console/console.h>
-#include <cbmem.h>
-#include <cpu/x86/smm.h>
-
-void *backup_default_smm_area(void)
-{
- void *save_area;
- const void *default_smm = (void *)SMM_DEFAULT_BASE;
-
- if (!IS_ENABLED(CONFIG_HAVE_ACPI_RESUME))
- return NULL;
-
- /*
- * The buffer needs to be preallocated regardless. In the non-resume
- * path it will be allocated for handling resume. Note that cbmem_add()
- * does a find before the addition.
- */
- save_area = cbmem_add(CBMEM_ID_SMM_SAVE_SPACE, SMM_DEFAULT_SIZE);
-
- if (save_area == NULL) {
- printk(BIOS_DEBUG, "SMM save area not added.\n");
- return NULL;
- }
-
- /* Only back up the area on S3 resume. */
- if (acpi_is_wakeup_s3()) {
- memcpy(save_area, default_smm, SMM_DEFAULT_SIZE);
- return save_area;
- }
-
- /*
- * Not the S3 resume path. No need to restore memory contents after
- * SMM relocation.
- */
- return NULL;
-}
-
-void restore_default_smm_area(void *smm_save_area)
-{
- void *default_smm = (void *)SMM_DEFAULT_BASE;
-
- if (smm_save_area == NULL)
- return;
-
- memcpy(default_smm, smm_save_area, SMM_DEFAULT_SIZE);
-}
diff --git a/src/soc/intel/baytrail/Kconfig b/src/soc/intel/baytrail/Kconfig
index 8de32de..fcf382e 100644
--- a/src/soc/intel/baytrail/Kconfig
+++ b/src/soc/intel/baytrail/Kconfig
@@ -11,7 +11,6 @@ config CPU_SPECIFIC_OPTIONS
select ARCH_VERSTAGE_X86_32
select ARCH_ROMSTAGE_X86_32
select ARCH_RAMSTAGE_X86_32
- select BACKUP_DEFAULT_SMM_REGION
select CACHE_MRC_SETTINGS
select CPU_INTEL_TURBO_NOT_PACKAGE_SCOPED
select SUPPORT_CPU_UCODE_IN_CBFS
diff --git a/src/soc/intel/braswell/Kconfig b/src/soc/intel/braswell/Kconfig
index 053aa29..3c6f788 100644
--- a/src/soc/intel/braswell/Kconfig
+++ b/src/soc/intel/braswell/Kconfig
@@ -11,7 +11,6 @@ config CPU_SPECIFIC_OPTIONS
select ARCH_RAMSTAGE_X86_32
select ARCH_ROMSTAGE_X86_32
select ARCH_VERSTAGE_X86_32
- select BACKUP_DEFAULT_SMM_REGION
select CACHE_MRC_SETTINGS
select CACHE_RELOCATED_RAMSTAGE_OUTSIDE_CBMEM if RELOCATABLE_RAMSTAGE
select COLLECT_TIMESTAMPS
diff --git a/src/soc/intel/broadwell/Kconfig b/src/soc/intel/broadwell/Kconfig
index 33644e8..1c29d77 100644
--- a/src/soc/intel/broadwell/Kconfig
+++ b/src/soc/intel/broadwell/Kconfig
@@ -11,7 +11,6 @@ config CPU_SPECIFIC_OPTIONS
select ARCH_VERSTAGE_X86_32
select ARCH_ROMSTAGE_X86_32
select ARCH_RAMSTAGE_X86_32
- select BACKUP_DEFAULT_SMM_REGION
select CACHE_MRC_SETTINGS
select MRC_SETTINGS_PROTECT
select CACHE_RELOCATED_RAMSTAGE_OUTSIDE_CBMEM if RELOCATABLE_RAMSTAGE
diff --git a/src/soc/intel/skylake/Kconfig b/src/soc/intel/skylake/Kconfig
index 302b575..0b7dc8c 100644
--- a/src/soc/intel/skylake/Kconfig
+++ b/src/soc/intel/skylake/Kconfig
@@ -12,7 +12,6 @@ config CPU_SPECIFIC_OPTIONS
select ARCH_ROMSTAGE_X86_32
select ARCH_VERSTAGE_X86_32
select ACPI_NHLT
- select BACKUP_DEFAULT_SMM_REGION
select CACHE_MRC_SETTINGS
select CACHE_RELOCATED_RAMSTAGE_OUTSIDE_CBMEM if RELOCATABLE_RAMSTAGE
select COLLECT_TIMESTAMPS
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/14560
-gerrit
commit 9931d19e9b5ecf938ab718a2d300101f5eb3db7a
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Sat Apr 30 14:56:20 2016 -0500
cpu/x86/smm_module_loader: always build with SMM module support
The SMM module loader code was guarded by CONFIG_SMM_TSEG, but that
guard isn't necessary. It's up to the chipset to take advantage of the
SMM module loading, and the code gets optimized out if it isn't used,
so just expose the declarations unconditionally.
Change-Id: I6ba1b91d0c84febd4f1a92737b3d7303ab61b343
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/cpu/x86/smm/Makefile.inc | 3 +--
src/include/cpu/x86/smm.h | 6 +-----
2 files changed, 2 insertions(+), 7 deletions(-)
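As a rough sketch of how a chipset takes advantage of the now always
available loader (the helper name and its parameters are illustrative,
not taken from this patch):

#include <stddef.h>
#include <cpu/x86/smm.h>

static int load_permanent_smm_handler(void *smram_base, int smram_size,
				      int num_cpus, size_t save_state_size)
{
	struct smm_loader_params params = {
		.per_cpu_stack_size = save_state_size,
		.num_concurrent_stacks = num_cpus,
		.per_cpu_save_state_size = save_state_size,
		.num_concurrent_save_states = num_cpus,
	};

	/* Returns 0 on success, < 0 on failure. */
	return smm_load_module(smram_base, smram_size, &params);
}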
diff --git a/src/cpu/x86/smm/Makefile.inc b/src/cpu/x86/smm/Makefile.inc
index c912a8f..72c9796 100644
--- a/src/cpu/x86/smm/Makefile.inc
+++ b/src/cpu/x86/smm/Makefile.inc
@@ -14,6 +14,7 @@
##
ramstage-$(CONFIG_BACKUP_DEFAULT_SMM_REGION) += backup_default_smm.c
+ramstage-y += smm_module_loader.c
ifeq ($(CONFIG_ARCH_RAMSTAGE_X86_32),y)
$(eval $(call create_class_compiler,smm,x86_32))
@@ -42,8 +43,6 @@ smmstub-y += smm_stub.S
smm-y += smm_module_handler.c
-ramstage-y += smm_module_loader.c
-
ramstage-srcs += $(obj)/cpu/x86/smm/smm.manual
ramstage-srcs += $(obj)/cpu/x86/smm/smmstub.manual
diff --git a/src/include/cpu/x86/smm.h b/src/include/cpu/x86/smm.h
index 64759b0..2b13f8c 100644
--- a/src/include/cpu/x86/smm.h
+++ b/src/include/cpu/x86/smm.h
@@ -488,8 +488,6 @@ void smi_release_lock(void);
/* Get PMBASE address */
u16 smm_get_pmbase(void);
-#if CONFIG_SMM_TSEG
-
struct smm_runtime {
u32 smbase;
u32 save_state_size;
@@ -520,8 +518,8 @@ void asmlinkage smm_handler_start(void *params);
/* Retrieve SMM save state for a given CPU. WARNING: This does not take into
* account CPUs which are configured to not save their state to RAM. */
void *smm_get_save_state(int cpu);
+#endif /* __SMM__ */
-#else
/* SMM Module Loading API */
/* The smm_loader_params structure provides direction to the SMM loader:
@@ -559,8 +557,6 @@ struct smm_loader_params {
/* Both of these return 0 on success, < 0 on failure. */
int smm_setup_relocation_handler(struct smm_loader_params *params);
int smm_load_module(void *smram, int size, struct smm_loader_params *params);
-#endif /* __SMM__ */
-#endif /* CONFIG_SMM_TSEG */
/* Backup and restore default SMM region. */
void *backup_default_smm_area(void);
Aaron Durbin (adurbin(a)chromium.org) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/14557
-gerrit
commit 4553450ba637c337c56311c37c13e94763ad915b
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Fri Apr 29 22:55:49 2016 -0500
cpu/x86: combine multiprocessor and SMM initialization
In order to reduce code duplication, provide a common flow through
callback functions that performs multiprocessor and, optionally, SMM
initialization. The existing MP flight records are still utilized, but
the common flow means the chipset/cpu only needs to provide an mp_ops
structure with callbacks that gather information and hook certain
points in the sequence.
All current users of the MP code can be switched over to this flow,
since none of the existing flight records are overly complicated or
long. Once the conversion has taken place, most of the surface area of
the MP API can be hidden away within the compilation unit proper.
Change-Id: I6f70969631012982126f0d0d76e5fac6880c24f0
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/cpu/x86/mp_init.c | 242 +++++++++++++++++++++++++++++++++++++++++++++++
src/include/cpu/x86/mp.h | 96 +++++++++++++++++++
2 files changed, 338 insertions(+)
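For illustration only -- a minimal, hedged sketch of the new entry
point as a chipset might wire it up (the callback bodies and the values
in them are hypothetical, not taken from this patch):

#include <stddef.h>
#include <stdint.h>
#include <console/console.h>
#include <cpu/x86/mp.h>

static int get_cpu_count(void)
{
	return 4; /* Hypothetical; normally derived from the hardware. */
}

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	/* Hypothetical TSEG layout and save state size. */
	*perm_smbase = 0xaf000000;
	*perm_smsize = 8 * 1024 * 1024;
	*smm_save_state_size = 1024;
}

static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	/* Program this cpu's save state SMBASE to staggered_smbase. */
}

static const struct mp_ops mp_ops = {
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = relocation_handler,
};

void chipset_cpu_bus_init(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops))
		printk(BIOS_ERR, "MP/SMM initialization failure.\n");
}

Per the patch, SMM setup is only attempted when get_smm_info() reports
a non-zero region and save state size and a relocation_handler callback
is provided; otherwise the flow degrades to plain MP initialization.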
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 2180d98..362cda3 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -607,3 +607,245 @@ void smm_initiate_relocation(void)
smm_initiate_relocation_parallel();
spin_unlock(&smm_relocation_lock);
}
+
+struct mp_state {
+ struct mp_ops ops;
+ int cpu_count;
+ uintptr_t perm_smbase;
+ size_t perm_smsize;
+ size_t smm_save_state_size;
+ int do_smm;
+} mp_state;
+
+static int is_smm_enabled(void)
+{
+ return IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) && mp_state.do_smm;
+}
+
+static void smm_disable(void)
+{
+ mp_state.do_smm = 0;
+}
+
+static void smm_enable(void)
+{
+ if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER))
+ mp_state.do_smm = 1;
+}
+
+static void asmlinkage smm_do_relocation(void *arg)
+{
+ const struct smm_module_params *p;
+ const struct smm_runtime *runtime;
+ int cpu;
+ uintptr_t curr_smbase;
+ uintptr_t perm_smbase;
+
+ p = arg;
+ runtime = p->runtime;
+ cpu = p->cpu;
+ curr_smbase = runtime->smbase;
+
+ if (cpu >= CONFIG_MAX_CPUS) {
+ printk(BIOS_CRIT,
+ "Invalid CPU number assigned in SMM stub: %d\n", cpu);
+ return;
+ }
+
+ /*
+ * The permanent handler runs with all cpus concurrently. Precalculate
+ * the location of the new SMBASE. If using SMM modules then this
+ * calculation needs to match that of the module loader.
+ */
+ perm_smbase = mp_state.perm_smbase;
+ perm_smbase -= cpu * runtime->save_state_size;
+
+ printk(BIOS_DEBUG, "New SMBASE 0x%08lx\n", perm_smbase);
+
+ /* Setup code checks this callback for validity. */
+ mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);
+}
+
+static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
+{
+ int i;
+ struct smm_runtime *runtime = smm_params->runtime;
+
+ for (i = 0; i < CONFIG_MAX_CPUS; i++)
+ runtime->apic_id_to_cpu[i] = mp_get_apic_id(i);
+}
+
+static int install_relocation_handler(int num_cpus, size_t save_state_size)
+{
+ struct smm_loader_params smm_params = {
+ .per_cpu_stack_size = save_state_size,
+ .num_concurrent_stacks = num_cpus,
+ .per_cpu_save_state_size = save_state_size,
+ .num_concurrent_save_states = 1,
+ .handler = smm_do_relocation,
+ };
+
+ /* Allow callback to override parameters. */
+ if (mp_state.ops.adjust_smm_params != NULL)
+ mp_state.ops.adjust_smm_params(&smm_params, 0);
+
+ if (smm_setup_relocation_handler(&smm_params))
+ return -1;
+
+ adjust_smm_apic_id_map(&smm_params);
+
+ return 0;
+}
+
+static int install_permanent_handler(int num_cpus, uintptr_t smbase,
+ size_t smsize, size_t save_state_size)
+{
+ /* There are num_cpus concurrent stacks and num_cpus concurrent save
+ * state areas. Lastly, set the stack size to the save state size. */
+ struct smm_loader_params smm_params = {
+ .per_cpu_stack_size = save_state_size,
+ .num_concurrent_stacks = num_cpus,
+ .per_cpu_save_state_size = save_state_size,
+ .num_concurrent_save_states = num_cpus,
+ };
+
+ /* Allow callback to override parameters. */
+ if (mp_state.ops.adjust_smm_params != NULL)
+ mp_state.ops.adjust_smm_params(&smm_params, 1);
+
+ printk(BIOS_DEBUG, "Installing SMM handler to 0x%08lx\n", smbase);
+
+ if (smm_load_module((void *)smbase, smsize, &smm_params))
+ return -1;
+
+ adjust_smm_apic_id_map(&smm_params);
+
+ return 0;
+}
+
+/* Load SMM handlers as part of MP flight record. */
+static void load_smm_handlers(void)
+{
+ size_t smm_save_state_size = mp_state.smm_save_state_size;
+
+ /* Do nothing if SMM is disabled.*/
+ if (!is_smm_enabled())
+ return;
+
+ /* Install handlers. */
+ if (install_relocation_handler(mp_state.cpu_count,
+ smm_save_state_size) < 0) {
+ printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
+ smm_disable();
+ }
+
+ if (install_permanent_handler(mp_state.cpu_count, mp_state.perm_smbase,
+ mp_state.perm_smsize, smm_save_state_size) < 0) {
+ printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
+ smm_disable();
+ }
+
+ /* Ensure the SMM handlers hit DRAM before performing first SMI. */
+ wbinvd();
+
+ /*
+ * Indicate that the SMM handlers have been loaded and MP
+ * initialization is about to start.
+ */
+ if (is_smm_enabled() && mp_state.ops.pre_mp_smm_init != NULL)
+ mp_state.ops.pre_mp_smm_init();
+}
+
+/* Trigger SMM as part of MP flight record. */
+static void trigger_smm_relocation(void)
+{
+ /* Do nothing if SMM is disabled.*/
+ if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
+ return;
+ /* Trigger SMM mode for the currently running processor. */
+ mp_state.ops.per_cpu_smm_trigger();
+}
+
+static struct mp_flight_record mp_steps[] = {
+ /* Once the APs are up load the SMM handlers. */
+ MP_FR_BLOCK_APS(NULL, load_smm_handlers),
+ /* Perform SMM relocation. */
+ MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
+ /* Initialize each cpu through the driver framework. */
+ MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
+ /* Wait for APs to finish everything else then let them park. */
+ MP_FR_BLOCK_APS(NULL, NULL),
+};
+
+static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
+{
+ /*
+ * Make copy of the ops so that defaults can be set in the non-const
+ * structure if needed.
+ */
+ memcpy(&state->ops, ops, sizeof(*ops));
+
+ if (ops->get_cpu_count != NULL)
+ state->cpu_count = ops->get_cpu_count();
+
+ if (ops->get_smm_info != NULL)
+ ops->get_smm_info(&state->perm_smbase, &state->perm_smsize,
+ &state->smm_save_state_size);
+
+ /*
+ * Default to smm_initiate_relocation() if trigger callback isn't
+ * provided.
+ */
+ if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) &&
+ ops->per_cpu_smm_trigger == NULL)
+ mp_state.ops.per_cpu_smm_trigger = smm_initiate_relocation;
+}
+
+int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
+{
+ int ret;
+ void *default_smm_area;
+ struct mp_params mp_params;
+
+ if (mp_ops->pre_mp_init != NULL)
+ mp_ops->pre_mp_init();
+
+ fill_mp_state(&mp_state, mp_ops);
+
+ memset(&mp_params, 0, sizeof(mp_params));
+
+ if (mp_state.cpu_count <= 0) {
+ printk(BIOS_ERR, "Invalid cpu_count: %d\n", mp_state.cpu_count);
+ return -1;
+ }
+
+ /* Sanity check SMM state. */
+ if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
+ mp_state.ops.relocation_handler != NULL)
+ smm_enable();
+
+ if (is_smm_enabled())
+ printk(BIOS_INFO, "Will perform SMM setup.\n");
+
+ mp_params.num_cpus = mp_state.cpu_count;
+ /* Gather microcode information. */
+ if (mp_state.ops.get_microcode_info != NULL)
+ mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
+ &mp_params.parallel_microcode_load);
+ mp_params.adjust_apic_id = mp_state.ops.adjust_cpu_apic_entry;
+ mp_params.flight_plan = &mp_steps[0];
+ mp_params.num_records = ARRAY_SIZE(mp_steps);
+
+ /* Perform backup of default SMM area. */
+ default_smm_area = backup_default_smm_area();
+
+ ret = mp_init(cpu_bus, &mp_params);
+
+ restore_default_smm_area(default_smm_area);
+
+ /* Signal callback on success if it's provided. */
+ if (ret == 0 && mp_state.ops.post_mp_init != NULL)
+ mp_state.ops.post_mp_init();
+
+ return ret;
+}
diff --git a/src/include/cpu/x86/mp.h b/src/include/cpu/x86/mp.h
index 3227975..ff88a20 100644
--- a/src/include/cpu/x86/mp.h
+++ b/src/include/cpu/x86/mp.h
@@ -17,6 +17,7 @@
#define _X86_MP_H_
#include <arch/smp/atomic.h>
+#include <cpu/x86/smm.h>
#define CACHELINE_SIZE 64
@@ -78,6 +79,101 @@ struct mp_params {
int num_records;
};
+/* The sequence of the callbacks are in calling order. */
+struct mp_ops {
+ /*
+ * Optionally provide a callback prior to kicking off MP
+ * startup. This callback is done prior to loading the SIPI
+ * vector but after gathering the MP state information. Please
+ * see the sequence below.
+ */
+ void (*pre_mp_init)(void);
+ /*
+ * Return the number of logical x86 execution contexts that
+ * need to be brought out of SIPI state as well as have SMM
+ * handlers installed.
+ */
+ int (*get_cpu_count)(void);
+ /*
+ * Optionally fill in permanent SMM region and save state size. If
+ * this callback is not present no SMM handlers will be installed.
+ * The perm_smsize is the size available to house the permanent SMM
+ * handler.
+ */
+ void (*get_smm_info)(uintptr_t *perm_smbase, size_t *perm_smsize,
+ size_t *smm_save_state_size);
+ /*
+ * Optionally fill in pointer to microcode and indicate if the APs
+ * can load the microcode in parallel.
+ */
+ void (*get_microcode_info)(const void **microcode, int *parallel);
+ /*
+ * Optionally provide a function which adjusts the APIC id
+ * map to cpu number. By default the cpu number and APIC id
+ * are 1:1. To change the APIC id for a given cpu return the
+ * new APIC id. It's called for each cpu as indicated by
+ * get_cpu_count().
+ */
+ int (*adjust_cpu_apic_entry)(int cpu, int cur_apic_id);
+ /*
+ * Optionally adjust SMM handler parameters to override the default
+ * values. The is_perm variable indicates if the parameters to adjust
+ * are for the relocation handler or the permanent handler. This
+ * function is therefore called twice -- once for each handler.
+ * By default the parameters for each SMM handler are:
+ * stack_size num_concurrent_stacks num_concurrent_save_states
+ * relo: save_state_size get_cpu_count() 1
+ * perm: save_state_size get_cpu_count() get_cpu_count()
+ */
+ void (*adjust_smm_params)(struct smm_loader_params *slp, int is_perm);
+ /*
+ * Optionally provide a callback prior to the APs starting SMM
+ * relocation or cpu driver initialization. However, note that
+ * this callback is called after SMM handlers have been loaded.
+ */
+ void (*pre_mp_smm_init)(void);
+ /*
+ * Optional function to use to trigger SMM to perform relocation. If
+ * not provided, smm_initiate_relocation() is used.
+ */
+ void (*per_cpu_smm_trigger)(void);
+ /*
+ * This function is called while each cpu is in the SMM relocation
+ * handler. Its primary purpose is to adjust the SMBASE for the
+ * permanent handler. The parameters passed are the current cpu
+ * running the relocation handler, current SMBASE of relocation handler,
+ * and the pre-calculated staggered cpu SMBASE address of the permanent
+ * SMM handler.
+ */
+ void (*relocation_handler)(int cpu, uintptr_t curr_smbase,
+ uintptr_t staggered_smbase);
+ /*
+ * Optionally provide a callback that is called after the APs
+ * and the BSP have gone through the initialion sequence.
+ */
+ void (*post_mp_init)(void);
+};
+
+/*
+ * mp_init_with_smm() returns < 0 on failure and 0 on success. The mp_ops
+ * argument is used to drive the multiprocess initialization. Unless otherwise
+ * stated each callback is called on the BSP only. The sequence of operations
+ * is the following:
+ * 1. pre_mp_init()
+ * 2. get_cpu_count()
+ * 3. get_smm_info()
+ * 4. get_microcode_info()
+ * 5. adjust_cpu_apic_entry() for each number of get_cpu_count()
+ * 6. adjust_smm_params(is_perm=0)
+ * 7. adjust_smm_params(is_perm=1)
+ * 8. pre_mp_smm_init()
+ * 9. per_cpu_smm_trigger() in parallel for all cpus which calls
+ * relocation_handler() in SMM.
+ * 10. mp_initialize_cpu() for each cpu
+ * 11. post_mp_init()
+ */
+int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops);
+
/*
* mp_init() will set up the SIPI vector and bring up the APs according to
* mp_params. Each flight record will be executed according to the plan. Note
Werner Zeh (werner.zeh(a)siemens.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/14579
-gerrit
commit 8c786ecaa679958c9975cd83ead344fa01ad7892
Author: Werner Zeh <werner.zeh(a)siemens.com>
Date: Tue May 3 09:52:12 2016 +0200
payload: Fix broken Linux kernel as payload
Commit 785a31d67e8f34065a2483080e4fd7032c3a8aad
(Makefile.inc: Move payload code to payloads/) breaks the usage of a
Linux kernel as payload. The reason is that cbfs-files-y is evaluated
before payloads/external/Makefile.inc is sourced, and as a consequence
ADDITIONAL_PAYLOAD_CONFIG is empty when it is used for the payload
options. That leads to a missing command line and initrd for the
kernel, which in turn leads to a kernel panic when it boots.
To avoid this, move the code which adds the payload to CBFS entirely
into payloads/external/Makefile.inc. This way, ADDITIONAL_PAYLOAD_CONFIG
is set right before the payload itself is added to cbfs-files-y.
I have tested this patch with a Linux kernel as well as with SeaBIOS as
payload on mc_tcu3, and it works. If someone sees an impact on other
payloads, just let me know.
Change-Id: I7aad352f8b3fc1fdba1875b12648b07eba14e282
Signed-off-by: Werner Zeh <werner.zeh(a)siemens.com>
---
Makefile.inc | 10 ----------
payloads/external/Makefile.inc | 10 ++++++++++
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/Makefile.inc b/Makefile.inc
index 0fb7a3d..508d0f9 100644
--- a/Makefile.inc
+++ b/Makefile.inc
@@ -806,16 +806,6 @@ $(CONFIG_CBFS_PREFIX)/ramstage-file := $(objcbfs)/ramstage.elf
$(CONFIG_CBFS_PREFIX)/ramstage-type := stage
$(CONFIG_CBFS_PREFIX)/ramstage-compression := $(CBFS_COMPRESS_FLAG)
-cbfs-files-y += $(CONFIG_CBFS_PREFIX)/payload
-$(CONFIG_CBFS_PREFIX)/payload-file := $(CONFIG_PAYLOAD_FILE)
-ifeq ($(CONFIG_PAYLOAD_IS_FLAT_BINARY),y)
-$(CONFIG_CBFS_PREFIX)/payload-type := flat-binary
-else
-$(CONFIG_CBFS_PREFIX)/payload-type := payload
-endif
-$(CONFIG_CBFS_PREFIX)/payload-compression := $(CBFS_PAYLOAD_COMPRESS_FLAG)
-$(CONFIG_CBFS_PREFIX)/payload-options := $(ADDITIONAL_PAYLOAD_CONFIG)
-
cbfs-files-$(CONFIG_HAVE_REFCODE_BLOB) += $(CONFIG_CBFS_PREFIX)/refcode
$(CONFIG_CBFS_PREFIX)/refcode-file := $(REFCODE_BLOB)
$(CONFIG_CBFS_PREFIX)/refcode-type := stage
diff --git a/payloads/external/Makefile.inc b/payloads/external/Makefile.inc
index 5ae9f7e..65b3dd2 100644
--- a/payloads/external/Makefile.inc
+++ b/payloads/external/Makefile.inc
@@ -42,6 +42,16 @@ ifneq ($(strip $(call strip_quotes,$(CONFIG_PAYLOAD_OPTIONS))),)
ADDITIONAL_PAYLOAD_CONFIG+=$(strip $(call strip_quotes,$(CONFIG_PAYLOAD_OPTIONS)))
endif
+cbfs-files-y += $(CONFIG_CBFS_PREFIX)/payload
+$(CONFIG_CBFS_PREFIX)/payload-file := $(CONFIG_PAYLOAD_FILE)
+ifeq ($(CONFIG_PAYLOAD_IS_FLAT_BINARY),y)
+$(CONFIG_CBFS_PREFIX)/payload-type := flat-binary
+else
+$(CONFIG_CBFS_PREFIX)/payload-type := payload
+endif
+$(CONFIG_CBFS_PREFIX)/payload-compression := $(CBFS_PAYLOAD_COMPRESS_FLAG)
+$(CONFIG_CBFS_PREFIX)/payload-options := $(ADDITIONAL_PAYLOAD_CONFIG)
+
cbfs-files-$(CONFIG_INCLUDE_CONFIG_FILE) += payload_config
payload_config-file := $(PAYLOAD_CONFIG)
payload_config-type := raw
Patrick Georgi (pgeorgi(a)google.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/14573
-gerrit
commit 1b3f36c78f84daf4251545a97aa2a7a07cac4edb
Author: Patrick Georgi <pgeorgi(a)chromium.org>
Date: Mon May 2 17:27:01 2016 +0800
intel/baytrail: use fmap information for code caching
Instead of using CBFS_SIZE from Kconfig, use values generated from fmap.
While at it, make sure that the cached region size is a power of two.
fmap_config is also added to cpu_incs-y, but that doesn't hurt (except
for some miniscule increase in compile time) because it's #if-guarded.
The upside is that dependencies are tracked properly.
Change-Id: I03a919e1381ca3d0e972780b2c7d76c590aaa994
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
---
src/soc/intel/baytrail/romstage/Makefile.inc | 1 +
src/soc/intel/baytrail/romstage/cache_as_ram.inc | 4 +++-
2 files changed, 4 insertions(+), 1 deletion(-)
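The power-of-two requirement exists because the MTRR mask is computed
as ~(size - 1), which only describes a contiguous region when size is a
power of two. A tiny illustrative helper showing what the rounding
amounts to (an assumption about _ALIGN_UP_POW2's behavior, not code
from this patch):

#include <stdint.h>

/* Round a size up to the next power of two, e.g. 0xc0000 -> 0x100000,
 * so that ~(size - 1) forms a valid MTRR mask. */
static uint32_t align_up_pow2(uint32_t size)
{
	uint32_t result = 1;

	while (result < size)
		result <<= 1;
	return result;
}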
diff --git a/src/soc/intel/baytrail/romstage/Makefile.inc b/src/soc/intel/baytrail/romstage/Makefile.inc
index 5086a4e..aa10ba6 100644
--- a/src/soc/intel/baytrail/romstage/Makefile.inc
+++ b/src/soc/intel/baytrail/romstage/Makefile.inc
@@ -1,4 +1,5 @@
cpu_incs-y += $(src)/soc/intel/baytrail/romstage/cache_as_ram.inc
+cpu_incs-y += $(obj)/fmap_config.h
romstage-y += romstage.c
romstage-y += raminit.c
romstage-$(CONFIG_ENABLE_BUILTIN_COM1) += uart.c
diff --git a/src/soc/intel/baytrail/romstage/cache_as_ram.inc b/src/soc/intel/baytrail/romstage/cache_as_ram.inc
index 9ae10af..46bcc03 100644
--- a/src/soc/intel/baytrail/romstage/cache_as_ram.inc
+++ b/src/soc/intel/baytrail/romstage/cache_as_ram.inc
@@ -19,6 +19,8 @@
#include <cpu/x86/post_code.h>
#include <cbmem.h>
+#include "fmap_config.h"
+
/* The full cache-as-ram size includes the cache-as-ram portion from coreboot
* and the space used by the reference code. These 2 values combined should
* be a power of 2 because the MTRR setup assumes that. */
@@ -27,7 +29,7 @@
#define CACHE_AS_RAM_BASE CONFIG_DCACHE_RAM_BASE
/* Cache all of CBFS just below 4GiB as Write-Protect type. */
-#define CODE_CACHE_SIZE (CONFIG_CBFS_SIZE)
+#define CODE_CACHE_SIZE _ALIGN_UP_POW2(___FMAP__COREBOOT_SIZE)
#define CODE_CACHE_BASE (-CODE_CACHE_SIZE)
#define CODE_CACHE_MASK (~(CODE_CACHE_SIZE - 1))
#define CPU_PHYSMASK_HI ((1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1)
the following patch was just integrated into master:
commit 919be612b8383d75a1ea55db6552aa7648af66d1
Author: Patrick Georgi <pgeorgi(a)chromium.org>
Date: Mon May 2 17:02:53 2016 +0800
fmaptool: Export some fmap knowledge to the build environment
By exporting base and offset of CBFS-formatted fmap regions, the code
can use these when it's not prudent to do a runtime lookup.
Change-Id: I20523b5cea68880af4cb1fcea4b37bb8ac2a23db
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Reviewed-on: https://review.coreboot.org/14571
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
See https://review.coreboot.org/14571 for details.
-gerrit
the following patch was just integrated into master:
commit 849635d4ef835862b2fc4ecc4c4c1ee28186ad15
Author: Patrick Georgi <pgeorgi(a)chromium.org>
Date: Mon May 2 16:50:31 2016 +0800
build system: rename fmap.h to fmap_config.h
There's an in-tree fmap.h, and the file generated by fmaptool is likely
used in tandem with it. To avoid problems, rename the generated file
(which so far isn't used).
Change-Id: I95dfde513a7f78677cf18ecd7ce8745e40af316b
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Reviewed-on: https://review.coreboot.org/14570
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
See https://review.coreboot.org/14570 for details.
-gerrit
the following patch was just integrated into master:
commit fa5aba0484836cd19143b6567d8cee77ba072971
Author: Patrick Georgi <pgeorgi(a)chromium.org>
Date: Mon May 2 16:32:02 2016 +0800
arch/x86: Drop CBFS_BASE_ADDRESS
It's unused.
Change-Id: I50af2b50d2c5a7a24afe9099c5c01d17ce54a6c9
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Reviewed-on: https://review.coreboot.org/14569
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
See https://review.coreboot.org/14569 for details.
-gerrit
the following patch was just integrated into master:
commit 6f07ff10d1cfaf1ab3b2a0e2a652277a8189472c
Author: Patrick Georgi <pgeorgi(a)chromium.org>
Date: Mon May 2 16:31:31 2016 +0800
southbridge/amd: Drop HUDSON_FWM_INSIDE_CBFS
It's unused.
Change-Id: I853702e40dcab9f193b2a3de7deeec80ab1d25f0
Signed-off-by: Patrick Georgi <pgeorgi(a)chromium.org>
Reviewed-on: https://review.coreboot.org/14568
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin(a)chromium.org>
See https://review.coreboot.org/14568 for details.
-gerrit