Kyösti Mälkki has uploaded this change for review.

cpu,soc/intel: Refactor em64t101 SMI handlers

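Deduplicate the em64t101 SMM relocation code that was copied between
cpu/intel and soc/intel. The smm_info(), smm_initialize(),
smm_relocate() and relocation handler implementations move into
common src/cpu/intel/smm/ files, and each platform now installs the
appropriate variant at runtime through select_mp_ops_em64t101(),
select_mp_ops_em64t101_alt() or select_mp_ops_em64t101_detect().
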
Change-Id: I177ca3b2966ccd985d72749ea2bd096ec4f233d6
Signed-off-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
---
M src/cpu/intel/haswell/Makefile.inc
M src/cpu/intel/haswell/haswell_init.c
D src/cpu/intel/haswell/smmrelocate.c
M src/cpu/intel/model_1067x/mp_init.c
M src/cpu/intel/model_2065x/model_2065x_init.c
M src/cpu/intel/model_206ax/model_206ax_init.c
M src/cpu/intel/smm/Makefile.inc
A src/cpu/intel/smm/em64t101.c
M src/cpu/intel/smm/gen1/smmrelocate.c
M src/cpu/intel/smm/smm_reloc.c
A src/cpu/intel/smm/smmrelocate_cpu_save.c
M src/include/cpu/intel/em64t101_save_state.h
M src/include/cpu/intel/smm_reloc.h
M src/soc/intel/broadwell/Makefile.inc
M src/soc/intel/broadwell/cpu.c
D src/soc/intel/broadwell/smmrelocate.c
M src/soc/intel/cannonlake/Makefile.inc
M src/soc/intel/cannonlake/cpu.c
D src/soc/intel/cannonlake/smmrelocate.c
M src/soc/intel/denverton_ns/Makefile.inc
M src/soc/intel/denverton_ns/cpu.c
M src/soc/intel/icelake/Makefile.inc
M src/soc/intel/icelake/cpu.c
D src/soc/intel/icelake/smmrelocate.c
M src/soc/intel/skylake/Makefile.inc
M src/soc/intel/skylake/cpu.c
D src/soc/intel/skylake/smmrelocate.c
M src/soc/intel/tigerlake/Makefile.inc
D src/soc/intel/tigerlake/smmrelocate.c
M src/southbridge/intel/lynxpoint/Makefile.inc
30 files changed, 459 insertions(+), 1860 deletions(-)

git pull ssh://review.coreboot.org:29418/coreboot refs/changes/08/37108/1
diff --git a/src/cpu/intel/haswell/Makefile.inc b/src/cpu/intel/haswell/Makefile.inc
index aebeed4..8ae0a65 100644
--- a/src/cpu/intel/haswell/Makefile.inc
+++ b/src/cpu/intel/haswell/Makefile.inc
@@ -3,7 +3,6 @@
romstage-y += ../car/romstage.c

ramstage-y += acpi.c
-ramstage-$(CONFIG_HAVE_SMI_HANDLER) += smmrelocate.c

smm-y += finalize.c

diff --git a/src/cpu/intel/haswell/haswell_init.c b/src/cpu/intel/haswell/haswell_init.c
index 66dca28..e476a10 100644
--- a/src/cpu/intel/haswell/haswell_init.c
+++ b/src/cpu/intel/haswell/haswell_init.c
@@ -746,7 +746,7 @@
*parallel = 1;
}

-static void per_cpu_smm_trigger(void)
+static void alt_per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
smm_relocate();
@@ -759,25 +759,27 @@
{
/* Now that all APs have been relocated as well as the BSP let SMIs
* start flowing. */
- smm_southbridge_enable_smi();
+ if (CONFIG(HAVE_SMI_HANDLER))
+ smm_southbridge_enable_smi();

/* Lock down the SMRAM space. */
smm_lock();
}

-static const struct mp_ops mp_ops = {
+static struct mp_ops mp_ops = {
.pre_mp_init = pre_mp_init,
.get_cpu_count = get_cpu_count,
- .get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
- .pre_mp_smm_init = smm_initialize,
- .per_cpu_smm_trigger = per_cpu_smm_trigger,
- .relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
+ if (CONFIG(HAVE_SMI_HANDLER)) {
+ select_mp_ops_em64t101_detect(&mp_ops);
+ mp_ops.per_cpu_smm_trigger = alt_per_cpu_smm_trigger;
+ }
+
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
}
diff --git a/src/cpu/intel/haswell/smmrelocate.c b/src/cpu/intel/haswell/smmrelocate.c
deleted file mode 100644
index 88e5240..0000000
--- a/src/cpu/intel/haswell/smmrelocate.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <types.h>
-#include <string.h>
-#include <device/device.h>
-#include <device/pci.h>
-#include <device/pci_ops.h>
-#include <cpu/x86/cache.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/mp.h>
-#include <cpu/x86/msr.h>
-#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
-#include <cpu/intel/em64t101_save_state.h>
-#include <cpu/intel/smm_reloc.h>
-#include <console/console.h>
-#include <northbridge/intel/haswell/haswell.h>
-#include <southbridge/intel/lynxpoint/pch.h>
-#include "haswell.h"
-
-#define MSR_PRMRR_PHYS_BASE 0x1f4
-#define MSR_PRMRR_PHYS_MASK 0x1f5
-#define MSR_UNCORE_PRMRR_PHYS_BASE 0x2f4
-#define MSR_UNCORE_PRMRR_PHYS_MASK 0x2f5
-#define SMM_MCA_CAP_MSR 0x17d
-#define SMM_CPU_SVRSTR_BIT 57
-#define SMM_CPU_SVRSTR_MASK (1 << (SMM_CPU_SVRSTR_BIT - 32))
-#define SMM_FEATURE_CONTROL_MSR 0x4e0
-#define SMM_CPU_SAVE_EN (1 << 1)
-/* SMM save state MSRs */
-#define SMBASE_MSR 0xc20
-#define IEDBASE_MSR 0xc22
-
-#define SMRR_SUPPORTED (1 << 11)
-#define PRMRR_SUPPORTED (1 << 12)
-
-
-
-static void update_save_state(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase,
- struct smm_relocation_params *relo_params)
-{
- u32 smbase;
- u32 iedbase;
-
- /* The relocated handler runs with all CPUs concurrently. Therefore
- * stagger the entry points adjusting SMBASE downwards by save state
- * size * CPU num. */
- smbase = staggered_smbase;
- iedbase = relo_params->ied_base;
-
- printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
- smbase, iedbase);
-
- /* All threads need to set IEDBASE and SMBASE to the relocated
- * handler region. However, the save state location depends on the
- * smm_save_state_in_msrs field in the relocation parameters. If
- * smm_save_state_in_msrs is non-zero then the CPUs are relocating
- * the SMM handler in parallel, and each CPUs save state area is
- * located in their respective MSR space. If smm_save_state_in_msrs
- * is zero then the SMM relocation is happening serially so the
- * save state is at the same default location for all CPUs. */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smbase_msr;
- msr_t iedbase_msr;
-
- smbase_msr.lo = smbase;
- smbase_msr.hi = 0;
-
- /* According the BWG the IEDBASE MSR is in bits 63:32. It's
- * not clear why it differs from the SMBASE MSR. */
- iedbase_msr.lo = 0;
- iedbase_msr.hi = iedbase;
-
- wrmsr(SMBASE_MSR, smbase_msr);
- wrmsr(IEDBASE_MSR, iedbase_msr);
- } else {
- em64t101_smm_state_save_area_t *save_state;
-
- save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
- sizeof(*save_state));
-
- save_state->smbase = smbase;
- save_state->iedbase = iedbase;
- }
-}
-
-/* Returns 1 if SMM MSR save state was set. */
-static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
-{
- msr_t smm_mca_cap;
-
- smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
- if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.hi = 0;
- smm_feature_control.lo |= SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- relo_params->smm_save_state_in_msrs = 1;
- }
- return relo_params->smm_save_state_in_msrs;
-}
-
-/* The relocation work is actually performed in SMM context, but the code
- * resides in the ramstage module. This occurs by trampolining from the default
- * SMRAM entry point to here. */
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase)
-{
- msr_t mtrr_cap;
- struct smm_relocation_params *relo_params = &smm_reloc_params;
-
- printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu);
-
- /* Determine if the processor supports saving state in MSRs. If so,
- * enable it before the non-BSPs run so that SMM relocation can occur
- * in parallel in the non-BSP CPUs. */
- if (cpu == 0) {
- /* If smm_save_state_in_msrs is 1 then that means this is the
- * 2nd time through the relocation handler for the BSP.
- * Parallel SMM handler relocation is taking place. However,
- * it is desired to access other CPUs save state in the real
- * SMM handler. Therefore, disable the SMM save state in MSRs
- * feature. */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- } else if (bsp_setup_msr_save_state(relo_params))
- /* Just return from relocation handler if MSR save
- * state is enabled. In that case the BSP will come
- * back into the relocation handler to setup the new
- * SMBASE as well disabling SMM save state in MSRs. */
- return;
- }
-
- /* Make appropriate changes to the save state map. */
- update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
-
- /* Write PRMRR and SMRR MSRs based on indicated support. */
- mtrr_cap = rdmsr(MTRR_CAP_MSR);
- if (mtrr_cap.lo & SMRR_SUPPORTED)
- write_smrr(relo_params);
-
- if (mtrr_cap.lo & PRMRR_SUPPORTED) {
- write_prmrr(relo_params);
- /* UNCORE_PRMRR msrs are package level. Therefore, only
- * configure these MSRs on the BSP. */
- if (cpu == 0)
- write_uncore_prmrr(relo_params);
- }
-}
-
-static void fill_in_relocation_params(struct smm_relocation_params *params)
-{
- uintptr_t tseg_base;
- size_t tseg_size;
-
- u32 prmrr_base;
- u32 prmrr_size;
- int phys_bits;
- /* All range registers are aligned to 4KiB */
- const u32 rmask = ~((1 << 12) - 1);
-
- /* Some of the range registers are dependent on the number of physical
- * address bits supported. */
- phys_bits = cpuid_eax(0x80000008) & 0xff;
-
- /* The range bounded by the TSEGMB and BGSM registers encompasses the
- * SMRAM range as well as the IED range. However, the SMRAM available
- * to the handler is 4MiB since the IEDRAM lives TSEGMB + 4MiB.
- */
- smm_region(&tseg_base, &tseg_size);
-
- /* SMRR has 32-bits of valid address aligned to 4KiB. */
- params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
- params->smrr_base.hi = 0;
- params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
- params->smrr_mask.hi = 0;
-
- smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
-
- /* The PRMRR and UNCORE_PRMRR are at IEDBASE + 2MiB */
- prmrr_base = (params->ied_base + (2 << 20)) & rmask;
- prmrr_size = params->ied_size - (2 << 20);
-
- /* PRMRR has 46 bits of valid address aligned to 4KiB. It's dependent
- * on the number of physical address bits supported. */
- params->prmrr_base.lo = prmrr_base | MTRR_TYPE_WRBACK;
- params->prmrr_base.hi = 0;
- params->prmrr_mask.lo = (~(prmrr_size - 1) & rmask)
- | MTRR_PHYS_MASK_VALID;
- params->prmrr_mask.hi = (1 << (phys_bits - 32)) - 1;
-
- /* UNCORE_PRMRR has 39 bits of valid address aligned to 4KiB. */
- params->uncore_prmrr_base.lo = prmrr_base;
- params->uncore_prmrr_base.hi = 0;
- params->uncore_prmrr_mask.lo = (~(prmrr_size - 1) & rmask) |
- MTRR_PHYS_MASK_VALID;
- params->uncore_prmrr_mask.hi = (1 << (39 - 32)) - 1;
-}
-
-static void setup_ied_area(struct smm_relocation_params *params)
-{
- char *ied_base;
-
- struct ied_header ied = {
- .signature = "INTEL RSVD",
- .size = params->ied_size,
- .reserved = {0},
- };
-
- ied_base = (void *)params->ied_base;
-
- /* Place IED header at IEDBASE. */
- memcpy(ied_base, &ied, sizeof(ied));
-
- /* Zero out 32KiB at IEDBASE + 1MiB */
- memset(ied_base + (1 << 20), 0, (32 << 10));
-
- /* According to the BWG MP init section 2MiB of memory at IEDBASE +
- * 2MiB should be zeroed as well. However, I suspect what is intended
- * is to clear the memory covered by PRMRR. TODO(adurbin): figure out if
- * this is really required.
- */
- //memset(ied_base + (2 << 20), 0, (2 << 20));
-}
-
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
- size_t *smm_save_state_size)
-{
- printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
-
- fill_in_relocation_params(&smm_reloc_params);
-
- smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
-
- setup_ied_area(&smm_reloc_params);
-
- *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
-}
-
-void smm_initialize(void)
-{
- /* Clear the SMM state in the southbridge. */
- smm_southbridge_clear_state();
-
- /*
- * Run the relocation handler for on the BSP to check and set up
- * parallel SMM relocation.
- */
- smm_initiate_relocation();
-
- if (smm_reloc_params.smm_save_state_in_msrs)
- printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
-}
-
-/* The default SMM entry can happen in parallel or serially. If the
- * default SMM entry is done in parallel the BSP has already setup
- * the saving state to each CPU's MSRs. At least one save state size
- * is required for the initial SMM entry for the BSP to determine if
- * parallel SMM relocation is even feasible. */
-void smm_relocate(void)
-{
- /*
- * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
- * shall take place. Run the relocation handler a second time on the
- * BSP to do * the final move. For APs, a relocation handler always
- * needs to be run.
- */
- if (smm_reloc_params.smm_save_state_in_msrs)
- smm_initiate_relocation_parallel();
- else if (!boot_cpu())
- smm_initiate_relocation();
-}
diff --git a/src/cpu/intel/model_1067x/mp_init.c b/src/cpu/intel/model_1067x/mp_init.c
index dd44582..6876e14 100644
--- a/src/cpu/intel/model_1067x/mp_init.c
+++ b/src/cpu/intel/model_1067x/mp_init.c
@@ -50,42 +50,6 @@

/* the SMRR enable and lock bit need to be set in IA32_FEATURE_CONTROL
to enable SMRR so configure IA32_FEATURE_CONTROL early on */
-static void pre_mp_smm_init(void)
-{
- smm_initialize();
-}
-
-#define SMRR_SUPPORTED (1 << 11)
-
-static void per_cpu_smm_trigger(void)
-{
- msr_t mtrr_cap = rdmsr(MTRR_CAP_MSR);
- if (cpu_has_alternative_smrr() && mtrr_cap.lo & SMRR_SUPPORTED) {
- set_feature_ctrl_vmx();
- msr_t ia32_ft_ctrl = rdmsr(IA32_FEATURE_CONTROL);
- /* We don't care if the lock is already setting
- as our smm relocation handler is able to handle
- setups where SMRR is not enabled here. */
- if (ia32_ft_ctrl.lo & (1 << 0)) {
- /* IA32_FEATURE_CONTROL locked. If we set it again we
- get an illegal instruction. */
- printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked\n");
- printk(BIOS_DEBUG, "SMRR status: %senabled\n",
- ia32_ft_ctrl.lo & (1 << 3) ? "" : "not ");
- } else {
- if (!CONFIG(SET_IA32_FC_LOCK_BIT))
- printk(BIOS_INFO,
- "Overriding CONFIG_SET_IA32_FC_LOCK_BIT to enable SMRR\n");
- ia32_ft_ctrl.lo |= (1 << 3) | (1 << 0);
- wrmsr(IA32_FEATURE_CONTROL, ia32_ft_ctrl);
- }
- } else {
- set_vmx_and_lock();
- }
-
- /* Relocate the SMM handler. */
- smm_relocate();
-}

static void post_mp_init(void)
{
@@ -97,14 +61,10 @@
smm_lock();
}

-static const struct mp_ops mp_ops = {
+static struct mp_ops mp_ops = {
.pre_mp_init = pre_mp_init,
.get_cpu_count = get_cpu_count,
- .get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
- .pre_mp_smm_init = pre_mp_smm_init,
- .per_cpu_smm_trigger = per_cpu_smm_trigger,
- .relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};

@@ -112,6 +72,9 @@
{
microcode_patch = intel_microcode_find();

+ if (CONFIG(HAVE_SMI_HANDLER))
+ select_mp_ops_em64t101_alt(&mp_ops);
+
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
}
diff --git a/src/cpu/intel/model_2065x/model_2065x_init.c b/src/cpu/intel/model_2065x/model_2065x_init.c
index d7e84ec..cd93b19 100644
--- a/src/cpu/intel/model_2065x/model_2065x_init.c
+++ b/src/cpu/intel/model_2065x/model_2065x_init.c
@@ -300,7 +300,7 @@
*parallel = 1;
}

-static void per_cpu_smm_trigger(void)
+static void alt_per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
smm_relocate();
@@ -320,19 +320,20 @@
}


-static const struct mp_ops mp_ops = {
+static struct mp_ops mp_ops = {
.pre_mp_init = pre_mp_init,
.get_cpu_count = get_cpu_count,
- .get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
- .pre_mp_smm_init = smm_initialize,
- .per_cpu_smm_trigger = per_cpu_smm_trigger,
- .relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
+ if (CONFIG(HAVE_SMI_HANDLER)) {
+ select_mp_ops_em64t101(&mp_ops);
+ mp_ops.per_cpu_smm_trigger = alt_per_cpu_smm_trigger;
+ }
+
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
}
diff --git a/src/cpu/intel/model_206ax/model_206ax_init.c b/src/cpu/intel/model_206ax/model_206ax_init.c
index cc20676..41ef411 100644
--- a/src/cpu/intel/model_206ax/model_206ax_init.c
+++ b/src/cpu/intel/model_206ax/model_206ax_init.c
@@ -526,7 +526,7 @@
*parallel = 1;
}

-static void per_cpu_smm_trigger(void)
+static void alt_per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
smm_relocate();
@@ -546,19 +546,20 @@
}


-static const struct mp_ops mp_ops = {
+static struct mp_ops mp_ops = {
.pre_mp_init = pre_mp_init,
.get_cpu_count = get_cpu_count,
- .get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
- .pre_mp_smm_init = smm_initialize,
- .per_cpu_smm_trigger = per_cpu_smm_trigger,
- .relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
+ if (CONFIG(HAVE_SMI_HANDLER)) {
+ select_mp_ops_em64t101(&mp_ops);
+ mp_ops.per_cpu_smm_trigger = alt_per_cpu_smm_trigger;
+ }
+
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
}
diff --git a/src/cpu/intel/smm/Makefile.inc b/src/cpu/intel/smm/Makefile.inc
index 97fedc9..5fc801f 100644
--- a/src/cpu/intel/smm/Makefile.inc
+++ b/src/cpu/intel/smm/Makefile.inc
@@ -1,2 +1,4 @@
ramstage-$(CONFIG_HAVE_SMI_HANDLER) += smm_reloc.c
+ramstage-$(CONFIG_HAVE_SMI_HANDLER) += smmrelocate_cpu_save.c
ramstage-$(CONFIG_HAVE_SMI_HANDLER) += em64t100.c
+ramstage-$(CONFIG_HAVE_SMI_HANDLER) += em64t101.c
diff --git a/src/cpu/intel/smm/em64t101.c b/src/cpu/intel/smm/em64t101.c
new file mode 100644
index 0000000..9d1003f
--- /dev/null
+++ b/src/cpu/intel/smm/em64t101.c
@@ -0,0 +1,88 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <types.h>
+#include <cpu/x86/mp.h>
+#include <cpu/intel/em64t101_save_state.h>
+#include <cpu/intel/smm_reloc.h>
+
+/*
+ * ice, sky, cannon : without ied
+ * fsp_broadwell_de : ied with prmrr
+ * haswell, broadwell : ied with prmrr and uncore_prmrr
+ */
+
+static void std_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
+ size_t *smm_save_state_size)
+{
+ fill_in_smrr(&smm_reloc_params);
+ fill_in_ied_params(&smm_reloc_params, 0);
+
+ smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
+ *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
+}
+
+static void std_smm_initialize(void)
+{
+ /* Clear the SMM state in the southbridge. */
+ smm_southbridge_clear_state();
+
+ /* Run the relocation handler on the BSP to check and set up
+ parallel SMM relocation. */
+ smm_initiate_relocation();
+
+ if (smm_reloc_params.smm_save_state_in_msrs)
+ printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
+}
+
+/* The relocation work is actually performed in SMM context, but the code
+ * resides in the ramstage module. This occurs by trampolining from the default
+ * SMRAM entry point to here. */
+static void std_smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+ uintptr_t staggered_smbase)
+{
+ struct smm_relocation_params *relo_params = &smm_reloc_params;
+ em64t101_smm_state_save_area_t *save_state;
+
+ printk(BIOS_DEBUG, "In relocation handler: cpu %d SMBASE=0x%08x\n", cpu,
+ (u32)staggered_smbase);
+
+ /* Make appropriate changes to the save state map. */
+ save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE - sizeof(*save_state));
+ save_state->smbase = staggered_smbase;
+ save_state->iedbase = relo_params->ied_base;
+
+ /* Write SMRR MSRs based on indicated support. */
+ write_smm_msrs(cpu, relo_params);
+}
+
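+/* Install the common em64t101 SMM hooks into mp_ops. Callers may still
+ * override individual fields (e.g. per_cpu_smm_trigger) afterwards. */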
+void select_mp_ops_em64t101(struct mp_ops *mp_ops)
+{
+ mp_ops->get_smm_info = std_smm_info;
+ mp_ops->relocation_handler = std_smm_relocation_handler;
+
+ mp_ops->pre_mp_smm_init = std_smm_initialize;
+ mp_ops->per_cpu_smm_trigger = smm_relocate;
+}
+
+void select_mp_ops_em64t101_alt(struct mp_ops *mp_ops)
+{
+ select_mp_ops_em64t101(mp_ops);
+ detect_em64t101_alternative_smrr(mp_ops);
+}
+
+void select_mp_ops_em64t101_detect(struct mp_ops *mp_ops)
+{
+ select_mp_ops_em64t101(mp_ops);
+ detect_em64t101_cpu_save(mp_ops);
+}
diff --git a/src/cpu/intel/smm/gen1/smmrelocate.c b/src/cpu/intel/smm/gen1/smmrelocate.c
index df54c3c..81eb17a 100644
--- a/src/cpu/intel/smm/gen1/smmrelocate.c
+++ b/src/cpu/intel/smm/gen1/smmrelocate.c
@@ -32,39 +32,71 @@

#define SMRR_SUPPORTED (1 << 11)

-
-
-/* On model_6fx, model_1067x and model_106cx SMRR functions slightly
- differently. The MSR are at different location from the rest
- and need to be explicitly enabled in IA32_FEATURE_CONTROL MSR. */
-bool cpu_has_alternative_smrr(void)
+static void alt_1_per_cpu_smm_trigger(void)
{
- struct cpuinfo_x86 c;
- get_fms(&c, cpuid_eax(1));
- if (c.x86 != 6)
- return false;
- switch (c.x86_model) {
- case 0xf:
- case 0x17: /* core2 */
- case 0x1c: /* Bonnell */
- return true;
- default:
- return false;
- }
+ set_vmx_and_lock();
+
+ /* Relocate the SMM handler. */
+ smm_relocate();
}

-static void write_smrr_alt(struct smm_relocation_params *relo_params)
+static void alt_2_per_cpu_smm_trigger(void)
{
- msr_t msr;
+ msr_t mtrr_cap = rdmsr(MTRR_CAP_MSR);
+ if (mtrr_cap.lo & SMRR_SUPPORTED) {
+ set_feature_ctrl_vmx();
+ msr_t ia32_ft_ctrl = rdmsr(IA32_FEATURE_CONTROL);
+ /* We don't care if the lock is already set
+ as our SMM relocation handler is able to handle
+ setups where SMRR is not enabled here. */
+ if (ia32_ft_ctrl.lo & (1 << 0)) {
+ /* IA32_FEATURE_CONTROL locked. If we set it again we
+ get an illegal instruction. */
+ printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked\n");
+ printk(BIOS_DEBUG, "SMRR status: %senabled\n",
+ ia32_ft_ctrl.lo & (1 << 3) ? "" : "not ");
+ } else {
+ if (!CONFIG(SET_IA32_FC_LOCK_BIT))
+ printk(BIOS_INFO,
+ "Overriding CONFIG_SET_IA32_FC_LOCK_BIT to enable SMRR\n");
+ ia32_ft_ctrl.lo |= (1 << 3) | (1 << 0);
+ wrmsr(IA32_FEATURE_CONTROL, ia32_ft_ctrl);
+ }
+ }
+
+ /* Relocate the SMM handler. */
+ smm_relocate();
+}
+
+static void alt_smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+ uintptr_t staggered_smbase)
+{
+ struct smm_relocation_params *relo_params = &smm_reloc_params;
+ em64t101_smm_state_save_area_t *save_state;
+ msr_t mtrr_cap, msr;
+
+ printk(BIOS_DEBUG, "In relocation handler: cpu %d SMBASE=0x%08x\n", cpu,
+ (u32)staggered_smbase);
+
+ /* Make appropriate changes to the save state map. */
+ save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE - sizeof(*save_state));
+ save_state->smbase = staggered_smbase;
+ save_state->iedbase = relo_params->ied_base;
+
+ /* Write SMRR MSRs based on indicated support. */
+ mtrr_cap = rdmsr(MTRR_CAP_MSR);
+ if (!(mtrr_cap.lo & SMRR_SUPPORTED))
+ return;
+
msr = rdmsr(IA32_FEATURE_CONTROL);
/* SMRR enabled and feature locked */
- if (!((msr.lo & SMRR_ENABLE)
- && (msr.lo & FEATURE_CONTROL_LOCK_BIT))) {
- printk(BIOS_WARNING,
- "SMRR not enabled, skip writing SMRR...\n");
+ if (!((msr.lo & SMRR_ENABLE) && (msr.lo & FEATURE_CONTROL_LOCK_BIT))) {
+ printk(BIOS_WARNING, "SMRR not enabled, skip writing SMRR...\n");
return;
}

+ /* Bits [0:11] of SMRR base are reserved on these models, so strip
+ the MTRR type bits before writing. */
+ relo_params->smrr_base.lo &= ~((1 << 12) - 1);
+
printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
relo_params->smrr_base.lo, relo_params->smrr_mask.lo);

@@ -72,142 +104,28 @@
wrmsr(MSR_SMRR_PHYS_MASK, relo_params->smrr_mask);
}

-static void fill_in_relocation_params(struct smm_relocation_params *params)
+/* On model_6fx, model_1067x and model_106cx SMRR functions slightly
+ differently. The MSRs are at a different location from the rest
+ and need to be explicitly enabled in the IA32_FEATURE_CONTROL MSR. */
+void detect_em64t101_alternative_smrr(struct mp_ops *mp_ops)
{
- uintptr_t tseg_base;
- size_t tseg_size;
+ struct cpuinfo_x86 c;

- /* All range registers are aligned to 4KiB */
- const u32 rmask = ~((1 << 12) - 1);
+ mp_ops->per_cpu_smm_trigger = alt_1_per_cpu_smm_trigger;

- smm_region(&tseg_base, &tseg_size);
+ get_fms(&c, cpuid_eax(1));
+ if (c.x86 != 6)
+ return;

- if (!IS_ALIGNED(tseg_base, tseg_size)) {
- printk(BIOS_WARNING,
- "TSEG base not aligned with TSEG SIZE! Not setting SMRR\n");
+ switch (c.x86_model) {
+ case 0xf:
+ case 0x17: /* core2 */
+ case 0x1c: /* Bonnell */
+ break;
+ default:
return;
}

- /* SMRR has 32-bits of valid address aligned to 4KiB. */
- params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
- params->smrr_base.hi = 0;
- params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
- params->smrr_mask.hi = 0;
-
- /* On model_6fx and model_1067x bits [0:11] on smrr_base are reserved */
- if (cpu_has_alternative_smrr())
- params->smrr_base.lo &= rmask;
-
- smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
-}
-
-static void setup_ied_area(struct smm_relocation_params *params)
-{
- char *ied_base;
-
- struct ied_header ied = {
- .signature = "INTEL RSVD",
- .size = params->ied_size,
- .reserved = {0},
- };
-
- ied_base = (void *)params->ied_base;
-
- /* Place IED header at IEDBASE. */
- memcpy(ied_base, &ied, sizeof(ied));
-
- /* Zero out 32KiB at IEDBASE + 1MiB */
- memset(ied_base + (1 << 20), 0, (32 << 10));
-}
-
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
- size_t *smm_save_state_size)
-{
- printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
-
- fill_in_relocation_params(&smm_reloc_params);
-
- smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
-
- if (smm_reloc_params.ied_size)
- setup_ied_area(&smm_reloc_params);
-
- /* This may not be be correct for older CPU's supported by this code,
- but given that em64t101_smm_state_save_area_t is larger than the
- save_state of these CPU's it works. */
- *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
-}
-
-void smm_initialize(void)
-{
- /* Clear the SMM state in the southbridge. */
- smm_southbridge_clear_state();
-
- /*
- * Run the relocation handler for on the BSP to check and set up
- * parallel SMM relocation.
- */
- smm_initiate_relocation();
-}
-
-/* The relocation work is actually performed in SMM context, but the code
- * resides in the ramstage module. This occurs by trampolining from the default
- * SMRAM entry point to here. */
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase)
-{
- msr_t mtrr_cap;
- struct smm_relocation_params *relo_params = &smm_reloc_params;
- /* The em64t101 save state is sufficiently compatible with older
- save states with regards of smbase, smm_revision. */
- em64t101_smm_state_save_area_t *save_state;
- u32 smbase = staggered_smbase;
- u32 iedbase = relo_params->ied_base;
-
- printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu);
-
- /* Make appropriate changes to the save state map. */
- if (relo_params->ied_size)
- printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
- smbase, iedbase);
- else
- printk(BIOS_DEBUG, "New SMBASE=0x%08x\n",
- smbase);
-
- save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
- sizeof(*save_state));
- save_state->smbase = smbase;
-
- printk(BIOS_SPEW, "SMM revision: 0x%08x\n", save_state->smm_revision);
- if (save_state->smm_revision == 0x00030101)
- save_state->iedbase = iedbase;
-
- /* Write EMRR and SMRR MSRs based on indicated support. */
- mtrr_cap = rdmsr(MTRR_CAP_MSR);
- if (!(mtrr_cap.lo & SMRR_SUPPORTED))
- return;
-
- if (cpu_has_alternative_smrr())
- write_smrr_alt(relo_params);
- else
- write_smrr(relo_params);
-}
-
-/*
- * The default SMM entry can happen in parallel or serially. If the
- * default SMM entry is done in parallel the BSP has already setup
- * the saving state to each CPU's MSRs. At least one save state size
- * is required for the initial SMM entry for the BSP to determine if
- * parallel SMM relocation is even feasible.
- */
-void smm_relocate(void)
-{
- /*
- * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
- * shall take place. Run the relocation handler a second time on the
- * BSP to do the final move. For APs, a relocation handler always
- * needs to be run.
- */
- if (!boot_cpu())
- smm_initiate_relocation();
+ mp_ops->relocation_handler = alt_smm_relocation_handler;
+ mp_ops->per_cpu_smm_trigger = alt_2_per_cpu_smm_trigger;
}
diff --git a/src/cpu/intel/smm/smm_reloc.c b/src/cpu/intel/smm/smm_reloc.c
index 860c095..74908af 100644
--- a/src/cpu/intel/smm/smm_reloc.c
+++ b/src/cpu/intel/smm/smm_reloc.c
@@ -11,6 +11,142 @@
* GNU General Public License for more details.
*/

+#include <string.h>
+#include <types.h>
+#include <arch/cpu.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/mtrr.h>
+#include <cpu/x86/smm.h>
#include <cpu/intel/smm_reloc.h>

struct smm_relocation_params smm_reloc_params;
+
+#define SMRR_SUPPORTED (1 << 11)
+#define PRMRR_SUPPORTED (1 << 12)
+
+void fill_in_smrr(struct smm_relocation_params *params)
+{
+ uintptr_t tseg_base;
+ size_t tseg_size;
+
+ /* All range registers are aligned to 4KiB */
+ const u32 rmask = ~((4 * KiB) - 1);
+
+ smm_region(&tseg_base, &tseg_size);
+
+ /* SMRR has 32-bits of valid address aligned to 4KiB. */
+ params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
+ params->smrr_base.hi = 0;
+ params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+ params->smrr_mask.hi = 0;
+}
+
+/* The default SMM entry can happen in parallel or serially. If the
+ * default SMM entry is done in parallel the BSP has already setup
+ * the saving state to each CPU's MSRs. At least one save state size
+ * is required for the initial SMM entry for the BSP to determine if
+ * parallel SMM relocation is even feasible. */
+void smm_relocate(void)
+{
+ /*
+ * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
+ * shall take place. Run the relocation handler a second time on the
+ * BSP to do the final move. For APs, a relocation handler always
+ * needs to be run.
+ */
+ if (smm_reloc_params.smm_save_state_in_msrs)
+ smm_initiate_relocation_parallel();
+ else if (!boot_cpu())
+ smm_initiate_relocation();
+}
+
+static void setup_ied_region(char *ied_base, size_t ied_size)
+{
+ struct ied_header ied = {
+ .signature = "INTEL RSVD",
+ .size = ied_size,
+ .reserved = {0},
+ };
+
+ /* Place IED header at IEDBASE. */
+ memcpy(ied_base, &ied, sizeof(ied));
+
+ /* Zero out 32KiB at IEDBASE + 1MiB */
+ memset(ied_base + (1 << 20), 0, (32 << 10));
+}
+
+void fill_in_ied_params(struct smm_relocation_params *params, u32 caps)
+{
+ u32 prmrr_base;
+ u32 prmrr_size;
+ int phys_bits;
+
+ /* All range registers are aligned to 4KiB */
+ const u32 rmask = ~((4 * KiB) - 1);
+
+ smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
+ if (!params->ied_size)
+ return;
+
+ setup_ied_region((char *)params->ied_base, params->ied_size);
+
+ if (!(caps & PRMRR_SUPPORTED))
+ return;
+
+ /* Some of the range registers are dependent on the number of physical
+ address bits supported. */
+ phys_bits = cpuid_eax(0x80000008) & 0xff;
+
+ /* The PRMRR is at IEDBASE + 2MiB */
+ prmrr_base = (params->ied_base + (2 << 20)) & rmask;
+ prmrr_size = params->ied_size - (2 << 20);
+
+ /* PRMRR has 46 bits of valid address aligned to 4KiB. It's dependent
+ on the number of physical address bits supported. */
+ params->prmrr_base.lo = prmrr_base | MTRR_TYPE_WRBACK;
+ params->prmrr_base.hi = 0;
+ params->prmrr_mask.lo = (~(prmrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+ params->prmrr_mask.hi = (1 << (phys_bits - 32)) - 1;
+
+ /* UNCORE_PRMRR has 39 bits of valid address aligned to 4KiB. */
+ params->uncore_prmrr_base.lo = prmrr_base;
+ params->uncore_prmrr_base.hi = 0;
+ params->uncore_prmrr_mask.lo = (~(prmrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+ params->uncore_prmrr_mask.hi = (1 << (39 - 32)) - 1;
+}
+
+#define MSR_PRMRR_PHYS_BASE 0x1f4
+#define MSR_PRMRR_PHYS_MASK 0x1f5
+#define MSR_UNCORE_PRMRR_PHYS_BASE 0x2f4
+#define MSR_UNCORE_PRMRR_PHYS_MASK 0x2f5
+
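+/* Program SMRR, and PRMRR where supported, from the relocation
+ * parameters. This runs on each CPU from within the relocation handler. */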
+void write_smm_msrs(int cpu, struct smm_relocation_params *relo_params)
+{
+ msr_t mtrr_cap;
+ mtrr_cap = rdmsr(MTRR_CAP_MSR);
+ if (mtrr_cap.lo & SMRR_SUPPORTED) {
+ if (!cpu) {
+ printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
+ relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
+ }
+ wrmsr(IA32_SMRR_PHYS_BASE, relo_params->smrr_base);
+ wrmsr(IA32_SMRR_PHYS_MASK, relo_params->smrr_mask);
+ }
+
+ if (mtrr_cap.lo & PRMRR_SUPPORTED) {
+
+ if (!cpu) {
+ printk(BIOS_DEBUG, "Writing PRMRR. base = 0x%08x, mask=0x%08x\n",
+ relo_params->prmrr_base.lo, relo_params->prmrr_mask.lo);
+ }
+ wrmsr(MSR_PRMRR_PHYS_BASE, relo_params->prmrr_base);
+ wrmsr(MSR_PRMRR_PHYS_MASK, relo_params->prmrr_mask);
+
+ if (cpu)
+ return;
+
+ /* UNCORE_PRMRR msrs are package level. Therefore, only
+ * configure these MSRs on the BSP. */
+ printk(BIOS_DEBUG,
+ "Writing UNCORE_PRMRR. base = 0x%08x, mask=0x%08x\n",
+ relo_params->uncore_prmrr_base.lo,
+ relo_params->uncore_prmrr_mask.lo);
+ wrmsr(MSR_UNCORE_PRMRR_PHYS_BASE, relo_params->uncore_prmrr_base);
+ wrmsr(MSR_UNCORE_PRMRR_PHYS_MASK, relo_params->uncore_prmrr_mask);
+ }
+}
diff --git a/src/cpu/intel/smm/smmrelocate_cpu_save.c b/src/cpu/intel/smm/smmrelocate_cpu_save.c
new file mode 100644
index 0000000..b1a04a1
--- /dev/null
+++ b/src/cpu/intel/smm/smmrelocate_cpu_save.c
@@ -0,0 +1,97 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <types.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/mp.h>
+#include <cpu/intel/smm_reloc.h>
+
+#define SMM_MCA_CAP_MSR 0x17d
+#define SMM_CPU_SVRSTR_BIT 57
+#define SMM_CPU_SVRSTR_MASK (1 << (SMM_CPU_SVRSTR_BIT - 32))
+#define SMM_FEATURE_CONTROL_MSR 0x4e0
+#define SMM_CPU_SAVE_EN (1 << 1)
+/* SMM save state MSRs */
+#define SMBASE_MSR 0xc20
+#define IEDBASE_MSR 0xc22
+
+/* All threads need to set IEDBASE and SMBASE to the relocated
+ * handler region. However, the save state location depends on the
+ * smm_save_state_in_msrs field in the relocation parameters. If
+ * smm_save_state_in_msrs is non-zero then the CPUs are relocating
+ * the SMM handler in parallel, and each CPUs save state area is
+ * located in their respective MSR space. If smm_save_state_in_msrs
+ * is zero then the SMM relocation is happening serially so the
+ * save state is at the same default location for all CPUs. */
+
+static int switch_to_cpu_save(struct smm_relocation_params *relo_params)
+{
+ msr_t smm_feature_control;
+
+ /* If smm_save_state_in_msrs is 1 then that means this is the
+ * 2nd time through the relocation handler for the BSP.
+ * Parallel SMM handler relocation is taking place. However,
+ * it is desired to access other CPUs save state in the real
+ * SMM handler. Therefore, disable the SMM save state in MSRs
+ * feature. */
+ smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
+ if (!relo_params->smm_save_state_in_msrs) {
+ smm_feature_control.hi = 0;
+ smm_feature_control.lo |= SMM_CPU_SAVE_EN;
+ wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
+ relo_params->smm_save_state_in_msrs = 1;
+ return relo_params->smm_save_state_in_msrs;
+ } else {
+ smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
+ wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
+ return 0;
+ }
+}
+
+/* The relocation work is actually performed in SMM context, but the code
+ * resides in the ramstage module. This occurs by trampolining from the default
+ * SMRAM entry point to here. */
+static void std_smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+ uintptr_t staggered_smbase)
+{
+ struct smm_relocation_params *relo_params = &smm_reloc_params;
+ msr_t base_msr;
+
+ printk(BIOS_DEBUG, "In relocation handler: cpu %d SMBASE=0x%08x\n", cpu,
+ (u32)staggered_smbase);
+
+ /* The processor supports saving state in MSRs. Enable it before the non-BSPs
+ * run so that SMM relocation can occur in parallel in the non-BSP CPUs. */
+
+ /* If MSR save state was enabled, just return. In that case the BSP will come
+ * back into the relocation handler to set up the new SMBASE as well as
+ * disabling SMM save state in MSRs. */
+ if ((cpu == 0) && switch_to_cpu_save(relo_params))
+ return;
+
+ /* The relocated handler runs with all CPUs concurrently. Therefore
+ * stagger the entry points adjusting SMBASE downwards by save state
+ * size * CPU num. */
+
+ base_msr.lo = staggered_smbase;
+ base_msr.hi = 0;
+ wrmsr(SMBASE_MSR, base_msr);
+
+ /* According to the BWG the IEDBASE MSR is in bits 63:32. It's
+ * not clear why it differs from the SMBASE MSR. */
+ base_msr.lo = 0;
+ base_msr.hi = relo_params->ied_base;
+ wrmsr(IEDBASE_MSR, base_msr);
+
+ /* Write SMRR MSRs based on indicated support. */
+ write_smm_msrs(cpu, relo_params);
+}
+
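+/* Use the MSR-based save state handler only when the CPU advertises the
+ * SMM_CPU_SVRSTR capability in the SMM_MCA_CAP MSR. */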
+void detect_em64t101_cpu_save(struct mp_ops *mp_ops)
+{
+ msr_t smm_mca_cap;
+
+ smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
+
+ if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK)
+ mp_ops->relocation_handler = std_smm_relocation_handler;
+}
diff --git a/src/include/cpu/intel/em64t101_save_state.h b/src/include/cpu/intel/em64t101_save_state.h
index 7493c85..cb896ef 100644
--- a/src/include/cpu/intel/em64t101_save_state.h
+++ b/src/include/cpu/intel/em64t101_save_state.h
@@ -118,4 +118,8 @@
u64 cr0;
} __packed em64t101_smm_state_save_area_t;

+struct mp_ops;
+void select_mp_ops_em64t101(struct mp_ops *mp_ops);
+void select_mp_ops_em64t101_alt(struct mp_ops *mp_ops);
+void select_mp_ops_em64t101_detect(struct mp_ops *mp_ops);
+
#endif
diff --git a/src/include/cpu/intel/smm_reloc.h b/src/include/cpu/intel/smm_reloc.h
index 68fe33f..49333eb 100644
--- a/src/include/cpu/intel/smm_reloc.h
+++ b/src/include/cpu/intel/smm_reloc.h
@@ -17,7 +17,6 @@
#include <console/console.h>
#include <types.h>
#include <cpu/x86/msr.h>
-#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>

struct smm_relocation_params {
@@ -46,6 +45,7 @@
} __packed;

void fill_in_smrr(struct smm_relocation_params *params);
+void fill_in_ied_params(struct smm_relocation_params *params, u32 caps);
+void write_smm_msrs(int cpu, struct smm_relocation_params *relo_params);
+
+struct mp_ops;
+void detect_em64t101_alternative_smrr(struct mp_ops *mp_ops);
+void detect_em64t101_cpu_save(struct mp_ops *mp_ops);

/* These helpers are for performing SMM relocation. */
void smm_relocate(void);
@@ -80,42 +80,4 @@
void smm_southbridge_enable_smi(void);
void smm_southbridge_enable(uint16_t pm1_events);

-/* To be removed. */
-void smm_initialize(void);
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size);
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase);
-
-bool cpu_has_alternative_smrr(void);
-
-#define MSR_PRMRR_PHYS_BASE 0x1f4
-#define MSR_PRMRR_PHYS_MASK 0x1f5
-#define MSR_UNCORE_PRMRR_PHYS_BASE 0x2f4
-#define MSR_UNCORE_PRMRR_PHYS_MASK 0x2f5
-
-static inline void write_smrr(struct smm_relocation_params *relo_params)
-{
- printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
- relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
- wrmsr(IA32_SMRR_PHYS_BASE, relo_params->smrr_base);
- wrmsr(IA32_SMRR_PHYS_MASK, relo_params->smrr_mask);
-}
-
-static inline void write_prmrr(struct smm_relocation_params *relo_params)
-{
- printk(BIOS_DEBUG, "Writing PRMRR. base = 0x%08x, mask=0x%08x\n",
- relo_params->prmrr_base.lo, relo_params->prmrr_mask.lo);
- wrmsr(MSR_PRMRR_PHYS_BASE, relo_params->prmrr_base);
- wrmsr(MSR_PRMRR_PHYS_MASK, relo_params->prmrr_mask);
-}
-
-static inline void write_uncore_prmrr(struct smm_relocation_params *relo_params)
-{
- printk(BIOS_DEBUG,
- "Writing UNCORE_PRMRR. base = 0x%08x, mask=0x%08x\n",
- relo_params->uncore_prmrr_base.lo,
- relo_params->uncore_prmrr_mask.lo);
- wrmsr(MSR_UNCORE_PRMRR_PHYS_BASE, relo_params->uncore_prmrr_base);
- wrmsr(MSR_UNCORE_PRMRR_PHYS_MASK, relo_params->uncore_prmrr_mask);
-}
-
#endif
diff --git a/src/soc/intel/broadwell/Makefile.inc b/src/soc/intel/broadwell/Makefile.inc
index 03aa3fb..692dc1c 100644
--- a/src/soc/intel/broadwell/Makefile.inc
+++ b/src/soc/intel/broadwell/Makefile.inc
@@ -57,7 +57,11 @@
romstage-y += smbus_common.c
ramstage-y += smi.c
smm-y += smihandler.c
-ramstage-y += smmrelocate.c
+bootblock-y += spi.c
+romstage-y += spi.c
+postcar-y += spi.c
+ramstage-y += spi.c
+smm-$(CONFIG_SPI_FLASH_SMM) += spi.c
ramstage-y += systemagent.c
bootblock-y += tsc_freq.c
ramstage-y += tsc_freq.c
diff --git a/src/soc/intel/broadwell/cpu.c b/src/soc/intel/broadwell/cpu.c
index 287b5b5..48bfdfc 100644
--- a/src/soc/intel/broadwell/cpu.c
+++ b/src/soc/intel/broadwell/cpu.c
@@ -635,7 +635,7 @@
*parallel = 1;
}

-static void per_cpu_smm_trigger(void)
+static void alt_per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
smm_relocate();
@@ -651,20 +651,17 @@

/* Now that all APs have been relocated as well as the BSP let SMIs
* start flowing. */
- smm_southbridge_enable_smi();
+ if (CONFIG(HAVE_SMI_HANDLER))
+ smm_southbridge_enable_smi();

/* Lock down the SMRAM space. */
smm_lock();
}

-static const struct mp_ops mp_ops = {
+static struct mp_ops mp_ops = {
.pre_mp_init = pre_mp_init,
.get_cpu_count = get_cpu_count,
- .get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
- .pre_mp_smm_init = smm_initialize,
- .per_cpu_smm_trigger = per_cpu_smm_trigger,
- .relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};

@@ -672,6 +669,11 @@
{
struct bus *cpu_bus = dev->link_list;

+ if (CONFIG(HAVE_SMI_HANDLER)) {
+ select_mp_ops_em64t101_detect(&mp_ops);
+ mp_ops.per_cpu_smm_trigger = alt_per_cpu_smm_trigger;
+ }
+
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
}
diff --git a/src/soc/intel/broadwell/smmrelocate.c b/src/soc/intel/broadwell/smmrelocate.c
deleted file mode 100644
index 0df9289..0000000
--- a/src/soc/intel/broadwell/smmrelocate.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2014 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <types.h>
-#include <string.h>
-#include <device/device.h>
-#include <device/pci.h>
-#include <device/pci_ops.h>
-#include <cpu/x86/cache.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/mp.h>
-#include <cpu/x86/msr.h>
-#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
-#include <cpu/intel/em64t101_save_state.h>
-#include <cpu/intel/smm_reloc.h>
-#include <console/console.h>
-#include <soc/cpu.h>
-#include <soc/msr.h>
-#include <soc/pci_devs.h>
-#include <soc/systemagent.h>
-
-
-static void update_save_state(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase,
- struct smm_relocation_params *relo_params)
-{
- u32 smbase;
- u32 iedbase;
-
- /* The relocated handler runs with all CPUs concurrently. Therefore
- * stagger the entry points adjusting SMBASE downwards by save state
- * size * CPU num. */
- smbase = staggered_smbase;
- iedbase = relo_params->ied_base;
-
- printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
- smbase, iedbase);
-
- /* All threads need to set IEDBASE and SMBASE to the relocated
- * handler region. However, the save state location depends on the
- * smm_save_state_in_msrs field in the relocation parameters. If
- * smm_save_state_in_msrs is non-zero then the CPUs are relocating
- * the SMM handler in parallel, and each CPUs save state area is
- * located in their respective MSR space. If smm_save_state_in_msrs
- * is zero then the SMM relocation is happening serially so the
- * save state is at the same default location for all CPUs. */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smbase_msr;
- msr_t iedbase_msr;
-
- smbase_msr.lo = smbase;
- smbase_msr.hi = 0;
-
- /* According the BWG the IEDBASE MSR is in bits 63:32. It's
- * not clear why it differs from the SMBASE MSR. */
- iedbase_msr.lo = 0;
- iedbase_msr.hi = iedbase;
-
- wrmsr(SMBASE_MSR, smbase_msr);
- wrmsr(IEDBASE_MSR, iedbase_msr);
- } else {
- em64t101_smm_state_save_area_t *save_state;
-
- save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
- sizeof(*save_state));
-
- save_state->smbase = smbase;
- save_state->iedbase = iedbase;
- }
-}
-
-/* Returns 1 if SMM MSR save state was set. */
-static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
-{
- msr_t smm_mca_cap;
-
- smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
- if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.hi = 0;
- smm_feature_control.lo |= SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- relo_params->smm_save_state_in_msrs = 1;
- }
- return relo_params->smm_save_state_in_msrs;
-}
-
-/* The relocation work is actually performed in SMM context, but the code
- * resides in the ramstage module. This occurs by trampolining from the default
- * SMRAM entry point to here. */
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase)
-{
- msr_t mtrr_cap;
- struct smm_relocation_params *relo_params = &smm_reloc_params;
-
- printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
-
- /* Determine if the processor supports saving state in MSRs. If so,
- * enable it before the non-BSPs run so that SMM relocation can occur
- * in parallel in the non-BSP CPUs. */
- if (cpu == 0) {
- /* If smm_save_state_in_msrs is 1 then that means this is the
- * 2nd time through the relocation handler for the BSP.
- * Parallel SMM handler relocation is taking place. However,
- * it is desired to access other CPUs save state in the real
- * SMM handler. Therefore, disable the SMM save state in MSRs
- * feature. */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- } else if (bsp_setup_msr_save_state(relo_params))
- /* Just return from relocation handler if MSR save
- * state is enabled. In that case the BSP will come
- * back into the relocation handler to setup the new
- * SMBASE as well disabling SMM save state in MSRs. */
- return;
- }
-
- /* Make appropriate changes to the save state map. */
- update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
-
- /* Write PRMRR and SMRR MSRs based on indicated support. */
- mtrr_cap = rdmsr(MTRR_CAP_MSR);
- if (mtrr_cap.lo & SMRR_SUPPORTED)
- write_smrr(relo_params);
-
- if (mtrr_cap.lo & PRMRR_SUPPORTED) {
- write_prmrr(relo_params);
- /* UNCORE_PRMRR msrs are package level. Therefore, only
- * configure these MSRs on the BSP. */
- if (cpu == 0)
- write_uncore_prmrr(relo_params);
- }
-}
-
-static void fill_in_relocation_params(struct smm_relocation_params *params)
-{
- uintptr_t tseg_base;
- size_t tseg_size;
- u32 prmrr_base;
- u32 prmrr_size;
- int phys_bits;
- /* All range registers are aligned to 4KiB */
- const u32 rmask = ~((1 << 12) - 1);
-
- /* Some of the range registers are dependent on the number of physical
- * address bits supported. */
- phys_bits = cpuid_eax(0x80000008) & 0xff;
-
- /* The range bounded by the TSEGMB and BGSM registers encompasses the
- * SMRAM range as well as the IED range. However, the SMRAM available
- * to the handler is 4MiB since the IEDRAM lives TSEGMB + 4MiB.
- */
- smm_region(&tseg_base, &tseg_size);
-
- /* SMRR has 32-bits of valid address aligned to 4KiB. */
- params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
- params->smrr_base.hi = 0;
- params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
- params->smrr_mask.hi = 0;
-
- smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
-
- /* The PRMRR and UNCORE_PRMRR are at IEDBASE + 2MiB */
- prmrr_base = (params->ied_base + (2 << 20)) & rmask;
- prmrr_size = params->ied_size - (2 << 20);
-
- /* PRMRR has 46 bits of valid address aligned to 4KiB. It's dependent
- * on the number of physical address bits supported. */
- params->prmrr_base.lo = prmrr_base | MTRR_TYPE_WRBACK;
- params->prmrr_base.hi = 0;
- params->prmrr_mask.lo = (~(prmrr_size - 1) & rmask)
- | MTRR_PHYS_MASK_VALID;
- params->prmrr_mask.hi = (1 << (phys_bits - 32)) - 1;
-
- /* UNCORE_PRMRR has 39 bits of valid address aligned to 4KiB. */
- params->uncore_prmrr_base.lo = prmrr_base;
- params->uncore_prmrr_base.hi = 0;
- params->uncore_prmrr_mask.lo = (~(prmrr_size - 1) & rmask) |
- MTRR_PHYS_MASK_VALID;
- params->uncore_prmrr_mask.hi = (1 << (39 - 32)) - 1;
-}
-
-static void setup_ied_area(struct smm_relocation_params *params)
-{
- char *ied_base;
-
- struct ied_header ied = {
- .signature = "INTEL RSVD",
- .size = params->ied_size,
- .reserved = {0},
- };
-
- ied_base = (void *)params->ied_base;
-
- /* Place IED header at IEDBASE. */
- memcpy(ied_base, &ied, sizeof(ied));
-
- /* Zero out 32KiB at IEDBASE + 1MiB */
- memset(ied_base + (1 << 20), 0, (32 << 10));
-}
-
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
- size_t *smm_save_state_size)
-{
- printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
-
- fill_in_relocation_params(&smm_reloc_params);
-
- smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
-
- setup_ied_area(&smm_reloc_params);
-
- *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
-}
-
-void smm_initialize(void)
-{
- /* Clear the SMM state in the southbridge. */
- smm_southbridge_clear_state();
-
- /*
- * Run the relocation handler for on the BSP to check and set up
- * parallel SMM relocation.
- */
- smm_initiate_relocation();
-
- if (smm_reloc_params.smm_save_state_in_msrs)
- printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
-}
-
-/* The default SMM entry can happen in parallel or serially. If the
- * default SMM entry is done in parallel the BSP has already setup
- * the saving state to each CPU's MSRs. At least one save state size
- * is required for the initial SMM entry for the BSP to determine if
- * parallel SMM relocation is even feasible. */
-void smm_relocate(void)
-{
- /*
- * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
- * shall take place. Run the relocation handler a second time on the
- * BSP to do * the final move. For APs, a relocation handler always
- * needs to be run.
- */
- if (smm_reloc_params.smm_save_state_in_msrs)
- smm_initiate_relocation_parallel();
- else if (!boot_cpu())
- smm_initiate_relocation();
-}
diff --git a/src/soc/intel/cannonlake/Makefile.inc b/src/soc/intel/cannonlake/Makefile.inc
index 0fcbcd1..0e6e297 100644
--- a/src/soc/intel/cannonlake/Makefile.inc
+++ b/src/soc/intel/cannonlake/Makefile.inc
@@ -47,7 +47,6 @@
ramstage-y += pmc.c
ramstage-y += pmutil.c
ramstage-y += reset.c
-ramstage-y += smmrelocate.c
ramstage-y += spi.c
ramstage-y += systemagent.c
ramstage-y += uart.c
diff --git a/src/soc/intel/cannonlake/cpu.c b/src/soc/intel/cannonlake/cpu.c
index f01b499..96d544e 100644
--- a/src/soc/intel/cannonlake/cpu.c
+++ b/src/soc/intel/cannonlake/cpu.c
@@ -441,12 +441,6 @@
set_vmx_and_lock();
}

-static void per_cpu_smm_trigger(void)
-{
- /* Relocate the SMM handler. */
- smm_relocate();
-}
-
static void post_mp_init(void)
{
/* Set Max Ratio */
@@ -462,7 +456,7 @@
smm_lock();
}

-static const struct mp_ops mp_ops = {
+static struct mp_ops mp_ops = {
/*
* Skip Pre MP init MTRR programming as MTRRs are mirrored from BSP,
* that are set prior to ramstage.
@@ -470,16 +464,15 @@
*/
.pre_mp_init = soc_fsp_load,
.get_cpu_count = get_cpu_count,
- .get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
- .pre_mp_smm_init = smm_initialize,
- .per_cpu_smm_trigger = per_cpu_smm_trigger,
- .relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};

void soc_init_cpus(struct bus *cpu_bus)
{
+ if (CONFIG(HAVE_SMI_HANDLER))
+ select_mp_ops_em64t101_detect(&mp_ops);
+
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");

diff --git a/src/soc/intel/cannonlake/smmrelocate.c b/src/soc/intel/cannonlake/smmrelocate.c
deleted file mode 100644
index 7239e6f..0000000
--- a/src/soc/intel/cannonlake/smmrelocate.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2014 Google Inc.
- * Copyright (C) 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <types.h>
-#include <string.h>
-#include <device/device.h>
-#include <device/pci.h>
-#include <device/pci_ops.h>
-#include <cpu/x86/cache.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/mp.h>
-#include <cpu/x86/msr.h>
-#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
-#include <cpu/intel/em64t101_save_state.h>
-#include <cpu/intel/smm_reloc.h>
-#include <console/console.h>
-#include <soc/cpu.h>
-#include <soc/msr.h>
-#include <soc/pci_devs.h>
-#include <soc/systemagent.h>
-#include "chip.h"
-
-
-static void update_save_state(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase,
- struct smm_relocation_params *relo_params)
-{
- u32 smbase;
- u32 iedbase;
-
- /*
- * The relocated handler runs with all CPUs concurrently. Therefore
- * stagger the entry points adjusting SMBASE downwards by save state
- * size * CPU num.
- */
- smbase = staggered_smbase;
- iedbase = relo_params->ied_base;
-
- printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
- smbase, iedbase);
-
- /*
- * All threads need to set IEDBASE and SMBASE to the relocated
- * handler region. However, the save state location depends on the
- * smm_save_state_in_msrs field in the relocation parameters. If
- * smm_save_state_in_msrs is non-zero then the CPUs are relocating
- * the SMM handler in parallel, and each CPUs save state area is
- * located in their respective MSR space. If smm_save_state_in_msrs
- * is zero then the SMM relocation is happening serially so the
- * save state is at the same default location for all CPUs.
- */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smbase_msr;
- msr_t iedbase_msr;
-
- smbase_msr.lo = smbase;
- smbase_msr.hi = 0;
-
- /*
- * According the BWG the IEDBASE MSR is in bits 63:32. It's
- * not clear why it differs from the SMBASE MSR.
- */
- iedbase_msr.lo = 0;
- iedbase_msr.hi = iedbase;
-
- wrmsr(SMBASE_MSR, smbase_msr);
- wrmsr(IEDBASE_MSR, iedbase_msr);
- } else {
- em64t101_smm_state_save_area_t *save_state;
-
- save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
- sizeof(*save_state));
-
- save_state->smbase = smbase;
- save_state->iedbase = iedbase;
- }
-}
-
-/* Returns 1 if SMM MSR save state was set. */
-static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
-{
- msr_t smm_mca_cap;
-
- smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
- if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.hi = 0;
- smm_feature_control.lo |= SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- relo_params->smm_save_state_in_msrs = 1;
- }
- return relo_params->smm_save_state_in_msrs;
-}
-
-/*
- * The relocation work is actually performed in SMM context, but the code
- * resides in the ramstage module. This occurs by trampolining from the default
- * SMRAM entry point to here.
- */
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase)
-{
- msr_t mtrr_cap;
- struct smm_relocation_params *relo_params = &smm_reloc_params;
-
- printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
-
- /*
- * Determine if the processor supports saving state in MSRs. If so,
- * enable it before the non-BSPs run so that SMM relocation can occur
- * in parallel in the non-BSP CPUs.
- */
- if (cpu == 0) {
- /*
- * If smm_save_state_in_msrs is 1 then that means this is the
- * 2nd time through the relocation handler for the BSP.
- * Parallel SMM handler relocation is taking place. However,
- * it is desired to access other CPUs save state in the real
- * SMM handler. Therefore, disable the SMM save state in MSRs
- * feature.
- */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- } else if (bsp_setup_msr_save_state(relo_params))
- /*
- * Just return from relocation handler if MSR save
- * state is enabled. In that case the BSP will come
- * back into the relocation handler to set up the new
- * SMBASE as well as disabling SMM save state in MSRs.
- */
- return;
- }
-
- /* Make appropriate changes to the save state map. */
- update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
-
- /* Write SMRR MSRs based on indicated support. */
- mtrr_cap = rdmsr(MTRR_CAP_MSR);
- if (mtrr_cap.lo & SMRR_SUPPORTED)
- write_smrr(relo_params);
-}
-
-static void fill_in_relocation_params(struct smm_relocation_params *params)
-{
- uintptr_t tseg_base;
- size_t tseg_size;
- /* All range registers are aligned to 4KiB */
- const u32 rmask = ~(4 * KiB - 1);
-
- smm_region(&tseg_base, &tseg_size);
- smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
-
- /* SMRR has 32-bits of valid address aligned to 4KiB. */
- params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
- params->smrr_base.hi = 0;
- params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
- params->smrr_mask.hi = 0;
-}
-
-static void setup_ied_area(struct smm_relocation_params *params)
-{
- char *ied_base;
-
- struct ied_header ied = {
- .signature = "INTEL RSVD",
- .size = params->ied_size,
- .reserved = {0},
- };
-
- ied_base = (void *)params->ied_base;
-
- printk(BIOS_DEBUG, "IED base = 0x%08x\n", (u32)params->ied_base);
- printk(BIOS_DEBUG, "IED size = 0x%08x\n", (u32)params->ied_size);
-
- /* Place IED header at IEDBASE. */
- memcpy(ied_base, &ied, sizeof(ied));
-
- /* Zero out 32KiB at IEDBASE + 1MiB */
- memset(ied_base + 1 * MiB, 0, 32 * KiB);
-}
-
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
- size_t *smm_save_state_size)
-{
- printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
-
- fill_in_relocation_params(&smm_reloc_params);
-
- smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
-
- if (smm_reloc_params.ied_size)
- setup_ied_area(&smm_reloc_params);
-
- *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
-}
-
-void smm_initialize(void)
-{
- /* Clear the SMM state in the southbridge. */
- smm_southbridge_clear_state();
-
- /*
- * Run the relocation handler on the BSP to check and set up
- * parallel SMM relocation.
- */
- smm_initiate_relocation();
-
- if (smm_reloc_params.smm_save_state_in_msrs)
- printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
-}
-
-void smm_relocate(void)
-{
- /*
- * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
- * shall take place. Run the relocation handler a second time on the
- * BSP to do the final move. For APs, a relocation handler always
- * needs to be run.
- */
- if (smm_reloc_params.smm_save_state_in_msrs)
- smm_initiate_relocation_parallel();
- else if (!boot_cpu())
- smm_initiate_relocation();
-}
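
The staggering comment repeated in each of these removed copies is easiest to see with concrete numbers. Below is a minimal standalone sketch of the arithmetic, mirroring the serial-path pointer math in update_save_state(); the base address and the save state size are assumptions for illustration, not values taken from this change.

#include <stdint.h>
#include <stdio.h>

#define SMM_DEFAULT_SIZE 0x10000 /* 64 KiB default SMRAM segment */
#define SAVE_STATE_SIZE  0x400   /* assumed sizeof(em64t101_smm_state_save_area_t) */

/* Each CPU's entry point moves down by one save state size. */
static uintptr_t stagger(uintptr_t perm_smbase, int cpu)
{
	return perm_smbase - (uintptr_t)cpu * SAVE_STATE_SIZE;
}

int main(void)
{
	const uintptr_t perm_smbase = 0x7f000000; /* hypothetical relocated base */

	for (int cpu = 0; cpu < 4; cpu++)
		printf("CPU %d: SMBASE=0x%08lx, save state at 0x%08lx\n", cpu,
		       (unsigned long)stagger(perm_smbase, cpu),
		       (unsigned long)(stagger(perm_smbase, cpu) +
				       SMM_DEFAULT_SIZE - SAVE_STATE_SIZE));
	return 0;
}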
diff --git a/src/soc/intel/denverton_ns/Makefile.inc b/src/soc/intel/denverton_ns/Makefile.inc
index 4050f61..b52c209 100644
--- a/src/soc/intel/denverton_ns/Makefile.inc
+++ b/src/soc/intel/denverton_ns/Makefile.inc
@@ -67,8 +67,8 @@
ramstage-y += hob_mem.c
ramstage-$(CONFIG_DRIVERS_UART_8250MEM) += uart_debug.c
ramstage-$(CONFIG_HAVE_ACPI_TABLES) += acpi.c
-ramstage-$(CONFIG_HAVE_SMI_HANDLER) += smm.c
-ramstage-$(CONFIG_HAVE_SMI_HANDLER) += pmutil.c
+ramstage-y += smm.c
+ramstage-y += pmutil.c
ramstage-$(CONFIG_DISPLAY_UPD_DATA) += upd_display.c
ramstage-$(CONFIG_DISPLAY_HOBS) += hob_display.c

diff --git a/src/soc/intel/denverton_ns/cpu.c b/src/soc/intel/denverton_ns/cpu.c
index 1363d35..3365091 100644
--- a/src/soc/intel/denverton_ns/cpu.c
+++ b/src/soc/intel/denverton_ns/cpu.c
@@ -183,7 +183,6 @@
((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}

-
/*
* Do essential initialization tasks before APs can be fired up
*
diff --git a/src/soc/intel/icelake/Makefile.inc b/src/soc/intel/icelake/Makefile.inc
index 67a3a71..e114f7e 100644
--- a/src/soc/intel/icelake/Makefile.inc
+++ b/src/soc/intel/icelake/Makefile.inc
@@ -39,8 +39,9 @@
ramstage-y += lockdown.c
ramstage-y += p2sb.c
ramstage-y += pmc.c
+ramstage-y += pmutil.c
ramstage-y += reset.c
-ramstage-y += smmrelocate.c
+ramstage-y += spi.c
ramstage-y += systemagent.c
ramstage-y += sd.c

diff --git a/src/soc/intel/icelake/cpu.c b/src/soc/intel/icelake/cpu.c
index e058442..a92ccd0 100644
--- a/src/soc/intel/icelake/cpu.c
+++ b/src/soc/intel/icelake/cpu.c
@@ -217,12 +217,6 @@
enable_turbo();
}

-static void per_cpu_smm_trigger(void)
-{
- /* Relocate the SMM handler. */
- smm_relocate();
-}
-
static void post_mp_init(void)
{
/* Set Max Ratio */
@@ -238,7 +232,7 @@
smm_lock();
}

-static const struct mp_ops mp_ops = {
+static struct mp_ops mp_ops = {
/*
* Skip Pre MP init MTRR programming as MTRRs are mirrored from BSP,
* that are set prior to ramstage.
@@ -246,16 +240,15 @@
*/
.pre_mp_init = soc_fsp_load,
.get_cpu_count = get_cpu_count,
- .get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
- .pre_mp_smm_init = smm_initialize,
- .per_cpu_smm_trigger = per_cpu_smm_trigger,
- .relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};

void soc_init_cpus(struct bus *cpu_bus)
{
+ if (CONFIG(HAVE_SMI_HANDLER))
+ select_mp_ops_em64t101_detect(&mp_ops);
+
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
}
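
With this pattern, each SoC's hand-written get_smm_info/pre_mp_smm_init/per_cpu_smm_trigger/relocation_handler wiring collapses into one call. The real implementation lives in the new src/cpu/intel/smm/em64t101.c (not quoted in this mail); the following is only a plausible sketch of what the selector fills in, inferred from the mp_ops entries being removed here:

#include <cpu/x86/mp.h>
#include <cpu/intel/smm_reloc.h>

/* Sketch only; see src/cpu/intel/smm/em64t101.c from this change for the
 * actual code. The assignments are inferred from the removed per-SoC
 * mp_ops entries. */
void select_mp_ops_em64t101_detect(struct mp_ops *ops)
{
	ops->get_smm_info = smm_info;
	ops->pre_mp_smm_init = smm_initialize;
	ops->per_cpu_smm_trigger = smm_relocate; /* haswell overrides this */
	ops->relocation_handler = smm_relocation_handler;
}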
diff --git a/src/soc/intel/icelake/smmrelocate.c b/src/soc/intel/icelake/smmrelocate.c
deleted file mode 100644
index 7bfdd53..0000000
--- a/src/soc/intel/icelake/smmrelocate.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2018 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <types.h>
-#include <string.h>
-#include <device/device.h>
-#include <device/pci.h>
-#include <device/pci_ops.h>
-#include <cpu/x86/cache.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/mp.h>
-#include <cpu/x86/msr.h>
-#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
-#include <cpu/intel/em64t101_save_state.h>
-#include <cpu/intel/smm_reloc.h>
-#include <console/console.h>
-#include <soc/cpu.h>
-#include <soc/msr.h>
-#include <soc/pci_devs.h>
-#include <soc/soc_chip.h>
-#include <soc/systemagent.h>
-
-
-static void update_save_state(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase,
- struct smm_relocation_params *relo_params)
-{
- u32 smbase;
- u32 iedbase;
-
- /*
- * The relocated handler runs with all CPUs concurrently. Therefore
- * stagger the entry points adjusting SMBASE downwards by save state
- * size * CPU num.
- */
- smbase = staggered_smbase;
- iedbase = relo_params->ied_base;
-
- printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
- smbase, iedbase);
-
- /*
- * All threads need to set IEDBASE and SMBASE to the relocated
- * handler region. However, the save state location depends on the
- * smm_save_state_in_msrs field in the relocation parameters. If
- * smm_save_state_in_msrs is non-zero then the CPUs are relocating
- * the SMM handler in parallel, and each CPU's save state area is
- * located in their respective MSR space. If smm_save_state_in_msrs
- * is zero then the SMM relocation is happening serially so the
- * save state is at the same default location for all CPUs.
- */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smbase_msr;
- msr_t iedbase_msr;
-
- smbase_msr.lo = smbase;
- smbase_msr.hi = 0;
-
- /*
- * According to the BWG, the IEDBASE MSR is in bits 63:32. It's
- * not clear why it differs from the SMBASE MSR.
- */
- iedbase_msr.lo = 0;
- iedbase_msr.hi = iedbase;
-
- wrmsr(SMBASE_MSR, smbase_msr);
- wrmsr(IEDBASE_MSR, iedbase_msr);
- } else {
- em64t101_smm_state_save_area_t *save_state;
-
- save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
- sizeof(*save_state));
-
- save_state->smbase = smbase;
- save_state->iedbase = iedbase;
- }
-}
-
-/* Returns 1 if SMM MSR save state was set. */
-static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
-{
- msr_t smm_mca_cap;
-
- smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
- if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.hi = 0;
- smm_feature_control.lo |= SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- relo_params->smm_save_state_in_msrs = 1;
- }
- return relo_params->smm_save_state_in_msrs;
-}
-
-/*
- * The relocation work is actually performed in SMM context, but the code
- * resides in the ramstage module. This occurs by trampolining from the default
- * SMRAM entry point to here.
- */
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase)
-{
- msr_t mtrr_cap;
- struct smm_relocation_params *relo_params = &smm_reloc_params;
-
- printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
-
- /*
- * Determine if the processor supports saving state in MSRs. If so,
- * enable it before the non-BSPs run so that SMM relocation can occur
- * in parallel in the non-BSP CPUs.
- */
- if (cpu == 0) {
- /*
- * If smm_save_state_in_msrs is 1 then that means this is the
- * 2nd time through the relocation handler for the BSP.
- * Parallel SMM handler relocation is taking place. However,
- * it is desired to access other CPUs save state in the real
- * SMM handler. Therefore, disable the SMM save state in MSRs
- * feature.
- */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- } else if (bsp_setup_msr_save_state(relo_params))
- /*
- * Just return from relocation handler if MSR save
- * state is enabled. In that case the BSP will come
- * back into the relocation handler to set up the new
- * SMBASE as well as disabling SMM save state in MSRs.
- */
- return;
- }
-
- /* Make appropriate changes to the save state map. */
- update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
-
- /* Write SMRR MSRs based on indicated support. */
- mtrr_cap = rdmsr(MTRR_CAP_MSR);
- if (mtrr_cap.lo & SMRR_SUPPORTED)
- write_smrr(relo_params);
-}
-
-static void fill_in_relocation_params(struct smm_relocation_params *params)
-{
- uintptr_t tseg_base;
- size_t tseg_size;
- /* All range registers are aligned to 4KiB */
- const u32 rmask = ~(4 * KiB - 1);
-
- smm_region(&tseg_base, &tseg_size);
-
- if (!IS_ALIGNED(tseg_base, tseg_size)) {
- printk(BIOS_WARNING,
- "TSEG base not aligned with TSEG SIZE! Not setting SMRR\n");
- return;
- }
-
- smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
-
- /* SMRR has 32-bits of valid address aligned to 4KiB. */
- params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
- params->smrr_base.hi = 0;
- params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
- params->smrr_mask.hi = 0;
-}
-
-static void setup_ied_area(struct smm_relocation_params *params)
-{
- char *ied_base;
-
- struct ied_header ied = {
- .signature = "INTEL RSVD",
- .size = params->ied_size,
- .reserved = {0},
- };
-
- ied_base = (void *)params->ied_base;
-
- printk(BIOS_DEBUG, "IED base = 0x%08x\n", (u32)params->ied_base);
- printk(BIOS_DEBUG, "IED size = 0x%08x\n", (u32)params->ied_size);
-
- /* Place IED header at IEDBASE. */
- memcpy(ied_base, &ied, sizeof(ied));
-
- /* Zero out 32KiB at IEDBASE + 1MiB */
- memset(ied_base + 1 * MiB, 0, 32 * KiB);
-}
-
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
- size_t *smm_save_state_size)
-{
- printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
-
- fill_in_relocation_params(&smm_reloc_params);
-
- smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
-
- if (smm_reloc_params.ied_size)
- setup_ied_area(&smm_reloc_params);
-
- *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
-}
-
-void smm_initialize(void)
-{
- /* Clear the SMM state in the southbridge. */
- smm_southbridge_clear_state();
-
- /*
- * Run the relocation handler on the BSP to check and set up
- * parallel SMM relocation.
- */
- smm_initiate_relocation();
-
- if (smm_reloc_params.smm_save_state_in_msrs)
- printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
-}
-
-void smm_relocate(void)
-{
- /*
- * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
- * shall take place. Run the relocation handler a second time on the
- * BSP to do the final move. For APs, a relocation handler always
- * needs to be run.
- */
- if (smm_reloc_params.smm_save_state_in_msrs)
- smm_initiate_relocation_parallel();
- else if (!boot_cpu())
- smm_initiate_relocation();
-}
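
The cpu == 0 branch above is the subtle part of every copy being deleted: when the CPU supports MSR save states, the relocation handler runs twice on the BSP. A condensed, standalone model of that control flow (illustrative only; the hardware MSR writes are replaced by a flag):

#include <stdbool.h>
#include <stdio.h>

static bool save_state_in_msrs;

/* Models the cpu == 0 branch of smm_relocation_handler(). */
static void bsp_relocation_pass(bool svrstr_supported)
{
	if (save_state_in_msrs) {
		/* Second pass: clear SMM_CPU_SAVE_EN so the runtime handler
		 * can read other CPUs' save states in SMRAM, then fall
		 * through to the actual SMBASE update. */
		printf("pass 2: disable MSR save state, update SMBASE\n");
	} else if (svrstr_supported) {
		/* First pass: enable the feature and return early; the BSP
		 * re-enters the handler later for the real move. */
		save_state_in_msrs = true;
		printf("pass 1: enable MSR save state, defer relocation\n");
		return;
	} else {
		/* No MSR save state support: one serial pass does it all. */
		printf("single pass: serial relocation\n");
	}
}

int main(void)
{
	bsp_relocation_pass(true); /* first SMI, via smm_initiate_relocation() */
	bsp_relocation_pass(true); /* second SMI on the BSP */
	return 0;
}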
diff --git a/src/soc/intel/skylake/Makefile.inc b/src/soc/intel/skylake/Makefile.inc
index b049e84..56aa7f1 100644
--- a/src/soc/intel/skylake/Makefile.inc
+++ b/src/soc/intel/skylake/Makefile.inc
@@ -58,7 +58,6 @@
ramstage-y += pmutil.c
ramstage-y += reset.c
ramstage-y += sd.c
-ramstage-y += smmrelocate.c
ramstage-y += spi.c
ramstage-y += systemagent.c
ramstage-y += uart.c
diff --git a/src/soc/intel/skylake/cpu.c b/src/soc/intel/skylake/cpu.c
index 9a487c6..fde37ee 100644
--- a/src/soc/intel/skylake/cpu.c
+++ b/src/soc/intel/skylake/cpu.c
@@ -479,12 +479,6 @@
prmrr_core_configure();
}

-static void per_cpu_smm_trigger(void)
-{
- /* Relocate the SMM handler. */
- smm_relocate();
-}
-
static void vmx_configure(void *unused)
{
set_feature_ctrl_vmx();
@@ -522,7 +516,7 @@
printk(BIOS_CRIT, "CRITICAL ERROR: MP post init failed\n");
}

-static const struct mp_ops mp_ops = {
+static struct mp_ops mp_ops = {
/*
* Skip Pre MP init MTRR programming as MTRRs are mirrored from BSP,
* that are set prior to ramstage.
@@ -530,16 +524,15 @@
*/
.pre_mp_init = soc_fsp_load,
.get_cpu_count = get_cpu_count,
- .get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
- .pre_mp_smm_init = smm_initialize,
- .per_cpu_smm_trigger = per_cpu_smm_trigger,
- .relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};

void soc_init_cpus(struct bus *cpu_bus)
{
+ if (CONFIG(HAVE_SMI_HANDLER))
+ select_mp_ops_em64t101_detect(&mp_ops);
+
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");

diff --git a/src/soc/intel/skylake/smmrelocate.c b/src/soc/intel/skylake/smmrelocate.c
deleted file mode 100644
index ba98730..0000000
--- a/src/soc/intel/skylake/smmrelocate.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2014 Google Inc.
- * Copyright (C) 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <types.h>
-#include <string.h>
-#include <device/device.h>
-#include <device/pci.h>
-#include <device/pci_ops.h>
-#include <cpu/x86/cache.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/mp.h>
-#include <cpu/x86/msr.h>
-#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
-#include <cpu/intel/em64t101_save_state.h>
-#include <cpu/intel/smm_reloc.h>
-#include <console/console.h>
-#include <soc/cpu.h>
-#include <soc/msr.h>
-#include <soc/pci_devs.h>
-#include <soc/systemagent.h>
-#include "chip.h"
-
-
-static void update_save_state(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase,
- struct smm_relocation_params *relo_params)
-{
- u32 smbase;
- u32 iedbase;
-
- /*
- * The relocated handler runs with all CPUs concurrently. Therefore
- * stagger the entry points adjusting SMBASE downwards by save state
- * size * CPU num.
- */
- smbase = staggered_smbase;
- iedbase = relo_params->ied_base;
-
- printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
- smbase, iedbase);
-
- /*
- * All threads need to set IEDBASE and SMBASE to the relocated
- * handler region. However, the save state location depends on the
- * smm_save_state_in_msrs field in the relocation parameters. If
- * smm_save_state_in_msrs is non-zero then the CPUs are relocating
- * the SMM handler in parallel, and each CPU's save state area is
- * located in their respective MSR space. If smm_save_state_in_msrs
- * is zero then the SMM relocation is happening serially so the
- * save state is at the same default location for all CPUs.
- */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smbase_msr;
- msr_t iedbase_msr;
-
- smbase_msr.lo = smbase;
- smbase_msr.hi = 0;
-
- /*
- * According to the BWG, the IEDBASE MSR is in bits 63:32. It's
- * not clear why it differs from the SMBASE MSR.
- */
- iedbase_msr.lo = 0;
- iedbase_msr.hi = iedbase;
-
- wrmsr(SMBASE_MSR, smbase_msr);
- wrmsr(IEDBASE_MSR, iedbase_msr);
- } else {
- em64t101_smm_state_save_area_t *save_state;
-
- save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
- sizeof(*save_state));
-
- save_state->smbase = smbase;
- save_state->iedbase = iedbase;
- }
-}
-
-/* Returns 1 if SMM MSR save state was set. */
-static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
-{
- msr_t smm_mca_cap;
-
- smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
- if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.hi = 0;
- smm_feature_control.lo |= SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- relo_params->smm_save_state_in_msrs = 1;
- }
- return relo_params->smm_save_state_in_msrs;
-}
-
-/*
- * The relocation work is actually performed in SMM context, but the code
- * resides in the ramstage module. This occurs by trampolining from the default
- * SMRAM entry point to here.
- */
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase)
-{
- msr_t mtrr_cap;
- struct smm_relocation_params *relo_params = &smm_reloc_params;
-
- printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
-
- /*
- * Determine if the processor supports saving state in MSRs. If so,
- * enable it before the non-BSPs run so that SMM relocation can occur
- * in parallel in the non-BSP CPUs.
- */
- if (cpu == 0) {
- /*
- * If smm_save_state_in_msrs is 1 then that means this is the
- * 2nd time through the relocation handler for the BSP.
- * Parallel SMM handler relocation is taking place. However,
- * it is desired to access other CPUs save state in the real
- * SMM handler. Therefore, disable the SMM save state in MSRs
- * feature.
- */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- } else if (bsp_setup_msr_save_state(relo_params))
- /*
- * Just return from relocation handler if MSR save
- * state is enabled. In that case the BSP will come
- * back into the relocation handler to set up the new
- * SMBASE as well as disabling SMM save state in MSRs.
- */
- return;
- }
-
- /* Make appropriate changes to the save state map. */
- update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
-
- /* Write SMRR MSRs based on indicated support. */
- mtrr_cap = rdmsr(MTRR_CAP_MSR);
- if (mtrr_cap.lo & SMRR_SUPPORTED)
- write_smrr(relo_params);
-}
-
-static void fill_in_relocation_params(struct smm_relocation_params *params)
-{
- uintptr_t tseg_base;
- size_t tseg_size;
- /* All range registers are aligned to 4KiB */
- const u32 rmask = ~((1 << 12) - 1);
-
- smm_region(&tseg_base, &tseg_size);
- smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
-
- /* SMRR has 32-bits of valid address aligned to 4KiB. */
- params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
- params->smrr_base.hi = 0;
- params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
- params->smrr_mask.hi = 0;
-}
-
-static void setup_ied_area(struct smm_relocation_params *params)
-{
- char *ied_base;
-
- struct ied_header ied = {
- .signature = "INTEL RSVD",
- .size = params->ied_size,
- .reserved = {0},
- };
-
- ied_base = (void *)params->ied_base;
-
- printk(BIOS_DEBUG, "IED base = 0x%08x\n", (u32) params->ied_base);
- printk(BIOS_DEBUG, "IED size = 0x%08x\n", (u32) params->ied_size);
-
- /* Place IED header at IEDBASE. */
- memcpy(ied_base, &ied, sizeof(ied));
-
- /* Zero out 32KiB at IEDBASE + 1MiB */
- memset(ied_base + (1 << 20), 0, (32 << 10));
-}
-
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
- size_t *smm_save_state_size)
-{
- printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
-
- fill_in_relocation_params(&smm_reloc_params);
-
- smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
-
- if (smm_reloc_params.ied_size)
- setup_ied_area(&smm_reloc_params);
-
- *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
-}
-
-void smm_initialize(void)
-{
- /* Clear the SMM state in the southbridge. */
- smm_southbridge_clear_state();
-
- /*
- * Run the relocation handler on the BSP to check and set up
- * parallel SMM relocation.
- */
- smm_initiate_relocation();
-
- if (smm_reloc_params.smm_save_state_in_msrs)
- printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
-}
-
-void smm_relocate(void)
-{
- /*
- * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
- * shall take place. Run the relocation handler a second time on the
- * BSP to do the final move. For APs, a relocation handler always
- * needs to be run.
- */
- if (smm_reloc_params.smm_save_state_in_msrs)
- smm_initiate_relocation_parallel();
- else if (!boot_cpu())
- smm_initiate_relocation();
-}
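
Incidentally, this skylake copy spells the range mask as ~((1 << 12) - 1) where the other copies use ~(4 * KiB - 1); both are 0xfffff000. A worked example of the SMRR base/mask arithmetic from fill_in_relocation_params(), with assumed TSEG values (power-of-two size, as SMRR requires):

#include <stdint.h>
#include <stdio.h>

#define KiB 1024u
#define MTRR_TYPE_WRBACK     6u
#define MTRR_PHYS_MASK_VALID (1u << 11)

int main(void)
{
	const uint32_t rmask = ~(4 * KiB - 1);  /* == ~((1 << 12) - 1) == 0xfffff000 */
	const uint32_t tseg_base = 0x7f000000;  /* assumed TSEG base */
	const uint32_t tseg_size = 8192 * KiB;  /* assumed 8 MiB TSEG */

	uint32_t smrr_base = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	uint32_t smrr_mask = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;

	/* Expect SMRR_PHYS_BASE=0x7f000006 SMRR_PHYS_MASK=0xff800800 */
	printf("SMRR_PHYS_BASE=0x%08x SMRR_PHYS_MASK=0x%08x\n",
	       smrr_base, smrr_mask);
	return 0;
}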
diff --git a/src/soc/intel/tigerlake/Makefile.inc b/src/soc/intel/tigerlake/Makefile.inc
index b402fa0..4d5b9f5 100644
--- a/src/soc/intel/tigerlake/Makefile.inc
+++ b/src/soc/intel/tigerlake/Makefile.inc
@@ -40,7 +40,6 @@
ramstage-y += p2sb.c
ramstage-y += pmc.c
ramstage-y += reset.c
-ramstage-y += smmrelocate.c
ramstage-y += systemagent.c
ramstage-y += sd.c

diff --git a/src/soc/intel/tigerlake/smmrelocate.c b/src/soc/intel/tigerlake/smmrelocate.c
deleted file mode 100644
index 023f04b..0000000
--- a/src/soc/intel/tigerlake/smmrelocate.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2019 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <types.h>
-#include <string.h>
-#include <device/device.h>
-#include <device/pci.h>
-#include <device/pci_ops.h>
-#include <cpu/x86/cache.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/mp.h>
-#include <cpu/x86/msr.h>
-#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
-#include <cpu/intel/em64t101_save_state.h>
-#include <cpu/intel/smm_reloc.h>
-#include <console/console.h>
-#include <soc/cpu.h>
-#include <soc/msr.h>
-#include <soc/pci_devs.h>
-#include <soc/soc_chip.h>
-#include <soc/systemagent.h>
-
-/* This gets filled in and used during relocation. */
-static struct smm_relocation_params smm_reloc_params;
-
-static inline void write_smrr(struct smm_relocation_params *relo_params)
-{
- printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
- relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
- wrmsr(IA32_SMRR_PHYS_BASE, relo_params->smrr_base);
- wrmsr(IA32_SMRR_PHYS_MASK, relo_params->smrr_mask);
-}
-
-static void update_save_state(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase,
- struct smm_relocation_params *relo_params)
-{
- u32 smbase;
- u32 iedbase;
-
- /*
- * The relocated handler runs with all CPUs concurrently. Therefore
- * stagger the entry points adjusting SMBASE downwards by save state
- * size * CPU num.
- */
- smbase = staggered_smbase;
- iedbase = relo_params->ied_base;
-
- printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
- smbase, iedbase);
-
- /*
- * All threads need to set IEDBASE and SMBASE to the relocated
- * handler region. However, the save state location depends on the
- * smm_save_state_in_msrs field in the relocation parameters. If
- * smm_save_state_in_msrs is non-zero then the CPUs are relocating
- * the SMM handler in parallel, and each CPU's save state area is
- * located in their respective MSR space. If smm_save_state_in_msrs
- * is zero then the SMM relocation is happening serially so the
- * save state is at the same default location for all CPUs.
- */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smbase_msr;
- msr_t iedbase_msr;
-
- smbase_msr.lo = smbase;
- smbase_msr.hi = 0;
-
- /*
- * According to the BWG, the IEDBASE MSR is in bits 63:32. It's
- * not clear why it differs from the SMBASE MSR.
- */
- iedbase_msr.lo = 0;
- iedbase_msr.hi = iedbase;
-
- wrmsr(SMBASE_MSR, smbase_msr);
- wrmsr(IEDBASE_MSR, iedbase_msr);
- } else {
- em64t101_smm_state_save_area_t *save_state;
-
- save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
- sizeof(*save_state));
-
- save_state->smbase = smbase;
- save_state->iedbase = iedbase;
- }
-}
-
-/* Returns 1 if SMM MSR save state was set. */
-static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
-{
- msr_t smm_mca_cap;
-
- smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
- if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.hi = 0;
- smm_feature_control.lo |= SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- relo_params->smm_save_state_in_msrs = 1;
- }
- return relo_params->smm_save_state_in_msrs;
-}
-
-/*
- * The relocation work is actually performed in SMM context, but the code
- * resides in the ramstage module. This occurs by trampolining from the default
- * SMRAM entry point to here.
- */
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
- uintptr_t staggered_smbase)
-{
- msr_t mtrr_cap;
- struct smm_relocation_params *relo_params = &smm_reloc_params;
-
- printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
-
- /*
- * Determine if the processor supports saving state in MSRs. If so,
- * enable it before the non-BSPs run so that SMM relocation can occur
- * in parallel in the non-BSP CPUs.
- */
- if (cpu == 0) {
- /*
- * If smm_save_state_in_msrs is 1 then that means this is the
- * 2nd time through the relocation handler for the BSP.
- * Parallel SMM handler relocation is taking place. However,
- * it is desired to access other CPUs save state in the real
- * SMM handler. Therefore, disable the SMM save state in MSRs
- * feature.
- */
- if (relo_params->smm_save_state_in_msrs) {
- msr_t smm_feature_control;
-
- smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
- smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
- wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
- } else if (bsp_setup_msr_save_state(relo_params))
- /*
- * Just return from relocation handler if MSR save
- * state is enabled. In that case the BSP will come
- * back into the relocation handler to set up the new
- * SMBASE as well as disabling SMM save state in MSRs.
- */
- return;
- }
-
- /* Make appropriate changes to the save state map. */
- update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
-
- /* Write SMRR MSRs based on indicated support. */
- mtrr_cap = rdmsr(MTRR_CAP_MSR);
- if (mtrr_cap.lo & SMRR_SUPPORTED)
- write_smrr(relo_params);
-}
-
-static void fill_in_relocation_params(struct smm_relocation_params *params)
-{
- uintptr_t tseg_base;
- size_t tseg_size;
- /* All range registers are aligned to 4KiB */
- const u32 rmask = ~(4 * KiB - 1);
-
- smm_region(&tseg_base, &tseg_size);
-
- if (!IS_ALIGNED(tseg_base, tseg_size)) {
- printk(BIOS_WARNING,
- "TSEG base not aligned with TSEG SIZE! Not setting SMRR\n");
- return;
- }
-
- smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
-
- /* SMRR has 32-bits of valid address aligned to 4KiB. */
- params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
- params->smrr_base.hi = 0;
- params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
- params->smrr_mask.hi = 0;
-}
-
-static void setup_ied_area(struct smm_relocation_params *params)
-{
- char *ied_base;
-
- struct ied_header ied = {
- .signature = "INTEL RSVD",
- .size = params->ied_size,
- .reserved = {0},
- };
-
- ied_base = (void *)params->ied_base;
-
- printk(BIOS_DEBUG, "IED base = 0x%08x\n", (u32)params->ied_base);
- printk(BIOS_DEBUG, "IED size = 0x%08x\n", (u32)params->ied_size);
-
- /* Place IED header at IEDBASE. */
- memcpy(ied_base, &ied, sizeof(ied));
-
- /* Zero out 32KiB at IEDBASE + 1MiB */
- memset(ied_base + 1 * MiB, 0, 32 * KiB);
-}
-
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
- size_t *smm_save_state_size)
-{
- printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
-
- fill_in_relocation_params(&smm_reloc_params);
-
- smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
-
- if (smm_reloc_params.ied_size)
- setup_ied_area(&smm_reloc_params);
-
- *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
-}
-
-void smm_initialize(void)
-{
- /* Clear the SMM state in the southbridge. */
- smm_southbridge_clear_state();
-
- /*
- * Run the relocation handler on the BSP to check and set up
- * parallel SMM relocation.
- */
- smm_initiate_relocation();
-
- if (smm_reloc_params.smm_save_state_in_msrs)
- printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
-}
-
-void smm_relocate(void)
-{
- /*
- * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
- * shall take place. Run the relocation handler a second time on the
- * BSP to do the final move. For APs, a relocation handler always
- * needs to be run.
- */
- if (smm_reloc_params.smm_save_state_in_msrs)
- smm_initiate_relocation_parallel();
- else if (!boot_cpu())
- smm_initiate_relocation();
-}
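
On the BWG quirk called out in each copy: SMBASE_MSR carries its address in bits 31:0 while IEDBASE_MSR carries it in bits 63:32. A small standalone illustration with assumed addresses:

#include <stdint.h>
#include <stdio.h>

typedef union {
	struct { uint32_t lo; uint32_t hi; };
	uint64_t raw;
} msr_val;

int main(void)
{
	/* Assumed addresses, for illustration only. */
	msr_val smbase  = { .lo = 0x7f000000, .hi = 0 };  /* address in 31:0  */
	msr_val iedbase = { .lo = 0, .hi = 0x7f800000 };  /* address in 63:32 */

	printf("SMBASE_MSR =0x%016llx\nIEDBASE_MSR=0x%016llx\n",
	       (unsigned long long)smbase.raw,
	       (unsigned long long)iedbase.raw);
	return 0;
}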
diff --git a/src/southbridge/intel/lynxpoint/Makefile.inc b/src/southbridge/intel/lynxpoint/Makefile.inc
index e53ed8d..2c0cb3b 100644
--- a/src/southbridge/intel/lynxpoint/Makefile.inc
+++ b/src/southbridge/intel/lynxpoint/Makefile.inc
@@ -38,10 +38,10 @@
ramstage-y += rcba.c
ramstage-y += me_status.c
ramstage-y += acpi.c
+ramstage-y += smi.c pmutil.c

ramstage-$(CONFIG_ELOG) += elog.c

-ramstage-$(CONFIG_HAVE_SMI_HANDLER) += smi.c pmutil.c
smm-y += smihandler.c me_9.x.c pch.c
smm-y += pmutil.c usb_ehci.c usb_xhci.c


To view, visit change 37108. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I177ca3b2966ccd985d72749ea2bd096ec4f233d6
Gerrit-Change-Number: 37108
Gerrit-PatchSet: 1
Gerrit-Owner: Kyösti Mälkki <kyosti.malkki@gmail.com>
Gerrit-Reviewer: David Guckian <david.guckian@intel.com>
Gerrit-Reviewer: Kyösti Mälkki <kyosti.malkki@gmail.com>
Gerrit-Reviewer: Martin Roth <martinroth@google.com>
Gerrit-Reviewer: Patrick Georgi <pgeorgi@google.com>
Gerrit-Reviewer: Patrick Rudolph <siro@das-labor.org>
Gerrit-Reviewer: Vanny E <vanessa.f.eusebio@intel.com>
Gerrit-MessageType: newchange