Kyösti Mälkki has uploaded this change for review.

View Change

intel/smm: Chain .per_cpu_smm_trigger

Change-Id: I2533fc478441f5b790802df9a3ec63c5a2e496af
Signed-off-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
---
M src/cpu/intel/haswell/haswell_init.c
M src/cpu/intel/model_2065x/model_2065x_init.c
M src/cpu/intel/model_206ax/model_206ax_init.c
M src/cpu/intel/smm/em64t101.c
M src/cpu/intel/smm/gen1/smmrelocate.c
M src/include/cpu/intel/smm_reloc.h
M src/soc/intel/baytrail/cpu.c
M src/soc/intel/braswell/cpu.c
M src/soc/intel/broadwell/cpu.c
9 files changed, 65 insertions(+), 23 deletions(-)

git pull ssh://review.coreboot.org:29418/coreboot refs/changes/44/37144/1
diff --git a/src/cpu/intel/haswell/haswell_init.c b/src/cpu/intel/haswell/haswell_init.c
index c4c34ca..eb0c22e 100644
--- a/src/cpu/intel/haswell/haswell_init.c
+++ b/src/cpu/intel/haswell/haswell_init.c
@@ -746,15 +746,23 @@
*parallel = 1;
}

+static void (*smm_trigger)(void);
+
static void per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
- smm_relocate();
+ smm_trigger();

/* After SMM relocation a 2nd microcode load is required. */
intel_microcode_load_unlocked(microcode_patch);
}

+static void chain_smm_trigger(struct mp_ops *mp_ops)
+{
+ smm_trigger = mp_ops->per_cpu_smm_trigger;
+ mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger;
+}
+
static void post_mp_init(void)
{
/* Now that all APs have been relocated as well as the BSP let SMIs
@@ -775,7 +783,7 @@
void mp_init_cpus(struct bus *cpu_bus)
{
select_mp_ops_em64t101_cpu_save(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger;
+ chain_smm_trigger(&mp_ops);

if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
diff --git a/src/cpu/intel/model_2065x/model_2065x_init.c b/src/cpu/intel/model_2065x/model_2065x_init.c
index ac9713f..f9e4827 100644
--- a/src/cpu/intel/model_2065x/model_2065x_init.c
+++ b/src/cpu/intel/model_2065x/model_2065x_init.c
@@ -300,15 +300,23 @@
*parallel = 1;
}

+static void (*smm_trigger)(void);
+
static void per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
- smm_relocate();
+ smm_trigger();

/* After SMM relocation a 2nd microcode load is required. */
intel_microcode_load_unlocked(microcode_patch);
}

+static void chain_smm_trigger(struct mp_ops *mp_ops)
+{
+ smm_trigger = mp_ops->per_cpu_smm_trigger;
+ mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger;
+}
+
static void post_mp_init(void)
{
/* Now that all APs have been relocated as well as the BSP let SMIs
@@ -330,7 +338,7 @@
void mp_init_cpus(struct bus *cpu_bus)
{
select_mp_ops_em64t101(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger;
+ chain_smm_trigger(&mp_ops);

if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
diff --git a/src/cpu/intel/model_206ax/model_206ax_init.c b/src/cpu/intel/model_206ax/model_206ax_init.c
index a5e66fe..a00757d 100644
--- a/src/cpu/intel/model_206ax/model_206ax_init.c
+++ b/src/cpu/intel/model_206ax/model_206ax_init.c
@@ -526,15 +526,23 @@
*parallel = 1;
}

+static void (*smm_trigger)(void);
+
static void per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
- smm_relocate();
+ smm_trigger();

/* After SMM relocation a 2nd microcode load is required. */
intel_microcode_load_unlocked(microcode_patch);
}

+static void chain_smm_trigger(struct mp_ops *mp_ops)
+{
+ smm_trigger = mp_ops->per_cpu_smm_trigger;
+ mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger;
+}
+
static void post_mp_init(void)
{
/* Now that all APs have been relocated as well as the BSP let SMIs
diff --git a/src/cpu/intel/smm/em64t101.c b/src/cpu/intel/smm/em64t101.c
index e4dd454..d3e45bc 100644
--- a/src/cpu/intel/smm/em64t101.c
+++ b/src/cpu/intel/smm/em64t101.c
@@ -54,7 +54,7 @@
* the saving state to each CPU's MSRs. At least one save state size
* is required for the initial SMM entry for the BSP to determine if
* parallel SMM relocation is even feasible. */
-void smm_relocate(void)
+static void per_cpu_smm_trigger(void)
{
/*
* If smm_save_state_in_msrs is non-zero then parallel SMM relocation
@@ -68,12 +68,6 @@
smm_initiate_relocation();
}

-static void per_cpu_smm_trigger(void)
-{
- /* Relocate the SMM handler. */
- smm_relocate();
-}
-
/* The relocation work is actually performed in SMM context, but the code
* resides in the ramstage module. This occurs by trampolining from the default
* SMRAM entry point to here. */
diff --git a/src/cpu/intel/smm/gen1/smmrelocate.c b/src/cpu/intel/smm/gen1/smmrelocate.c
index eccd939..87ce02f 100644
--- a/src/cpu/intel/smm/gen1/smmrelocate.c
+++ b/src/cpu/intel/smm/gen1/smmrelocate.c
@@ -33,12 +33,14 @@

#define SMRR_SUPPORTED (1 << 11)

+static void (*smm_trigger)(void);
+
static void per_cpu_smm_trigger_alt_1(void)
{
set_vmx_and_lock();

/* Relocate the SMM handler. */
- smm_relocate();
+ smm_trigger();
}

static void per_cpu_smm_trigger_alt_2(void)
@@ -66,7 +68,7 @@
}

/* Relocate the SMM handler. */
- smm_relocate();
+ smm_trigger();
}

/* The relocation work is actually performed in SMM context, but the code
@@ -116,6 +118,7 @@
struct cpuinfo_x86 c;

select_mp_ops_em64t101(mp_ops);
+ smm_trigger = mp_ops->per_cpu_smm_trigger;

mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger_alt_1;

diff --git a/src/include/cpu/intel/smm_reloc.h b/src/include/cpu/intel/smm_reloc.h
index 790df30..64dda8d 100644
--- a/src/include/cpu/intel/smm_reloc.h
+++ b/src/include/cpu/intel/smm_reloc.h
@@ -54,9 +54,6 @@
void fill_in_ied_params(struct smm_relocation_params *params);
void write_smm_msrs(int cpu, struct smm_relocation_params *relo_params);

-/* These helpers are for performing SMM relocation. */
-void smm_relocate(void);
-
#define D_OPEN (1 << 6)
#define D_CLS (1 << 5)
#define D_LCK (1 << 4)
diff --git a/src/soc/intel/baytrail/cpu.c b/src/soc/intel/baytrail/cpu.c
index 44b3c59..28355bd 100644
--- a/src/soc/intel/baytrail/cpu.c
+++ b/src/soc/intel/baytrail/cpu.c
@@ -138,17 +138,25 @@
*parallel = 1;
}

+static void (*smm_trigger)(void);
+
static void per_cpu_smm_trigger(void)
{
const struct pattrs *pattrs = pattrs_get();

/* Relocate SMM space. */
- smm_initiate_relocation();
+ smm_trigger();

/* Load microcode after SMM relocation. */
intel_microcode_load_unlocked(pattrs->microcode_patch);
}

+static void chain_smm_trigger(struct mp_ops *mp_ops)
+{
+ smm_trigger = mp_ops->per_cpu_smm_trigger;
+ mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger;
+}
+
static void post_mp_init(void)
{
smm_southbridge_enable_smi();
@@ -166,7 +174,7 @@
struct bus *cpu_bus = dev->link_list;

select_mp_ops_em64t100(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger;
+ chain_smm_trigger(&mp_ops);

if (mp_init_with_smm(cpu_bus, &mp_ops)) {
printk(BIOS_ERR, "MP initialization failure.\n");
diff --git a/src/soc/intel/braswell/cpu.c b/src/soc/intel/braswell/cpu.c
index 7454f94..d803013 100644
--- a/src/soc/intel/braswell/cpu.c
+++ b/src/soc/intel/braswell/cpu.c
@@ -147,6 +147,8 @@
*parallel = 1;
}

+static void (*smm_trigger)(void);
+
static void per_cpu_smm_trigger(void)
{
const struct pattrs *pattrs = pattrs_get();
@@ -158,12 +160,18 @@
intel_microcode_load_unlocked(pattrs->microcode_patch);

/* Relocate SMM space. */
- smm_initiate_relocation();
+ smm_trigger();

/* Load microcode after SMM relocation. */
intel_microcode_load_unlocked(pattrs->microcode_patch);
}

+static void chain_smm_trigger(struct mp_ops *mp_ops)
+{
+ smm_trigger = mp_ops->per_cpu_smm_trigger;
+ mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger;
+}
+
static void post_mp_init(void)
{
smm_southbridge_enable_smi();
@@ -184,7 +192,7 @@
__FILE__, __func__, dev_name(dev));

select_mp_ops_em64t100(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger;
+ chain_smm_trigger(&mp_ops);

if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
diff --git a/src/soc/intel/broadwell/cpu.c b/src/soc/intel/broadwell/cpu.c
index f19ee7a..6a514e9 100644
--- a/src/soc/intel/broadwell/cpu.c
+++ b/src/soc/intel/broadwell/cpu.c
@@ -635,15 +635,23 @@
*parallel = 1;
}

+static void (*smm_trigger)(void);
+
static void per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
- smm_relocate();
+ smm_trigger();

/* After SMM relocation a 2nd microcode load is required. */
intel_microcode_load_unlocked(microcode_patch);
}

+static void chain_smm_trigger(struct mp_ops *mp_ops)
+{
+ smm_trigger = mp_ops->per_cpu_smm_trigger;
+ mp_ops->per_cpu_smm_trigger = per_cpu_smm_trigger;
+}
+
static void post_mp_init(void)
{
/* Set Max Ratio */
@@ -669,7 +677,7 @@
struct bus *cpu_bus = dev->link_list;

select_mp_ops_em64t101_cpu_save(&mp_ops);
- mp_ops.per_cpu_smm_trigger = per_cpu_smm_trigger;
+ chain_smm_trigger(&mp_ops);

if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");

To view, visit change 37144. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I2533fc478441f5b790802df9a3ec63c5a2e496af
Gerrit-Change-Number: 37144
Gerrit-PatchSet: 1
Gerrit-Owner: Kyösti Mälkki <kyosti.malkki@gmail.com>
Gerrit-Reviewer: Kyösti Mälkki <kyosti.malkki@gmail.com>
Gerrit-Reviewer: Patrick Rudolph <siro@das-labor.org>
Gerrit-MessageType: newchange