Naresh Solanki has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/85640?usp=email )
Change subject: soc/amd/glinda/cpu: Update cache info ......................................................................
soc/amd/glinda/cpu: Update cache info
Change-Id: I46947e8ac62c903036a81642e03201e353c3dac6 Signed-off-by: Naresh Solanki <naresh.solanki@9elements.com> --- M src/soc/amd/glinda/cpu.c 1 file changed, 80 insertions(+), 0 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/40/85640/1
diff --git a/src/soc/amd/glinda/cpu.c b/src/soc/amd/glinda/cpu.c index a716127..2d10130 100644 --- a/src/soc/amd/glinda/cpu.c +++ b/src/soc/amd/glinda/cpu.c @@ -6,12 +6,92 @@ #include <amdblocks/mca.h> #include <cpu/amd/microcode.h> #include <cpu/cpu.h> +#include <cpu/x86/mp.h> +#include <cpu/x86/mtrr.h> #include <device/device.h> +#include <lib.h> +#include <smbios.h> #include <soc/cpu.h>
_Static_assert(CONFIG_MAX_CPUS == 24, "Do not override MAX_CPUS. To reduce the number of " "available cores, use the downcore_mode and disable_smt devicetree settings instead.");
+static void ap_get_cache(void *cache_list) +{ + struct cpu_cache_info *info = (struct cpu_cache_info *)cache_list; + uint32_t leaf = DETERMINISTIC_CACHE_PARAMETERS_CPUID_AMD; + info += cpu_index(); + uint8_t level = info->level; + + struct cpuid_result cache_info_res = cpuid_ext(leaf, level); + + info->type = CPUID_CACHE_TYPE(cache_info_res); + info->level = CPUID_CACHE_LEVEL(cache_info_res); + info->num_ways = CPUID_CACHE_WAYS_OF_ASSOC(cache_info_res) + 1; + info->num_sets = CPUID_CACHE_NO_OF_SETS(cache_info_res) + 1; + info->line_size = CPUID_CACHE_COHER_LINE(cache_info_res) + 1; + info->physical_partitions = CPUID_CACHE_PHYS_LINE(cache_info_res) + 1; + info->num_cores_shared = CPUID_CACHE_SHARING_CACHE(cache_info_res) + 1; + info->fully_associative = CPUID_CACHE_FULL_ASSOC(cache_info_res); + info->size = get_cache_size(info); + + struct cpuid_result res = cpuid(0x8000001e); + unsigned int cpuid_cpu_id = res.ebx & 0xff; + info->id = cpuid_cpu_id >> __fls(info->num_cores_shared); +} + +bool fill_cpu_cache_info(uint8_t level, struct cpu_cache_info *info) +{ + uint32_t total_cache = 0; + + if (!info) + return false; + + uint32_t leaf = DETERMINISTIC_CACHE_PARAMETERS_CPUID_AMD; + struct cpuid_result cache_info_res = cpuid_ext(leaf, level); + + if (!cache_info_res.eax) { + // No cache at current level. 
+ info->type = 0; + return true; + } + + bool seen_cache_ids[CONFIG_MAX_CPUS] = {false}; + struct cpu_cache_info info_list[CONFIG_MAX_CPUS]; + for (int i = 0; i < CONFIG_MAX_CPUS; i++) + info_list[i].level = level; + + mp_run_on_all_cpus_synchronously(ap_get_cache, info_list); + + memcpy( info, &info_list[0], sizeof(*info) ); + + for (int i = 0; i < get_cpu_count(); i++ ) { + printk(BIOS_SPEW, "CPU %d:\n", i); + printk(BIOS_SPEW, " Cache Level: %d\n", info_list[i].level); + printk(BIOS_SPEW, " Cache Type: %d\n", info_list[i].type); + printk(BIOS_SPEW, " Cache Size: %zu byte\n", info_list[i].size); + printk(BIOS_SPEW, " Line Size: %zu bytes\n", info_list[i].line_size); + printk(BIOS_SPEW, " Number of Ways: %zu\n", info_list[i].num_ways); + printk(BIOS_SPEW, " Number of Sets: %zu\n", info_list[i].num_sets); + printk(BIOS_SPEW, " Physical Partitions: %zu\n", info_list[i].physical_partitions); + printk(BIOS_SPEW, " Number of Cores Sharing Cache: %zu\n", info_list[i].num_cores_shared); + printk(BIOS_SPEW, " Fully Associative: %s\n", info_list[i].fully_associative ? "Yes" : "No"); + printk(BIOS_SPEW, " Cache ID:: %d\n", info_list[i].id); + + if (!seen_cache_ids[info_list[i].id] && level == 3) { + total_cache += info_list[i].size; + seen_cache_ids[info_list[i].id] = true; + } + } + if (level == 3) { + info->num_cores_shared = get_cpu_count(); + info->size = total_cache; + } + + printk(BIOS_DEBUG, ": Max cache at level: %d is:%ld\n", level, info->size); + return true; +} + static void zen_2_3_init(struct device *dev) { check_mca();