/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <bootstate.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/cpu_ids.h>
#include <cpu/intel/microcode.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
#include <device/device.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/fast_spi.h>
#include <intelblocks/mp_init.h>
#include <soc/cpu.h>

/* Find the microcode patch and load it on the running CPU */
static void initialize_microcode(void)
{
	const void *microcode_patch = intel_microcode_find();
	intel_microcode_load_unlocked(microcode_patch);
}

/* Per-CPU driver init: SoC-specific core init, then microcode load */
static void init_one_cpu(struct device *dev)
{
	soc_core_init(dev);
	initialize_microcode();
}

static struct device_operations cpu_dev_ops = {
	.init = init_one_cpu,
};
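/*
 * The ID table below binds this driver to the supported Intel CPUs by their
 * CPUID signatures. Each entry uses CPUID_EXACT_MATCH_MASK, so a stepping
 * must be listed explicitly for the driver (and thus init_one_cpu) to attach
 * to it.
 */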
static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, CPUID_METEORLAKE_A0_1, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_METEORLAKE_A0_2, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_METEORLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_METEORLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_D0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_HQ0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_HR0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_G0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_H0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_Y0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_HA0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_HB0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_CANNONLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_CANNONLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_CANNONLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_CANNONLAKE_D0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_E0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_R0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_WHISKEYLAKE_V0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_WHISKEYLAKE_W0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COFFEELAKE_U0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COFFEELAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COFFEELAKE_P0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COFFEELAKE_R0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_U_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_U_K0_S0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_6_2_G0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_6_2_G1, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_10_2_P0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_10_2_P1, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_10_2_Q0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_TIGERLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_TIGERLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_TIGERLAKE_R0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ELKHARTLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ELKHARTLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_JASPERLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_G0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_H0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_J0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_K0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_L0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_Q0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_R0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_N_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_J0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_Q0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_H0, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};

/*
 * MP Init callback function to find the CPU topology. This function is
 * common to all SoCs and thus lives in the common CPU block.
 */
int get_cpu_count(void)
{
	unsigned int num_virt_cores, num_phys_cores;

	cpu_read_topology(&num_phys_cores, &num_virt_cores);

	printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n", num_phys_cores, num_virt_cores);

	return num_virt_cores;
}

/*
 * MP Init callback function (get_microcode_info) to find the microcode
 * during the pre-MP-Init phase. This function is common to all SoCs and thus
 * lives in the common CPU block.
 * It fills in the microcode patch (in *microcode) and sets the argument
 * *parallel to 1, which allows all APs to load microcode in parallel during
 * MP Init.
 */
void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	*parallel = 1;
}
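/*
 * Illustrative sketch (assumption, not code from this file): these callbacks
 * match the hook signatures of coreboot's struct mp_ops in <cpu/x86/mp.h>,
 * which is how a platform would typically consume them, e.g.:
 *
 *	static const struct mp_ops mp_ops = {
 *		.get_cpu_count      = get_cpu_count,
 *		.get_microcode_info = get_microcode_info,
 *	};
 *	mp_init_with_smm(cpu_bus, &mp_ops);
 */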
/*
 * Perform BSP and AP initialization.
 * This function is called in the following cases:
 * 1. When coreboot performs MP initialization as part of BS_DEV_INIT_CHIPS
 *    (this call is excluded if USE_INTEL_FSP_MP_INIT is selected).
 * 2. When coreboot takes control of the APs back after FSP-S has finished MP
 *    initialization (USE_INTEL_FSP_MP_INIT selected).
 *
 * This function uses cpu_cluster as a device and links the APIC devices to
 * that cluster. If the mainboard does not hardcode a LAPIC ID in the
 * devicetree, a node is added and filled with the actual BSP APIC ID, which
 * lets coreboot detect the BSP LAPIC ID dynamically. If the mainboard does
 * define an APIC ID in the devicetree, the link is already present, creation
 * of a new node is skipped, and that node keeps the APIC ID from the
 * devicetree.
 */
static void init_cpus(void)
{
	struct device *dev = dev_find_path(NULL, DEVICE_PATH_CPU_CLUSTER);
	assert(dev != NULL);

	mp_cpu_bus_init(dev);
}

static void coreboot_init_cpus(void *unused)
{
	if (CONFIG(USE_INTEL_FSP_MP_INIT))
		return;

	initialize_microcode();
	init_cpus();
}

static void post_cpus_add_romcache(void)
{
	if (!CONFIG(BOOT_DEVICE_MEMORY_MAPPED))
		return;

	fast_spi_cache_bios_region();
}

static void wrapper_x86_setup_mtrrs(void *unused)
{
	x86_setup_mtrrs_with_detect();
}

static void wrapper_set_bios_done(void *unused)
{
	cpu_soc_bios_done();
}

static void wrapper_init_core_prmrr(void *unused)
{
	init_core_prmrr();
}

void before_post_cpus_init(void)
{
	/*
	 * Run these tasks on all APs only when coreboot performs
	 * multiprocessor initialization using its native drivers instead of
	 * the FSP MP PPI implementation.
	 *
	 * Skip this when USE_COREBOOT_MP_INIT is not enabled.
	 */
	if (!CONFIG(USE_COREBOOT_MP_INIT))
		return;

	if (mp_run_on_all_cpus(wrapper_init_core_prmrr, NULL) != CB_SUCCESS)
		printk(BIOS_ERR, "core PRMRR sync failure\n");

	if (mp_run_on_all_cpus(wrapper_set_bios_done, NULL) != CB_SUCCESS)
		printk(BIOS_ERR, "Set BIOS Done failure\n");

	intel_reload_microcode();
}

/* Re-program all MTRRs based on the DRAM resource settings */
static void post_cpus_init(void *unused)
{
	/* Ensure all APs finish the task and continue */
	if (mp_run_on_all_cpus_synchronously(&wrapper_x86_setup_mtrrs, NULL) != CB_SUCCESS)
		printk(BIOS_ERR, "MTRR programming failure\n");

	post_cpus_add_romcache();
	x86_mtrr_check();
}

/* Do CPU MP Init before FSP Silicon Init */
BOOT_STATE_INIT_ENTRY(BS_DEV_INIT_CHIPS, BS_ON_ENTRY, coreboot_init_cpus, NULL);
BOOT_STATE_INIT_ENTRY(BS_WRITE_TABLES, BS_ON_EXIT, post_cpus_init, NULL);
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, post_cpus_init, NULL);
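/*
 * Illustrative sketch (assumption, not code from this file): the SoC-specific
 * hooks referenced above, soc_core_init() and cpu_soc_bios_done(), are
 * supplied (or overridden) by each SoC, e.g. in its own cpu.c:
 *
 *	void soc_core_init(struct device *cpu)
 *	{
 *		// per-core SoC setup (feature MSRs, power limits, ...)
 *	}
 *
 *	void cpu_soc_bios_done(void)
 *	{
 *		// SoC-specific "BIOS done" handling, run on every CPU
 *	}
 */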