| author | Aaron Durbin <adurbin@chromium.org> | 2016-12-07 00:32:19 -0600 |
| --- | --- | --- |
| committer | Aaron Durbin <adurbin@chromium.org> | 2016-12-08 21:39:43 +0100 |
| commit | b21e362e93993a8879906cf3fa56586b84226920 | (patch) |
| tree | 0536129b69e5d52d35814f43f16db69d8a42e6e7 | |
| parent | 16bd2676ce1dcec342de19640c45bd7216ba70f1 | (diff) |
cpu/x86: allow AP callbacks after MP init
There are circumstances where the APs need to run a piece of
code later in the boot flow. The current MP init simply parks
the APs once MP init has completed, so there is no opportunity
to run code on all the APs at a later point. Therefore, provide
an option, PARALLEL_MP_AP_WORK, that keeps the APs waiting for
callbacks to run instead of parking them.
BUG=chrome-os-partner:60657
BRANCH=reef
Change-Id: I849ecfdd6641dd9424943e246317cd1996ef1ba6
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: https://review.coreboot.org/17745
Tested-by: build bot (Jenkins)
Reviewed-by: Furquan Shaikh <furquan@google.com>
Reviewed-by: Lijian Zhao <lijian.zhao@intel.com>
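
To make the new API concrete, here is a minimal sketch of how platform code might use it once PARALLEL_MP_AP_WORK is selected. The MSR index and all function names below are invented purely for illustration; only mp_run_on_all_cpus(), the MSR helpers, printk(), and USECS_PER_MSEC are existing coreboot interfaces.

```c
#include <console/console.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <timer.h>

/* Hypothetical feature-enable MSR, purely for illustration. */
#define MSR_EXAMPLE_FEATURE	0x123

/* Runs once per CPU: the BSP calls it directly, each AP picks it
 * up from its callback slot. */
static void enable_feature_on_this_cpu(void)
{
	msr_t msr = rdmsr(MSR_EXAMPLE_FEATURE);
	msr.lo |= 1;
	wrmsr(MSR_EXAMPLE_FEATURE, msr);
}

static void enable_feature_everywhere(void)
{
	/* Give the APs up to 100 ms to accept the callback. */
	if (mp_run_on_all_cpus(enable_feature_on_this_cpu,
			       100 * USECS_PER_MSEC) < 0)
		printk(BIOS_ERR, "Feature not enabled on all CPUs.\n");
}
```

Note that mp_run_on_all_cpus() runs the callback on the BSP first and then hands it to the APs, so the same per-CPU routine covers every core.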
-rw-r--r-- | src/arch/x86/cpu.c | 13
-rw-r--r-- | src/cpu/x86/Kconfig | 6
-rw-r--r-- | src/cpu/x86/mp_init.c | 107
-rw-r--r-- | src/include/cpu/x86/mp.h | 18

4 files changed, 137 insertions(+), 7 deletions(-)
```diff
diff --git a/src/arch/x86/cpu.c b/src/arch/x86/cpu.c
index fbd48b0c2b9f..1e74d0cd5988 100644
--- a/src/arch/x86/cpu.c
+++ b/src/arch/x86/cpu.c
@@ -11,11 +11,13 @@
  * GNU General Public License for more details.
  */
 
+#include <bootstate.h>
 #include <boot/coreboot_tables.h>
 #include <console/console.h>
 #include <cpu/cpu.h>
 #include <arch/io.h>
 #include <string.h>
+#include <cpu/x86/mp.h>
 #include <cpu/x86/mtrr.h>
 #include <cpu/x86/msr.h>
 #include <cpu/x86/lapic.h>
@@ -310,3 +312,14 @@ void lb_arch_add_records(struct lb_header *header)
 	tsc_info->size = sizeof(*tsc_info);
 	tsc_info->freq_khz = freq_khz;
 }
+
+void arch_bootstate_coreboot_exit(void)
+{
+	/* APs are already parked by existing infrastructure. */
+	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK))
+		return;
+
+	/* APs are waiting for work. Last thing to do is park them. */
+	if (mp_park_aps())
+		printk(BIOS_ERR, "Parking APs failed.\n");
+}
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 3e56d721f593..2e233cc778f8 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -10,6 +10,12 @@ config PARALLEL_MP
 	  in parallel. It additionally provides a more flexible mechanism
 	  for sequencing the steps of bringing up the APs.
 
+config PARALLEL_MP_AP_WORK
+	def_bool n
+	depends on PARALLEL_MP
+	help
+	  Allow APs to do other work after initialization instead of going
+	  to sleep.
+
 config UDELAY_IO
 	bool
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index baa3599a9480..c98996344212 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -127,6 +127,7 @@ struct mp_flight_plan {
 	struct mp_flight_record *records;
 };
 
+static int global_num_aps;
 static struct mp_flight_plan mp_info;
 
 struct cpu_map {
@@ -185,6 +186,11 @@ static void ap_do_flight_plan(void)
 	}
 }
 
+static void park_this_cpu(void)
+{
+	stop_this_cpu();
+}
+
 /* By the time APs call ap_init() caching has been setup, and microcode has
  * been loaded. */
 static void asmlinkage ap_init(unsigned int cpu)
@@ -210,7 +216,7 @@ static void asmlinkage ap_init(unsigned int cpu)
 	ap_do_flight_plan();
 
 	/* Park the AP. */
-	stop_this_cpu();
+	park_this_cpu();
 }
 
 static void setup_default_sipi_vector_params(struct sipi_params *sp)
@@ -587,7 +593,6 @@ static void init_bsp(struct bus *cpu_bus)
 static int mp_init(struct bus *cpu_bus, struct mp_params *p)
 {
 	int num_cpus;
-	int num_aps;
 	atomic_t *ap_count;
 
 	init_bsp(cpu_bus);
@@ -621,11 +626,11 @@ static int mp_init(struct bus *cpu_bus, struct mp_params *p)
 	wbinvd();
 
 	/* Start the APs providing number of APs and the cpus_entered field. */
-	num_aps = p->num_cpus - 1;
-	if (start_aps(cpu_bus, num_aps, ap_count) < 0) {
+	global_num_aps = p->num_cpus - 1;
+	if (start_aps(cpu_bus, global_num_aps, ap_count) < 0) {
 		mdelay(1000);
 		printk(BIOS_DEBUG, "%d/%d eventually checked in?\n",
-		       atomic_read(ap_count), num_aps);
+		       atomic_read(ap_count), global_num_aps);
 		return -1;
 	}
 
@@ -838,6 +843,94 @@ static void trigger_smm_relocation(void)
 	mp_state.ops.per_cpu_smm_trigger();
 }
 
+static mp_callback_t ap_callbacks[CONFIG_MAX_CPUS];
+
+static mp_callback_t read_callback(mp_callback_t *slot)
+{
+	return *(volatile mp_callback_t *)slot;
+}
+
+static void store_callback(mp_callback_t *slot, mp_callback_t value)
+{
+	*(volatile mp_callback_t *)slot = value;
+}
+
+static int run_ap_work(mp_callback_t func, long expire_us)
+{
+	int i;
+	int cpus_accepted;
+	struct stopwatch sw;
+	int cur_cpu = cpu_index();
+
+	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK)) {
+		printk(BIOS_ERR, "APs already parked. PARALLEL_MP_AP_WORK not selected.\n");
+		return -1;
+	}
+
+	/* Signal to all the APs to run the func. */
+	for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
+		if (cur_cpu == i)
+			continue;
+		store_callback(&ap_callbacks[i], func);
+	}
+	mfence();
+
+	/* Wait for all the APs to signal back that call has been accepted. */
+	stopwatch_init_usecs_expire(&sw, expire_us);
+	for (cpus_accepted = 0; !stopwatch_expired(&sw); cpus_accepted = 0) {
+		for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
+			if (cur_cpu == i)
+				continue;
+			if (read_callback(&ap_callbacks[i]) == NULL)
+				cpus_accepted++;
+		}
+		if (cpus_accepted == global_num_aps)
+			return 0;
+	}
+
+	printk(BIOS_ERR, "AP call expired. %d/%d CPUs accepted.\n",
+	       cpus_accepted, global_num_aps);
+	return -1;
+}
+
+static void ap_wait_for_instruction(void)
+{
+	int cur_cpu = cpu_index();
+
+	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK))
+		return;
+
+	while (1) {
+		mp_callback_t func = read_callback(&ap_callbacks[cur_cpu]);
+
+		if (func == NULL) {
+			asm ("pause");
+			continue;
+		}
+
+		store_callback(&ap_callbacks[cur_cpu], NULL);
+		mfence();
+		func();
+	}
+}
+
+int mp_run_on_aps(void (*func)(void), long expire_us)
+{
+	return run_ap_work(func, expire_us);
+}
+
+int mp_run_on_all_cpus(void (*func)(void), long expire_us)
+{
+	/* Run on BSP first. */
+	func();
+	return mp_run_on_aps(func, expire_us);
+}
+
+int mp_park_aps(void)
+{
+	return mp_run_on_aps(park_this_cpu, 10 * USECS_PER_MSEC);
+}
+
 static struct mp_flight_record mp_steps[] = {
 	/* Once the APs are up load the SMM handlers. */
 	MP_FR_BLOCK_APS(NULL, load_smm_handlers),
@@ -845,8 +938,8 @@ static struct mp_flight_record mp_steps[] = {
 	MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
 	/* Initialize each CPU through the driver framework. */
 	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
-	/* Wait for APs to finish everything else then let them park. */
-	MP_FR_BLOCK_APS(NULL, NULL),
+	/* Wait for APs to finish then optionally start looking for work. */
+	MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL),
 };
 
 static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
diff --git a/src/include/cpu/x86/mp.h b/src/include/cpu/x86/mp.h
index 6d51d7b0050d..b9b4d5772cd0 100644
--- a/src/include/cpu/x86/mp.h
+++ b/src/include/cpu/x86/mp.h
@@ -124,6 +124,24 @@ struct mp_ops {
  */
 int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops);
 
+/*
+ * After APs are up and PARALLEL_MP_AP_WORK is enabled one can issue work
+ * to all the APs to perform. Currently the BSP is the only CPU that is
+ * allowed to issue work. i.e. the APs should not call any of these
+ * functions. All functions return < 0 on error, 0 on success.
+ */
+int mp_run_on_aps(void (*func)(void), long expire_us);
+
+/* Like mp_run_on_aps() but also runs func on BSP. */
+int mp_run_on_all_cpus(void (*func)(void), long expire_us);
+
+/*
+ * Park all APs to prepare for OS boot. This is handled automatically
+ * by the coreboot infrastructure.
+ */
+int mp_park_aps(void);
+
 /*
  * SMM helpers to use with initializing CPUs.
  */
```
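
The core of the change is a one-entry mailbox per CPU: run_ap_work() stores the function pointer into each AP's ap_callbacks[] slot, and every AP acknowledges by clearing its slot before invoking the function, which is exactly what the BSP's wait loop polls for. Clearing before invoking is what makes mp_park_aps() possible at all, since park_this_cpu() never returns. Below is a minimal, host-runnable sketch of the same handshake using C11 atomics and pthreads; every name in it is illustrative, not a coreboot API, and the real code relies on volatile accesses plus mfence() rather than _Atomic.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_WORKERS 3

typedef void (*callback_t)(void);

/* One mailbox slot per worker; NULL means "no work pending". */
static _Atomic(callback_t) mailbox[NUM_WORKERS];

static void say_hello(void)
{
	printf("hello from a worker\n");
}

/* Worker side, mirroring ap_wait_for_instruction(): poll the slot,
 * clear it to acknowledge, then run the function. */
static void *worker(void *arg)
{
	_Atomic(callback_t) *slot = arg;

	for (;;) {
		callback_t func = atomic_load(slot);

		if (func == NULL)
			continue;	/* coreboot executes "pause" here */

		atomic_store(slot, NULL);	/* acknowledge first ... */
		func();				/* ... then do the work */
		return NULL;	/* one job is enough for this demo */
	}
}

int main(void)
{
	pthread_t threads[NUM_WORKERS];
	int i, accepted;

	for (i = 0; i < NUM_WORKERS; i++)
		pthread_create(&threads[i], NULL, worker, &mailbox[i]);

	/* Issuer side, mirroring run_ap_work(): post the callback into
	 * every slot ... */
	for (i = 0; i < NUM_WORKERS; i++)
		atomic_store(&mailbox[i], say_hello);

	/* ... then wait until every slot reads back as NULL, meaning all
	 * workers accepted the job. (The real code bounds this wait with
	 * a stopwatch instead of looping forever.) */
	do {
		accepted = 0;
		for (i = 0; i < NUM_WORKERS; i++)
			if (atomic_load(&mailbox[i]) == NULL)
				accepted++;
	} while (accepted != NUM_WORKERS);

	for (i = 0; i < NUM_WORKERS; i++)
		pthread_join(threads[i], NULL);

	return 0;
}
```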