author		Kane Chen <kane.chen@intel.corp-partner.google.com>	2022-04-29 16:35:23 +0800
committer	Werner Zeh <werner.zeh@siemens.com>	2022-05-16 04:49:25 +0000
commit		c9b1f8a28e90ff6890a4f74559f5136edc5a33f9 (patch)
tree		cff9eb50eb85a3bf618c7c24e22d750604eb45c4 /src/cpu/x86/mp_init.c
parent		8b02bd1f8de71d852daf41df80d6bb27e86b90f7 (diff)
cpu/x86/mp_init.c: Add mp_run_on_all_cpus_synchronously
MTRR is a core-level register, which means the two threads of one core share
the same MTRRs. A race condition can occur in which an AP unintentionally
overrides the BSP's MTRR settings. To prevent such a race condition between
the BSP and the APs, this patch provides a function that lets the BSP assign
tasks to all APs and wait for them to complete the assigned tasks.

BUG=b:225766934

Change-Id: I8d1d49bca410c821a3ad0347548afc42eb860594
Signed-off-by: Kane Chen <kane.chen@intel.corp-partner.google.com>
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/63566
Reviewed-by: Subrata Banik <subratabanik@google.com>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
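As a usage illustration (not part of the patch): a minimal sketch of a caller. The callback name update_core_mtrrs and its body are hypothetical; mp_run_on_all_cpus_synchronously and MP_RUN_ON_ALL_CPUS are the identifiers this change adds, presumably declared alongside the other mp_run_* helpers in <cpu/x86/mp.h>.

#include <console/console.h>
#include <cpu/x86/mp.h>

/* Hypothetical per-thread task, e.g. programming a core-level MTRR.
   The callback runs on the BSP first, then on every AP, and the BSP
   does not return until every AP reports it has finished. */
static void update_core_mtrrs(void *arg)
{
	(void)arg;
	/* ... write the MTRR MSRs here ... */
}

static void program_mtrrs_everywhere(void)
{
	if (mp_run_on_all_cpus_synchronously(update_core_mtrrs, NULL) != CB_SUCCESS)
		printk(BIOS_ERR, "Synchronous MP call failed or timed out\n");
}

Because the BSP waits for every AP to complete the callback, a straggling AP can no longer overwrite register state the BSP has already programmed.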
Diffstat (limited to 'src/cpu/x86/mp_init.c')
-rw-r--r--	src/cpu/x86/mp_init.c	66
1 file changed, 57 insertions(+), 9 deletions(-)
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 51884f64a95d..c4e175f49381 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -859,6 +859,15 @@ static void trigger_smm_relocation(void)
static struct mp_callback *ap_callbacks[CONFIG_MAX_CPUS];
+enum AP_STATUS {
+	/* The AP has taken the task but has not yet finished it */
+	AP_BUSY = 1,
+	/* The AP has finished the task, or has no task to run yet */
+	AP_NOT_BUSY
+};
+
+static atomic_t ap_status[CONFIG_MAX_CPUS];
+
static struct mp_callback *read_callback(struct mp_callback **slot)
{
	struct mp_callback *ret;
@@ -880,10 +889,10 @@ static void store_callback(struct mp_callback **slot, struct mp_callback *val)
	);
}
-static enum cb_err run_ap_work(struct mp_callback *val, long expire_us)
+static enum cb_err run_ap_work(struct mp_callback *val, long expire_us, bool wait_ap_finish)
{
	int i;
-	int cpus_accepted;
+	int cpus_accepted, cpus_finish;
	struct stopwatch sw;
	int cur_cpu;
@@ -913,16 +922,28 @@ static enum cb_err run_ap_work(struct mp_callback *val, long expire_us)
	do {
		cpus_accepted = 0;
+		cpus_finish = 0;
		for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
			if (cur_cpu == i)
				continue;
-			if (read_callback(&ap_callbacks[i]) == NULL)
+
+			if (read_callback(&ap_callbacks[i]) == NULL) {
				cpus_accepted++;
+				/* Only count an AP as finished if it took the task and is no longer busy */
+				if (atomic_read(&ap_status[i]) == AP_NOT_BUSY)
+					cpus_finish++;
+			}
		}
+		/*
+		 * If wait_ap_finish is true, all APs must have finished the task before
+		 * returning; otherwise it is enough that all APs have taken the task.
+		 */
		if (cpus_accepted == global_num_aps)
-			return CB_SUCCESS;
+			if (!wait_ap_finish || (cpus_finish == global_num_aps))
+				return CB_SUCCESS;
+
	} while (expire_us <= 0 || !stopwatch_expired(&sw));

	printk(BIOS_CRIT, "CRITICAL ERROR: AP call expired. %d/%d CPUs accepted.\n",
@@ -948,6 +969,9 @@ static void ap_wait_for_instruction(void)
	per_cpu_slot = &ap_callbacks[cur_cpu];

+	/* Initialize ap_status[cur_cpu] to AP_NOT_BUSY; this AP is ready to take a job */
+	atomic_set(&ap_status[cur_cpu], AP_NOT_BUSY);
+
	while (1) {
		struct mp_callback *cb = read_callback(per_cpu_slot);
@@ -955,16 +979,22 @@ static void ap_wait_for_instruction(void)
			asm ("pause");
			continue;
		}
+		/*
+		 * Set ap_status to AP_BUSY before store_callback(per_cpu_slot, NULL)
+		 * so that the BSP knows this AP has taken the task and is busy,
+		 * avoiding the race condition.
+		 */
+		atomic_set(&ap_status[cur_cpu], AP_BUSY);
+
		/* Copy to local variable before signaling consumption. */
		memcpy(&lcb, cb, sizeof(lcb));
		mfence();
		store_callback(per_cpu_slot, NULL);
-		if (lcb.logical_cpu_number && (cur_cpu !=
-		    lcb.logical_cpu_number))
-			continue;
-		else
+
+		if (lcb.logical_cpu_number == MP_RUN_ON_ALL_CPUS ||
+		    (cur_cpu == lcb.logical_cpu_number))
			lcb.func(lcb.arg);
+
+		atomic_set(&ap_status[cur_cpu], AP_NOT_BUSY);
	}
}
@@ -973,7 +1003,15 @@ enum cb_err mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
{
	struct mp_callback lcb = { .func = func, .arg = arg,
				.logical_cpu_number = logical_cpu_num};
-	return run_ap_work(&lcb, expire_us);
+	return run_ap_work(&lcb, expire_us, false);
+}
+
+static enum cb_err mp_run_on_aps_and_wait_for_complete(void (*func)(void *), void *arg,
+						       int logical_cpu_num, long expire_us)
+{
+	struct mp_callback lcb = { .func = func, .arg = arg,
+				.logical_cpu_number = logical_cpu_num};
+	return run_ap_work(&lcb, expire_us, true);
}
enum cb_err mp_run_on_all_aps(void (*func)(void *), void *arg, long expire_us,
@@ -1008,6 +1046,16 @@ enum cb_err mp_run_on_all_cpus(void (*func)(void *), void *arg)
	return mp_run_on_aps(func, arg, MP_RUN_ON_ALL_CPUS, 1000 * USECS_PER_MSEC);
}
+enum cb_err mp_run_on_all_cpus_synchronously(void (*func)(void *), void *arg)
+{
+	/* Run on the BSP first. */
+	func(arg);
+
+	/* Wait up to 1 second for the APs to finish their work. */
+	return mp_run_on_aps_and_wait_for_complete(func, arg, MP_RUN_ON_ALL_CPUS,
+						   1000 * USECS_PER_MSEC);
+}
+
enum cb_err mp_park_aps(void)
{
	struct stopwatch sw;
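For readers following the handshake: the ordering is the whole point. The AP marks itself AP_BUSY strictly before it clears its callback slot, so the BSP can never observe an empty slot together with AP_NOT_BUSY while the task is still in flight. Below is a standalone sketch of the same pattern, using C11 atomics instead of coreboot's atomic_t, with a single mailbox/status pair standing in for the per-CPU ap_callbacks[] and ap_status[] arrays:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

enum { AP_BUSY = 1, AP_NOT_BUSY };

typedef void (*task_fn)(void);

static _Atomic task_fn mailbox;          /* stands in for ap_callbacks[i] */
static atomic_int status = AP_NOT_BUSY;  /* stands in for ap_status[i]    */

/* AP side: claim (BUSY) strictly before accept (clear mailbox); report done last. */
static void ap_loop(void)
{
	for (;;) {
		task_fn cb = atomic_load(&mailbox);
		if (cb == NULL)
			continue;                   /* nothing posted yet */
		atomic_store(&status, AP_BUSY);     /* 1: claim the task  */
		atomic_store(&mailbox, NULL);       /* 2: accept the task */
		cb();                               /* 3: run it          */
		atomic_store(&status, AP_NOT_BUSY); /* 4: report done     */
	}
}

/* BSP side: "accepted" means the mailbox is empty; "finished" additionally
   requires AP_NOT_BUSY, which is exactly the extra condition run_ap_work()
   checks when wait_ap_finish is true. */
static bool ap_finished(void)
{
	return atomic_load(&mailbox) == NULL && atomic_load(&status) == AP_NOT_BUSY;
}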