author	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>	2007-11-19 21:43:22 -0500
committer	Len Brown <len.brown@intel.com>	2007-11-19 21:43:22 -0500
commit	ddc081a19585c8ba5aad437779950c2ef215360a (patch)
tree	bea2be9bb259a336cb3be7f0babfb5bd571517da /drivers/acpi/processor_idle.c
parent	5062911830a66df0c0ad28c387a8c0623cb0d28c (diff)
download	linux-ddc081a19585c8ba5aad437779950c2ef215360a.tar.gz
	linux-ddc081a19585c8ba5aad437779950c2ef215360a.tar.bz2
	linux-ddc081a19585c8ba5aad437779950c2ef215360a.zip
cpuidle: fix HP nx6125 regression
Fix for http://bugzilla.kernel.org/show_bug.cgi?id=9355

cpuidle always used to fall back to C2 if there was bus-master (bm) activity while entering C3. But the presence of C2 is not always guaranteed. Change the cpuidle algorithm to detect a safe_state to fall back on in case of bm activity, and use that state instead of C2.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
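In essence, the patch makes acpi_idle_enter_bm() check for bus-master activity up front and, if any is seen, punt to the pre-computed dev->safe_state instead of assuming C2 exists. A condensed sketch of the new entry path, taken from the hunks below (not the full function: the timer bookkeeping and the normal C3 path are elided into comments here):

	static int acpi_idle_enter_bm(struct cpuidle_device *dev,
				      struct cpuidle_state *state)
	{
		/* New in this patch: with bus mastering active, do not
		 * attempt C3 at all.  Fall back to the shallower state
		 * recorded in dev->safe_state at setup time, or to a
		 * plain safe_halt() when no such state was registered. */
		if (acpi_idle_bm_check()) {
			if (dev->safe_state)
				return dev->safe_state->enter(dev, dev->safe_state);
			acpi_safe_halt();
			return 0;
		}

		/* ... normal C3 entry (ARB_DIS / cache flush, PM-timer
		 * reads around acpi_idle_do_entry()) follows ... */
		return 0;
	}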
Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	125
1 file changed, 55 insertions(+), 70 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 1af0694e8520..8904f5c82a1c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
+static void acpi_safe_halt(void)
+{
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we
+ * test NEED_RESCHED:
+ */
+ smp_mb();
+ if (!need_resched())
+ safe_halt();
+ current_thread_info()->status |= TS_POLLING;
+}
+
#ifndef CONFIG_CPU_IDLE
static void
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr,
return;
}
-static void acpi_safe_halt(void)
-{
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
- if (!need_resched())
- safe_halt();
- current_thread_info()->status |= TS_POLLING;
-}
-
static atomic_t c3_cpu_count;
/* Common C-state entry for C2, C3, .. */
@@ -1385,15 +1385,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
if (pr->flags.bm_check)
acpi_idle_update_bm_rld(pr, cx);
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
- if (!need_resched())
- safe_halt();
- current_thread_info()->status |= TS_POLLING;
+ acpi_safe_halt();
cx->usage++;
@@ -1493,6 +1485,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (acpi_idle_suspend)
return(acpi_idle_enter_c1(dev, state));
+ if (acpi_idle_bm_check()) {
+ if (dev->safe_state) {
+ return dev->safe_state->enter(dev, dev->safe_state);
+ } else {
+ acpi_safe_halt();
+ return 0;
+ }
+ }
+
local_irq_disable();
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -1515,49 +1516,39 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
*/
acpi_state_timer_broadcast(pr, cx, 1);
- if (acpi_idle_bm_check()) {
- cx = pr->power.bm_state;
-
- acpi_idle_update_bm_rld(pr, cx);
-
- t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- acpi_idle_do_entry(cx);
- t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- } else {
- acpi_idle_update_bm_rld(pr, cx);
+ acpi_idle_update_bm_rld(pr, cx);
- /*
- * disable bus master
- * bm_check implies we need ARB_DIS
- * !bm_check implies we need cache flush
- * bm_control implies whether we can do ARB_DIS
- *
- * That leaves a case where bm_check is set and bm_control is
- * not set. In that case we cannot do much, we enter C3
- * without doing anything.
- */
- if (pr->flags.bm_check && pr->flags.bm_control) {
- spin_lock(&c3_lock);
- c3_cpu_count++;
- /* Disable bus master arbitration when all CPUs are in C3 */
- if (c3_cpu_count == num_online_cpus())
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
- spin_unlock(&c3_lock);
- } else if (!pr->flags.bm_check) {
- ACPI_FLUSH_CPU_CACHE();
- }
+ /*
+ * disable bus master
+ * bm_check implies we need ARB_DIS
+ * !bm_check implies we need cache flush
+ * bm_control implies whether we can do ARB_DIS
+ *
+ * That leaves a case where bm_check is set and bm_control is
+ * not set. In that case we cannot do much, we enter C3
+ * without doing anything.
+ */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+ spin_lock(&c3_lock);
+ c3_cpu_count++;
+ /* Disable bus master arbitration when all CPUs are in C3 */
+ if (c3_cpu_count == num_online_cpus())
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+ spin_unlock(&c3_lock);
+ } else if (!pr->flags.bm_check) {
+ ACPI_FLUSH_CPU_CACHE();
+ }
- t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- acpi_idle_do_entry(cx);
- t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ acpi_idle_do_entry(cx);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- /* Re-enable bus master arbitration */
- if (pr->flags.bm_check && pr->flags.bm_control) {
- spin_lock(&c3_lock);
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
- c3_cpu_count--;
- spin_unlock(&c3_lock);
- }
+ /* Re-enable bus master arbitration */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+ spin_lock(&c3_lock);
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+ c3_cpu_count--;
+ spin_unlock(&c3_lock);
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
@@ -1626,12 +1617,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
case ACPI_STATE_C1:
state->flags |= CPUIDLE_FLAG_SHALLOW;
state->enter = acpi_idle_enter_c1;
+ dev->safe_state = state;
break;
case ACPI_STATE_C2:
state->flags |= CPUIDLE_FLAG_BALANCED;
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_simple;
+ dev->safe_state = state;
break;
case ACPI_STATE_C3:
@@ -1652,14 +1645,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
if (!count)
return -EINVAL;
- /* find the deepest state that can handle active BM */
- if (pr->flags.bm_check) {
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
- if (pr->power.states[i].type == ACPI_STATE_C3)
- break;
- pr->power.bm_state = &pr->power.states[i-1];
- }
-
return 0;
}
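A note on how dev->safe_state is chosen: acpi_processor_setup_cpuidle() walks the C-states shallowest-first, and only the C1 and C2 cases store into dev->safe_state, so the last assignment wins and safe_state ends up as the deepest state that can tolerate bus-master activity. Condensed from the switch above (the C3 arm is truncated in the hunk; its use of acpi_idle_enter_bm is assumed from the rest of the patch):

	switch (cx->type) {
	case ACPI_STATE_C1:
		state->enter = acpi_idle_enter_c1;
		dev->safe_state = state;	/* C1 is always BM-safe */
		break;
	case ACPI_STATE_C2:
		state->enter = acpi_idle_enter_simple;
		dev->safe_state = state;	/* overrides C1: deepest wins */
		break;
	case ACPI_STATE_C3:
		state->enter = acpi_idle_enter_bm;	/* never a fallback target */
		break;
	}

This is also why the old per-processor bm_state scan over pr->power.states could be deleted: the fallback choice now lives in the cpuidle device itself.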