-rw-r--r--  drivers/cpufreq/cpufreq.c |  24
-rw-r--r--  include/linux/cpu.h       |   5
-rw-r--r--  include/linux/sched.h     |   1
-rw-r--r--  kernel/cpu.c              |  33
4 files changed, 49 insertions(+), 14 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 25acf478c9e8..23a63207d747 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -38,7 +38,6 @@ static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
static DEFINE_SPINLOCK(cpufreq_driver_lock);
-
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static void handle_update(void *data);
@@ -1115,24 +1114,21 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
int retval = -EINVAL;
/*
- * Converted the lock_cpu_hotplug to preempt_disable()
- * and preempt_enable(). This is a bit kludgy and relies on how cpu
- * hotplug works. All we need is a guarantee that cpu hotplug won't make
- * progress on any cpu. Once we do preempt_disable(), this would ensure
- * that hotplug threads don't get onto this cpu, thereby delaying
- * the cpu remove process.
- *
- * We removed the lock_cpu_hotplug since we need to call this function
- * via cpu hotplug callbacks, which result in locking the cpu hotplug
- * thread itself. Agree this is not very clean, cpufreq community
- * could improve this if required. - Ashok Raj <ashok.raj@intel.com>
+	 * If we are already in the context of the hotplug thread, we don't
+	 * need to acquire the hotplug lock. Otherwise acquire cpucontrol to
+	 * prevent hotplug from removing the cpu that we are working on.
*/
- preempt_disable();
+ if (!current_in_cpu_hotplug())
+ lock_cpu_hotplug();
+
dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
- preempt_enable();
+
+ if (!current_in_cpu_hotplug())
+ unlock_cpu_hotplug();
+
return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
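The deadlock that the new current_in_cpu_hotplug() test avoids arises when __cpufreq_driver_target() is reached from a CPU hotplug notifier: cpu_up()/cpu_down() already hold cpucontrol at that point, so taking lock_cpu_hotplug() unconditionally would block on a semaphore the same thread owns. A minimal sketch of that call path, with hypothetical notifier glue (my_cpufreq_notifier is not part of this patch):

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>

/* Sketch only: shows the recursive path that motivates the check. */
static int my_cpufreq_notifier(struct notifier_block *nb,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpufreq_policy *policy;

	if (action == CPU_DOWN_PREPARE) {
		/*
		 * We run from cpu_down(), which has already taken
		 * cpucontrol and set PF_HOTPLUG_CPU, so the
		 * current_in_cpu_hotplug() test inside
		 * __cpufreq_driver_target() makes it skip
		 * lock_cpu_hotplug() instead of deadlocking.
		 */
		policy = cpufreq_cpu_get(cpu);
		if (policy) {
			__cpufreq_driver_target(policy, policy->min,
						CPUFREQ_RELATION_L);
			cpufreq_cpu_put(policy);
		}
	}
	return NOTIFY_OK;
}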
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 1f7b2c097503..43c44530ef9d 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -42,6 +42,7 @@ struct notifier_block;
/* Need to know about CPUs going up/down? */
extern int register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
+extern int current_in_cpu_hotplug(void);
int cpu_up(unsigned int cpu);
@@ -54,6 +55,10 @@ static inline int register_cpu_notifier(struct notifier_block *nb)
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
+static inline int current_in_cpu_hotplug(void)
+{
+ return 0;
+}
#endif /* CONFIG_SMP */
extern struct sysdev_class cpu_sysdev_class;
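For code that can be entered both from normal process context and from within cpu_up()/cpu_down(), the new declaration lets the hotplug lock be taken conditionally. A minimal usage sketch assuming that pattern (the my_*() wrappers are illustrative, not part of the header):

#include <linux/cpu.h>

/* Take cpucontrol only when we are not already inside the hotplug path. */
static void my_lock_cpu_hotplug(void)
{
	if (!current_in_cpu_hotplug())
		lock_cpu_hotplug();
}

static void my_unlock_cpu_hotplug(void)
{
	if (!current_in_cpu_hotplug())
		unlock_cpu_hotplug();
}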
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03b68a7b4b82..2bbf968b23d9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -909,6 +909,7 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
+#define PF_HOTPLUG_CPU 0x01000000 /* Currently performing CPU hotplug */
/*
* Only the _current_ task can read/write to tsk->flags, but other
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3619e939182e..d61ba88f34e5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,24 @@ EXPORT_SYMBOL_GPL(cpucontrol);
static struct notifier_block *cpu_chain;
+/*
+ * Used by callers to check whether they need to acquire cpucontrol
+ * to protect a cpu from being removed. It is sometimes necessary to
+ * call the same functions both for normal operation and in response
+ * to a cpu being added/removed. If the call is made from the same
+ * thread context as a CPU hotplug thread, we don't need to take the
+ * lock since it is already held.
+ * See drivers/cpufreq/cpufreq.c for its usage. - Ashok Raj
+ */
+
+int current_in_cpu_hotplug(void)
+{
+ return (current->flags & PF_HOTPLUG_CPU);
+}
+
+EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+
+
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
@@ -94,6 +112,13 @@ int cpu_down(unsigned int cpu)
goto out;
}
+	/*
+	 * Leave a trace in current->flags indicating that we are already
+	 * in the process of performing CPU hotplug. Callers can check
+	 * whether cpucontrol is already held by the current thread and,
+	 * if so, avoid a deadlock by not acquiring the lock again.
+	 */
+ current->flags |= PF_HOTPLUG_CPU;
err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
(void *)(long)cpu);
if (err == NOTIFY_BAD) {
@@ -146,6 +171,7 @@ out_thread:
out_allowed:
set_cpus_allowed(current, old_allowed);
out:
+ current->flags &= ~PF_HOTPLUG_CPU;
unlock_cpu_hotplug();
return err;
}
@@ -163,6 +189,12 @@ int __devinit cpu_up(unsigned int cpu)
ret = -EINVAL;
goto out;
}
+
+	/*
+	 * Leave a trace in current->flags indicating that we are in the
+	 * process of performing CPU hotplug.
+	 */
+ current->flags |= PF_HOTPLUG_CPU;
ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
if (ret == NOTIFY_BAD) {
printk("%s: attempt to bring up CPU %u failed\n",
@@ -185,6 +217,7 @@ out_notify:
if (ret != 0)
notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
+ current->flags &= ~PF_HOTPLUG_CPU;
up(&cpucontrol);
return ret;
}
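Taken together, PF_HOTPLUG_CPU is set for the whole window in which cpucontrol is held and the notifier chains run, which is what makes the check in __cpufreq_driver_target() safe. A simplified, hypothetical skeleton of the resulting cpu_down() flow (error paths and the actual offlining work omitted; not the real kernel/cpu.c code):

int sketch_cpu_down(unsigned int cpu)
{
	int err;

	down(&cpucontrol);                      /* lock_cpu_hotplug() */
	current->flags |= PF_HOTPLUG_CPU;

	/*
	 * Notifiers invoked here may call back into cpufreq;
	 * current_in_cpu_hotplug() is non-zero, so they skip taking
	 * cpucontrol a second time.
	 */
	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
				  (void *)(long)cpu);

	/* ... take the cpu down, run CPU_DEAD notifiers, etc. ... */

	current->flags &= ~PF_HOTPLUG_CPU;
	up(&cpucontrol);                        /* unlock_cpu_hotplug() */
	return err;
}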