path: root/kernel/cpu.c
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit   1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree     0bba044c4ce775e45a88a51686b5d9f90697ea9d /kernel/cpu.c
download linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.gz
         linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.bz2
         linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.zip
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--  kernel/cpu.c  193
1 file changed, 193 insertions(+), 0 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
new file mode 100644
index 000000000000..628f4ccda127
--- /dev/null
+++ b/kernel/cpu.c
@@ -0,0 +1,193 @@
+/* CPU control.
+ * (C) 2001, 2002, 2003, 2004 Rusty Russell
+ *
+ * This code is licenced under the GPL.
+ */
+#include <linux/proc_fs.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/unistd.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/stop_machine.h>
+#include <asm/semaphore.h>
+
+/* This protects CPUs going up and down... */
+DECLARE_MUTEX(cpucontrol);
+
+static struct notifier_block *cpu_chain;
+
+/* Need to know about CPUs going up/down? */
+int register_cpu_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ if ((ret = down_interruptible(&cpucontrol)) != 0)
+ return ret;
+ ret = notifier_chain_register(&cpu_chain, nb);
+ up(&cpucontrol);
+ return ret;
+}
+EXPORT_SYMBOL(register_cpu_notifier);
+
+void unregister_cpu_notifier(struct notifier_block *nb)
+{
+ down(&cpucontrol);
+ notifier_chain_unregister(&cpu_chain, nb);
+ up(&cpucontrol);
+}
+EXPORT_SYMBOL(unregister_cpu_notifier);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void check_for_tasks(int cpu)
+{
+ struct task_struct *p;
+
+ write_lock_irq(&tasklist_lock);
+ for_each_process(p) {
+ if (task_cpu(p) == cpu &&
+ (!cputime_eq(p->utime, cputime_zero) ||
+ !cputime_eq(p->stime, cputime_zero)))
+ printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
+ "(state = %ld, flags = %lx)\n",
+ p->comm, p->pid, cpu, p->state, p->flags);
+ }
+ write_unlock_irq(&tasklist_lock);
+}
+
+/* Take this CPU down. */
+static int take_cpu_down(void *unused)
+{
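+ /* Runs via __stop_machine_run() on the CPU going down; every other
+ CPU is held in a stop_machine thread while this executes. */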
+ int err;
+
+ /* Take offline: makes arch_cpu_down somewhat easier. */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+
+ /* Ensure this CPU doesn't handle any more interrupts. */
+ err = __cpu_disable();
+ if (err < 0)
+ cpu_set(smp_processor_id(), cpu_online_map);
+ else
+ /* Force idle task to run as soon as we yield: it should
+ immediately notice cpu is offline and die quickly. */
+ sched_idle_next();
+
+ return err;
+}
+
+int cpu_down(unsigned int cpu)
+{
+ int err;
+ struct task_struct *p;
+ cpumask_t old_allowed, tmp;
+
+ if ((err = lock_cpu_hotplug_interruptible()) != 0)
+ return err;
+
+ if (num_online_cpus() == 1) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (!cpu_online(cpu)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+ (void *)(long)cpu);
+ if (err == NOTIFY_BAD) {
+ printk("%s: attempt to take down CPU %u failed\n",
+ __FUNCTION__, cpu);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Ensure that we are not runnable on dying cpu */
+ old_allowed = current->cpus_allowed;
+ tmp = CPU_MASK_ALL;
+ cpu_clear(cpu, tmp);
+ set_cpus_allowed(current, tmp);
+
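+ /* Freeze the machine and run take_cpu_down() on the dying CPU; the
+ returned stop_machine kthread is reaped below with kthread_stop(). */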
+ p = __stop_machine_run(take_cpu_down, NULL, cpu);
+ if (IS_ERR(p)) {
+ /* CPU didn't die: tell everyone. Can't complain. */
+ if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+ (void *)(long)cpu) == NOTIFY_BAD)
+ BUG();
+
+ err = PTR_ERR(p);
+ goto out_allowed;
+ }
+
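+ /* take_cpu_down() ran but __cpu_disable() failed and put the CPU
+ back online; skip straight to reaping the stop_machine thread. */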
+ if (cpu_online(cpu))
+ goto out_thread;
+
+ /* Wait for it to sleep (leaving idle task). */
+ while (!idle_cpu(cpu))
+ yield();
+
+ /* This actually kills the CPU. */
+ __cpu_die(cpu);
+
+ /* Move it here so it can run. */
+ kthread_bind(p, get_cpu());
+ put_cpu();
+
+ /* CPU is completely dead: tell everyone. Too late to complain. */
+ if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
+ == NOTIFY_BAD)
+ BUG();
+
+ check_for_tasks(cpu);
+
+out_thread:
+ err = kthread_stop(p);
+out_allowed:
+ set_cpus_allowed(current, old_allowed);
+out:
+ unlock_cpu_hotplug();
+ return err;
+}
+#endif /*CONFIG_HOTPLUG_CPU*/
+
+int __devinit cpu_up(unsigned int cpu)
+{
+ int ret;
+ void *hcpu = (void *)(long)cpu;
+
+ if ((ret = down_interruptible(&cpucontrol)) != 0)
+ return ret;
+
+ if (cpu_online(cpu) || !cpu_present(cpu)) {
+ ret = -EINVAL;
+ goto out;
+ }
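+ /* Give interested subsystems a chance to prepare (or veto). */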
+ ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+ if (ret == NOTIFY_BAD) {
+ printk("%s: attempt to bring up CPU %u failed\n",
+ __FUNCTION__, cpu);
+ ret = -EINVAL;
+ goto out_notify;
+ }
+
+ /* Arch-specific enabling code. */
+ ret = __cpu_up(cpu);
+ if (ret != 0)
+ goto out_notify;
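+ /* Arch code must have set the bit in cpu_online_map by now. */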
+ if (!cpu_online(cpu))
+ BUG();
+
+ /* Tell interested subsystems that the CPU is now online. */
+ notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+
+out_notify:
+ if (ret != 0)
+ notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
+out:
+ up(&cpucontrol);
+ return ret;
+}
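
For context, a 2.6-era client of this notifier interface would look roughly like the sketch below. The callback recovers the CPU number from the hcpu argument by reversing the (void *)(long)cpu encoding used throughout cpu.c. The names my_cpu_callback and my_cpu_notifier are illustrative only, not part of this commit:

#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/kernel.h>

/* Invoked for each event raised on cpu_chain by kernel/cpu.c. */
static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;	/* undo (void *)(long)cpu */

	switch (action) {
	case CPU_UP_PREPARE:
		/* Allocate per-cpu state; return NOTIFY_BAD to veto. */
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "cpu %u is now online\n", cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		/* The CPU is gone: free or migrate its state. */
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
	.notifier_call = my_cpu_callback,
};

/* At subsystem init:  register_cpu_notifier(&my_cpu_notifier);  */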