From 70f2817a43c89b784dc2ec3d06ba5bf3064f8235 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 29 Apr 2005 01:27:34 -0500 Subject: [PATCH] sysfs: (rest) if show/store is missing return -EIO sysfs: fix the rest of the kernel so if an attribute doesn't implement show or store method read/write will return -EIO instead of 0 or -EINVAL or -EPERM. Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- kernel/params.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index 5513844bec13..d586c35ef8fc 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -629,7 +629,7 @@ static ssize_t module_attr_show(struct kobject *kobj, mk = to_module_kobject(kobj); if (!attribute->show) - return -EPERM; + return -EIO; if (!try_module_get(mk->mod)) return -ENODEV; @@ -653,7 +653,7 @@ static ssize_t module_attr_store(struct kobject *kobj, mk = to_module_kobject(kobj); if (!attribute->store) - return -EPERM; + return -EIO; if (!try_module_get(mk->mod)) return -ENODEV; -- cgit v1.2.3 From 39c715b71740c4a78ba4769fb54826929bac03cb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 21 Jun 2005 17:14:34 -0700 Subject: [PATCH] smp_processor_id() cleanup This patch implements a number of smp_processor_id() cleanup ideas that Arjan van de Ven and I came up with. The previous __smp_processor_id/_smp_processor_id/smp_processor_id API spaghetti was hard to follow both on the implementational and on the usage side. Some of the complexity arose from picking wrong names, some of the complexity comes from the fact that not all architectures defined __smp_processor_id. In the new code, there are two externally visible symbols: - smp_processor_id(): debug variant. - raw_smp_processor_id(): nondebug variant. Replaces all existing uses of _smp_processor_id() and __smp_processor_id(). Defined by every SMP architecture in include/asm-*/smp.h. There is one new internal symbol, dependent on DEBUG_PREEMPT: - debug_smp_processor_id(): internal debug variant, mapped to smp_processor_id(). Also, i moved debug_smp_processor_id() from lib/kernel_lock.c into a new lib/smp_processor_id.c file. All related comments got updated and/or clarified. I have build/boot tested the following 8 .config combinations on x86: {SMP,UP} x {PREEMPT,!PREEMPT} x {DEBUG_PREEMPT,!DEBUG_PREEMPT} I have also build/boot tested x64 on UP/PREEMPT/DEBUG_PREEMPT. (Other architectures are untested, but should work just fine.) Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/module.c | 2 +- kernel/power/smp.c | 4 ++-- kernel/sched.c | 4 ++-- kernel/stop_machine.c | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 83b3d376708c..a566745dde62 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -379,7 +379,7 @@ static void module_unload_init(struct module *mod) for (i = 0; i < NR_CPUS; i++) local_set(&mod->ref[i].count, 0); /* Hold reference count during initialization. */ - local_set(&mod->ref[_smp_processor_id()].count, 1); + local_set(&mod->ref[raw_smp_processor_id()].count, 1); /* Backwards compatibility macros put refcount during init. 
*/ mod->waiter = current; } diff --git a/kernel/power/smp.c b/kernel/power/smp.c index cba3584b80fe..457c2302ed42 100644 --- a/kernel/power/smp.c +++ b/kernel/power/smp.c @@ -48,11 +48,11 @@ void disable_nonboot_cpus(void) { oldmask = current->cpus_allowed; set_cpus_allowed(current, cpumask_of_cpu(0)); - printk("Freezing CPUs (at %d)", _smp_processor_id()); + printk("Freezing CPUs (at %d)", raw_smp_processor_id()); current->state = TASK_INTERRUPTIBLE; schedule_timeout(HZ); printk("..."); - BUG_ON(_smp_processor_id() != 0); + BUG_ON(raw_smp_processor_id() != 0); /* FIXME: for this to work, all the CPUs must be running * "idle" thread (or we deadlock). Is that guaranteed? */ diff --git a/kernel/sched.c b/kernel/sched.c index f12a0c8a7d98..deca041fc364 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3814,7 +3814,7 @@ EXPORT_SYMBOL(yield); */ void __sched io_schedule(void) { - struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id()); + struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id()); atomic_inc(&rq->nr_iowait); schedule(); @@ -3825,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule); long __sched io_schedule_timeout(long timeout) { - struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id()); + struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id()); long ret; atomic_inc(&rq->nr_iowait); diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 6116b25aa7cf..84a9d18aa8da 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -100,7 +100,7 @@ static int stop_machine(void) stopmachine_state = STOPMACHINE_WAIT; for_each_online_cpu(i) { - if (i == _smp_processor_id()) + if (i == raw_smp_processor_id()) continue; ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL); if (ret < 0) @@ -182,7 +182,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, /* If they don't care which CPU fn runs on, bind to any online one. */ if (cpu == NR_CPUS) - cpu = _smp_processor_id(); + cpu = raw_smp_processor_id(); p = kthread_create(do_stop, &smdata, "kstopmachine"); if (!IS_ERR(p)) { -- cgit v1.2.3 From 753ee728964e5afb80c17659cc6c3a6fd0a42fe0 Mon Sep 17 00:00:00 2001 From: Martin Hicks Date: Tue, 21 Jun 2005 17:14:41 -0700 Subject: [PATCH] VM: early zone reclaim This is the core of the (much simplified) early reclaim. The goal of this patch is to reclaim some easily-freed pages from a zone before falling back onto another zone. One of the major uses of this is NUMA machines. With the default allocator behavior the allocator would look for memory in another zone, which might be off-node, before trying to reclaim from the current zone. This adds a zone tuneable to enable early zone reclaim. It is selected on a per-zone basis and is turned on/off via syscall. Adding some extra throttling on the reclaim was also required (patch 4/4). Without the machine would grind to a crawl when doing a "make -j" kernel build. Even with this patch the System Time is higher on average, but it seems tolerable. Here are some numbers for kernbench runs on a 2-node, 4cpu, 8Gig RAM Altix in the "make -j" run: wall user sys %cpu ctx sw. sleeps ---- ---- --- ---- ------ ------ No patch 1009 1384 847 258 298170 504402 w/patch, no reclaim 880 1376 667 288 254064 396745 w/patch & reclaim 1079 1385 926 252 291625 548873 These numbers are the average of 2 runs of 3 "make -j" runs done right after system boot. 
Run-to-run variability for "make -j" is huge, so these numbers aren't terribly useful except to seee that with reclaim the benchmark still finishes in a reasonable amount of time. I also looked at the NUMA hit/miss stats for the "make -j" runs and the reclaim doesn't make any difference when the machine is thrashing away. Doing a "make -j8" on a single node that is filled with page cache pages takes 700 seconds with reclaim turned on and 735 seconds without reclaim (due to remote memory accesses). The simple zone_reclaim syscall program is at http://www.bork.org/~mort/sgi/zone_reclaim.c Signed-off-by: Martin Hicks Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys_ni.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 0dda70ed1f98..6f15bea7d1a8 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -77,6 +77,7 @@ cond_syscall(sys_request_key); cond_syscall(sys_keyctl); cond_syscall(compat_sys_keyctl); cond_syscall(compat_sys_socketcall); +cond_syscall(sys_set_zone_reclaim); /* arch-specific weak syscall entries */ cond_syscall(sys_pciconfig_read); -- cgit v1.2.3 From 1363c3cd8603a913a27e2995dccbd70d5312d8e6 Mon Sep 17 00:00:00 2001 From: Wolfgang Wander Date: Tue, 21 Jun 2005 17:14:49 -0700 Subject: [PATCH] Avoiding mmap fragmentation Ingo recently introduced a great speedup for allocating new mmaps using the free_area_cache pointer which boosts the specweb SSL benchmark by 4-5% and causes huge performance increases in thread creation. The downside of this patch is that it does lead to fragmentation in the mmap-ed areas (visible via /proc/self/maps), such that some applications that work fine under 2.4 kernels quickly run out of memory on any 2.6 kernel. The problem is twofold: 1) the free_area_cache is used to continue a search for memory where the last search ended. Before the change new areas were always searched from the base address on. So now new small areas are cluttering holes of all sizes throughout the whole mmap-able region whereas before small holes tended to close holes near the base leaving holes far from the base large and available for larger requests. 2) the free_area_cache also is set to the location of the last munmap-ed area so in scenarios where we allocate e.g. five regions of 1K each, then free regions 4 2 3 in this order the next request for 1K will be placed in the position of the old region 3, whereas before we appended it to the still active region 1, placing it at the location of the old region 2. Before we had 1 free region of 2K, now we only get two free regions of 1K -> fragmentation. The patch addresses thes issues by introducing yet another cache descriptor cached_hole_size that contains the largest known hole size below the current free_area_cache. If a new request comes in the size is compared against the cached_hole_size and if the request can be filled with a hole below free_area_cache the search is started from the base instead. The results look promising: Whereas 2.6.12-rc4 fragments quickly and my (earlier posted) leakme.c test program terminates after 50000+ iterations with 96 distinct and fragmented maps in /proc/self/maps it performs nicely (as expected) with thread creation, Ingo's test_str02 with 20000 threads requires 0.7s system time. 
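Before the counter-experiments below, the search-start decision described above (compare the request against cached_hole_size; go back to the base if a known hole below free_area_cache could fit it, otherwise continue from free_area_cache) can be modelled in a few lines of standalone userspace C. The struct, field names and numbers here are purely illustrative, not the patch itself; in the kernel the two fields live in struct mm_struct and the check sits in the arch get_unmapped_area() paths, with the ~0UL initialisation in the fork.c hunks effectively meaning "no hole size known yet, start from the base".

        /* Standalone model of the search-start decision -- not kernel code. */
        #include <stdio.h>

        struct mm_model {
                unsigned long free_area_cache;   /* where the last search ended   */
                unsigned long cached_hole_size;  /* largest known hole below it   */
                unsigned long unmapped_base;     /* TASK_UNMAPPED_BASE equivalent */
        };

        static unsigned long search_start(const struct mm_model *mm, unsigned long len)
        {
                if (len <= mm->cached_hole_size)
                        return mm->unmapped_base;    /* a hole near the base may fit: reuse it */
                return mm->free_area_cache;          /* too big for any known hole: keep going up */
        }

        int main(void)
        {
                struct mm_model mm = {
                        .free_area_cache  = 0x50000000UL,
                        .cached_hole_size = 0x2000UL,    /* an 8K hole is known below the cache */
                        .unmapped_base    = 0x40000000UL,
                };

                printf("4K request starts at  %#lx\n", search_start(&mm, 0x1000UL));
                printf("64K request starts at %#lx\n", search_start(&mm, 0x10000UL));
                return 0;
        }

Small requests that fit a known hole restart near the base (closing fragmentation), while larger ones keep appending where the last search ended, which is the behaviour the benchmark numbers below are probing.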
Taking out Ingo's patch (un-patch available per request) by basically deleting all mentions of free_area_cache from the kernel and starting the search for new memory always at the respective bases we observe: leakme terminates successfully with 11 distinctive hardly fragmented areas in /proc/self/maps but thread creating is gringdingly slow: 30+s(!) system time for Ingo's test_str02 with 20000 threads. Now - drumroll ;-) the appended patch works fine with leakme: it ends with only 7 distinct areas in /proc/self/maps and also thread creation seems sufficiently fast with 0.71s for 20000 threads. Signed-off-by: Wolfgang Wander Credit-to: "Richard Purdie" Signed-off-by: Ken Chen Acked-by: Ingo Molnar (partly) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index f42a17f88699..876b31cd822d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -194,6 +194,7 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm) mm->mmap = NULL; mm->mmap_cache = NULL; mm->free_area_cache = oldmm->mmap_base; + mm->cached_hole_size = ~0UL; mm->map_count = 0; set_mm_counter(mm, rss, 0); set_mm_counter(mm, anon_rss, 0); @@ -322,6 +323,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm) mm->ioctx_list = NULL; mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm); mm->free_area_cache = TASK_UNMAPPED_BASE; + mm->cached_hole_size = ~0UL; if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; -- cgit v1.2.3 From 45918e1a8bfcabc1cb4570b8df276655020eac45 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 21 Jun 2005 17:15:08 -0700 Subject: [PATCH] dup_mmap: update comment on new vma Remove part of comment on linking new vma in dup_mmap: since anon_vma rmap came in, try_to_unmap_one knows the vma without needing find_vma. But add a comment to note that here vma is inserted without mmap_sem. Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 876b31cd822d..a28d11e10877 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -250,8 +250,9 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm) /* * Link in the new vma and copy the page table entries: - * link in first so that swapoff can see swap entries, - * and try_to_unmap_one's find_vma find the new vma. + * link in first so that swapoff can see swap entries. + * Note that, exceptionally, here the vma is inserted + * without holding mm->mmap_sem. */ spin_lock(&mm->page_table_lock); *pprev = tmp; -- cgit v1.2.3 From dbce706e2550253c5ab6043f4f5dfde0cd02470f Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Tue, 21 Jun 2005 17:16:19 -0700 Subject: [PATCH] uml: add and use generic hw_controller_type->release With Chris Wedgwood Currently UML must explicitly call the UML-specific free_irq_by_irq_and_dev() for each free_irq call it's done. This is needed because ->shutdown and/or ->disable are only called when the last "action" for that irq is removed. Instead, for UML shared IRQs (UML IRQs are very often, if not always, shared), for each dev_id some setup is done, which must be cleared on the release of that fd. For instance, for each open console a new instance (i.e. new dev_id) of the same IRQ is requested(). 
Exactly, a fd is stored in an array (pollfds), which is after read by a host thread and passed to poll(). Each event registered by poll() triggers an interrupt. So, for each free_irq() we must remove the corresponding host fd from the table, which we do via this -release() method. In this patch we add an appropriate hook for this, and remove all uses of it by pointing the hook to the said procedure; this is safe to do since the said procedure. Also some cosmetic improvements are included. This is heavily based on some work by Chris Wedgwood, which however didn't get the patch merged for something I'd call a "misunderstanding" (the need for this patch wasn't cleanly explained, thus adding the generic hook was felt as undesirable). Signed-off-by: Paolo 'Blaisorblade' Giarrusso CC: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/manage.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 5202e4c4a5b6..5fde8177eedf 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -255,6 +255,10 @@ void free_irq(unsigned int irq, void *dev_id) /* Found it - now remove it from the list of entries */ *pp = action->next; + + if (desc->handler->release) + desc->handler->release(irq, dev_id); + if (!desc->action) { desc->status |= IRQ_DISABLED; if (desc->handler->shutdown) -- cgit v1.2.3 From b77d6adc922b8bbf8b16b67f567958c42962cf88 Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Tue, 21 Jun 2005 17:16:24 -0700 Subject: [PATCH] uml: make hw_controller_type->release exist only for archs needing it With Chris Wedgwood As suggested by Chris, we can make the "just added" method ->release conditional to UML only (better: to archs requesting it, i.e. only UML currently), so that other archs don't get this unneeded crud, and if UML won't need it any more we can kill this. Signed-off-by: Paolo 'Blaisorblade' Giarrusso CC: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/manage.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 5fde8177eedf..ac6700985705 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -6,6 +6,7 @@ * This file contains driver APIs to the irq subsystem. */ +#include #include #include #include @@ -256,8 +257,11 @@ void free_irq(unsigned int irq, void *dev_id) /* Found it - now remove it from the list of entries */ *pp = action->next; + /* Currently used only by UML, might disappear one day.*/ +#ifdef CONFIG_IRQ_RELEASE_METHOD if (desc->handler->release) desc->handler->release(irq, dev_id); +#endif if (!desc->action) { desc->status |= IRQ_DISABLED; -- cgit v1.2.3 From 59121003721a8fad11ee72e646fd9d3076b5679c Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 23 Jun 2005 00:08:25 -0700 Subject: [PATCH] i386: Selectable Frequency of the Timer Interrupt Make the timer frequency selectable. The timer interrupt may cause bus and memory contention in large NUMA systems since the interrupt occurs on each processor HZ times per second. 
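The contention argument is easiest to see as plain arithmetic: the aggregate timer interrupt rate grows as NR_CPUS * HZ, which the Kconfig help text below also spells out. A purely illustrative userspace calculation (the CPU counts are arbitrary examples, not taken from the patch):

        /* Back-of-the-envelope system-wide timer interrupt rate: NR_CPUS * HZ. */
        #include <stdio.h>

        int main(void)
        {
                const int hz_choices[] = { 100, 250, 1000 };
                const int cpu_counts[] = { 4, 64, 512 };
                unsigned i, j;

                for (i = 0; i < sizeof(cpu_counts) / sizeof(cpu_counts[0]); i++)
                        for (j = 0; j < sizeof(hz_choices) / sizeof(hz_choices[0]); j++)
                                printf("%4d CPUs @ %4d HZ -> %7ld timer interrupts/sec\n",
                                       cpu_counts[i], hz_choices[j],
                                       (long)cpu_counts[i] * hz_choices[j]);
                return 0;
        }

At 512 CPUs the difference between HZ=1000 and HZ=100 is roughly half a million versus fifty thousand interrupts per second across the machine, which is the NUMA-scale overhead the new choice is meant to let administrators trade away.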
Signed-off-by: Christoph Lameter Signed-off-by: Shai Fultheim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Kconfig.hz | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 kernel/Kconfig.hz (limited to 'kernel') diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz new file mode 100644 index 000000000000..248e1c396f8b --- /dev/null +++ b/kernel/Kconfig.hz @@ -0,0 +1,46 @@ +# +# Timer Interrupt Frequency Configuration +# + +choice + prompt "Timer frequency" + default HZ_250 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 HZ but 100 HZ may be more + beneficial for servers and NUMA systems that do not need to have + a fast response for user interaction and that may experience bus + contention and cacheline bounces as a result of timer interrupts. + Note that the timer interrupt occurs on each processor in an SMP + environment leading to NR_CPUS * HZ number of timer interrupts + per second. + + + config HZ_100 + bool "100 HZ" + help + 100 HZ is a typical choice for servers, SMP and NUMA systems + with lots of processors that may show reduced performance if + too many timer interrupts are occurring. + + config HZ_250 + bool "250 HZ" + help + 250 HZ is a good compromise choice allowing server performance + while also showing good interactive responsiveness even + on SMP and NUMA systems. + + config HZ_1000 + bool "1000 HZ" + help + 1000 HZ is the preferred choice for desktop systems and other + systems requiring fast interactive responses to events. + +endchoice + +config HZ + int + default 100 if HZ_100 + default 250 if HZ_250 + default 1000 if HZ_1000 + -- cgit v1.2.3 From 55c888d6d09a0df236adfaf8ccf06ff5d0646775 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 23 Jun 2005 00:08:56 -0700 Subject: [PATCH] timers fixes/improvements This patch tries to solve following problems: 1. del_timer_sync() is racy. The timer can be fired again after del_timer_sync have checked all cpus and before it will recheck timer_pending(). 2. It has scalability problems. All cpus are scanned to determine if the timer is running on that cpu. With this patch del_timer_sync is O(1) and no slower than plain del_timer(pending_timer), unless it has to actually wait for completion of the currently running timer. The only restriction is that the recurring timer should not use add_timer_on(). 3. The timers are not serialized wrt to itself. If CPU_0 does mod_timer(jiffies+1) while the timer is currently running on CPU 1, it is quite possible that local interrupt on CPU_0 will start that timer before it finished on CPU_1. 4. The timers locking is suboptimal. __mod_timer() takes 3 locks at once and still requires wmb() in del_timer/run_timers. The new implementation takes 2 locks sequentially and does not need memory barriers. Currently ->base != NULL means that the timer is pending. In that case ->base.lock is used to lock the timer. __mod_timer also takes timer->lock because ->base can be == NULL. This patch uses timer->entry.next != NULL as indication that the timer is pending. So it does __list_del(), entry->next = NULL instead of list_del() when the timer is deleted. The ->base field is used for hashed locking only, it is initialized in init_timer() which sets ->base = per_cpu(tvec_bases). When the tvec_bases.lock is locked, it means that all timers which are tied to this base via timer->base are locked, and the base itself is locked too. 
So __run_timers/migrate_timers can safely modify all timers which could be found on ->tvX lists (pending timers).

When the timer's base is locked, and the timer is removed from the ->entry list (which means that __run_timers/migrate_timers can't see this timer), it is possible to set timer->base = NULL and drop the lock: the timer remains locked.

This patch adds a lock_timer_base() helper, which waits for ->base != NULL, locks the ->base, and checks that it is still the same.

__mod_timer() schedules the timer on the local CPU and changes its base. However, it does not lock both the old and new bases at once. It locks the timer via lock_timer_base(), deletes the timer, sets ->base = NULL, and unlocks the old base. Then __mod_timer() locks new_base, sets ->base = new_base, and adds this timer. This simplifies the code, because an AB-BA deadlock is not possible. __mod_timer() also ensures that the timer's base is not changed while the timer's handler is running on the old base.

__run_timers() and del_timer() do not change ->base anymore, they only clear the pending flag. So del_timer_sync() can test timer->base->running_timer == timer to detect whether it is running or not.

We don't need timer_list->lock anymore; this patch kills it. We also don't need barriers. del_timer() and __run_timers() used smp_wmb() before clearing the timer's pending flag. It was needed because __mod_timer() did not lock old_base if the timer was not pending, so __mod_timer()->list_add() could race with del_timer()->list_del(). With this patch these functions are serialized through base->lock.

One problem remains. TIMER_INITIALIZER can't use per_cpu(tvec_bases). So this patch adds a global

        struct timer_base_s {
                spinlock_t lock;
                struct timer_list *running_timer;
        } __init_timer_base;

which is used by TIMER_INITIALIZER. The corresponding fields in the tvec_t_base_s struct are replaced by a struct timer_base_s t_base member. It is indeed ugly, but this can't have scalability problems: the global __init_timer_base.lock is used only when __mod_timer() is called for the first time AND the timer was compile-time initialized. After that the timer migrates to the local CPU.
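Given those rules (del_timer_sync() only from process context, and no add_timer_on() from a handler that will be synced against), typical usage of a self-rearming timer looks roughly like the sketch below. This is an illustrative driver-style fragment, not part of the patch; the my_* names are made up, and the stop flag would need proper synchronisation in real code.

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        static struct timer_list my_timer;
        static int my_stopping;

        static void my_timer_fn(unsigned long data)
        {
                /* ... periodic work ... */
                if (!my_stopping)
                        mod_timer(&my_timer, jiffies + HZ);  /* re-arm locally; no add_timer_on() */
        }

        static void my_start(void)
        {
                init_timer(&my_timer);          /* binds timer->base to this CPU's base */
                my_timer.function = my_timer_fn;
                my_timer.data = 0;
                mod_timer(&my_timer, jiffies + HZ);
        }

        static void my_stop(void)               /* process context only */
        {
                my_stopping = 1;                /* stop the handler from re-arming ... */
                del_timer_sync(&my_timer);      /* ... then wait for it to finish      */
        }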
Signed-off-by: Oleg Nesterov Acked-by: Ingo Molnar Signed-off-by: Renaud Lienhart Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/timer.c | 328 +++++++++++++++++++++++++++------------------------------ 1 file changed, 157 insertions(+), 171 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index 207aa4f0aa10..8aadc62efd65 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -57,6 +57,11 @@ static void time_interpolator_update(long delta_nsec); #define TVN_MASK (TVN_SIZE - 1) #define TVR_MASK (TVR_SIZE - 1) +struct timer_base_s { + spinlock_t lock; + struct timer_list *running_timer; +}; + typedef struct tvec_s { struct list_head vec[TVN_SIZE]; } tvec_t; @@ -66,9 +71,8 @@ typedef struct tvec_root_s { } tvec_root_t; struct tvec_t_base_s { - spinlock_t lock; + struct timer_base_s t_base; unsigned long timer_jiffies; - struct timer_list *running_timer; tvec_root_t tv1; tvec_t tv2; tvec_t tv3; @@ -77,18 +81,16 @@ struct tvec_t_base_s { } ____cacheline_aligned_in_smp; typedef struct tvec_t_base_s tvec_base_t; +static DEFINE_PER_CPU(tvec_base_t, tvec_bases); static inline void set_running_timer(tvec_base_t *base, struct timer_list *timer) { #ifdef CONFIG_SMP - base->running_timer = timer; + base->t_base.running_timer = timer; #endif } -/* Fake initialization */ -static DEFINE_PER_CPU(tvec_base_t, tvec_bases) = { SPIN_LOCK_UNLOCKED }; - static void check_timer_failed(struct timer_list *timer) { static int whine_count; @@ -103,7 +105,6 @@ static void check_timer_failed(struct timer_list *timer) /* * Now fix it up */ - spin_lock_init(&timer->lock); timer->magic = TIMER_MAGIC; } @@ -156,65 +157,113 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer) list_add_tail(&timer->entry, vec); } +typedef struct timer_base_s timer_base_t; +/* + * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases) + * at compile time, and we need timer->base to lock the timer. + */ +timer_base_t __init_timer_base + ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED }; +EXPORT_SYMBOL(__init_timer_base); + +/*** + * init_timer - initialize a timer. + * @timer: the timer to be initialized + * + * init_timer() must be done to a timer prior calling *any* of the + * other timer functions. + */ +void fastcall init_timer(struct timer_list *timer) +{ + timer->entry.next = NULL; + timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base; + timer->magic = TIMER_MAGIC; +} +EXPORT_SYMBOL(init_timer); + +static inline void detach_timer(struct timer_list *timer, + int clear_pending) +{ + struct list_head *entry = &timer->entry; + + __list_del(entry->prev, entry->next); + if (clear_pending) + entry->next = NULL; + entry->prev = LIST_POISON2; +} + +/* + * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock + * means that all timers which are tied to this base via timer->base are + * locked, and the base itself is locked too. + * + * So __run_timers/migrate_timers can safely modify all timers which could + * be found on ->tvX lists. + * + * When the timer's base is locked, and the timer removed from list, it is + * possible to set timer->base = NULL and drop the lock: the timer remains + * locked. 
+ */ +static timer_base_t *lock_timer_base(struct timer_list *timer, + unsigned long *flags) +{ + timer_base_t *base; + + for (;;) { + base = timer->base; + if (likely(base != NULL)) { + spin_lock_irqsave(&base->lock, *flags); + if (likely(base == timer->base)) + return base; + /* The timer has migrated to another CPU */ + spin_unlock_irqrestore(&base->lock, *flags); + } + cpu_relax(); + } +} + int __mod_timer(struct timer_list *timer, unsigned long expires) { - tvec_base_t *old_base, *new_base; + timer_base_t *base; + tvec_base_t *new_base; unsigned long flags; int ret = 0; BUG_ON(!timer->function); - check_timer(timer); - spin_lock_irqsave(&timer->lock, flags); + base = lock_timer_base(timer, &flags); + + if (timer_pending(timer)) { + detach_timer(timer, 0); + ret = 1; + } + new_base = &__get_cpu_var(tvec_bases); -repeat: - old_base = timer->base; - /* - * Prevent deadlocks via ordering by old_base < new_base. - */ - if (old_base && (new_base != old_base)) { - if (old_base < new_base) { - spin_lock(&new_base->lock); - spin_lock(&old_base->lock); - } else { - spin_lock(&old_base->lock); - spin_lock(&new_base->lock); - } + if (base != &new_base->t_base) { /* - * The timer base might have been cancelled while we were - * trying to take the lock(s): + * We are trying to schedule the timer on the local CPU. + * However we can't change timer's base while it is running, + * otherwise del_timer_sync() can't detect that the timer's + * handler yet has not finished. This also guarantees that + * the timer is serialized wrt itself. */ - if (timer->base != old_base) { - spin_unlock(&new_base->lock); - spin_unlock(&old_base->lock); - goto repeat; - } - } else { - spin_lock(&new_base->lock); - if (timer->base != old_base) { - spin_unlock(&new_base->lock); - goto repeat; + if (unlikely(base->running_timer == timer)) { + /* The timer remains on a former base */ + new_base = container_of(base, tvec_base_t, t_base); + } else { + /* See the comment in lock_timer_base() */ + timer->base = NULL; + spin_unlock(&base->lock); + spin_lock(&new_base->t_base.lock); + timer->base = &new_base->t_base; } } - /* - * Delete the previous timeout (if there was any), and install - * the new one: - */ - if (old_base) { - list_del(&timer->entry); - ret = 1; - } timer->expires = expires; internal_add_timer(new_base, timer); - timer->base = new_base; - - if (old_base && (new_base != old_base)) - spin_unlock(&old_base->lock); - spin_unlock(&new_base->lock); - spin_unlock_irqrestore(&timer->lock, flags); + spin_unlock_irqrestore(&new_base->t_base.lock, flags); return ret; } @@ -232,15 +281,15 @@ void add_timer_on(struct timer_list *timer, int cpu) { tvec_base_t *base = &per_cpu(tvec_bases, cpu); unsigned long flags; - + BUG_ON(timer_pending(timer) || !timer->function); check_timer(timer); - spin_lock_irqsave(&base->lock, flags); + spin_lock_irqsave(&base->t_base.lock, flags); + timer->base = &base->t_base; internal_add_timer(base, timer); - timer->base = base; - spin_unlock_irqrestore(&base->lock, flags); + spin_unlock_irqrestore(&base->t_base.lock, flags); } @@ -295,27 +344,22 @@ EXPORT_SYMBOL(mod_timer); */ int del_timer(struct timer_list *timer) { + timer_base_t *base; unsigned long flags; - tvec_base_t *base; + int ret = 0; check_timer(timer); -repeat: - base = timer->base; - if (!base) - return 0; - spin_lock_irqsave(&base->lock, flags); - if (base != timer->base) { + if (timer_pending(timer)) { + base = lock_timer_base(timer, &flags); + if (timer_pending(timer)) { + detach_timer(timer, 1); + ret = 1; + } 
spin_unlock_irqrestore(&base->lock, flags); - goto repeat; } - list_del(&timer->entry); - /* Need to make sure that anybody who sees a NULL base also sees the list ops */ - smp_wmb(); - timer->base = NULL; - spin_unlock_irqrestore(&base->lock, flags); - return 1; + return ret; } EXPORT_SYMBOL(del_timer); @@ -332,72 +376,39 @@ EXPORT_SYMBOL(del_timer); * Synchronization rules: callers must prevent restarting of the timer, * otherwise this function is meaningless. It must not be called from * interrupt contexts. The caller must not hold locks which would prevent - * completion of the timer's handler. Upon exit the timer is not queued and - * the handler is not running on any CPU. + * completion of the timer's handler. The timer's handler must not call + * add_timer_on(). Upon exit the timer is not queued and the handler is + * not running on any CPU. * * The function returns whether it has deactivated a pending timer or not. - * - * del_timer_sync() is slow and complicated because it copes with timer - * handlers which re-arm the timer (periodic timers). If the timer handler - * is known to not do this (a single shot timer) then use - * del_singleshot_timer_sync() instead. */ int del_timer_sync(struct timer_list *timer) { - tvec_base_t *base; - int i, ret = 0; + timer_base_t *base; + unsigned long flags; + int ret = -1; check_timer(timer); -del_again: - ret += del_timer(timer); + do { + base = lock_timer_base(timer, &flags); - for_each_online_cpu(i) { - base = &per_cpu(tvec_bases, i); - if (base->running_timer == timer) { - while (base->running_timer == timer) { - cpu_relax(); - preempt_check_resched(); - } - break; + if (base->running_timer == timer) + goto unlock; + + ret = 0; + if (timer_pending(timer)) { + detach_timer(timer, 1); + ret = 1; } - } - smp_rmb(); - if (timer_pending(timer)) - goto del_again; +unlock: + spin_unlock_irqrestore(&base->lock, flags); + } while (ret < 0); return ret; } -EXPORT_SYMBOL(del_timer_sync); -/*** - * del_singleshot_timer_sync - deactivate a non-recursive timer - * @timer: the timer to be deactivated - * - * This function is an optimization of del_timer_sync for the case where the - * caller can guarantee the timer does not reschedule itself in its timer - * function. - * - * Synchronization rules: callers must prevent restarting of the timer, - * otherwise this function is meaningless. It must not be called from - * interrupt contexts. The caller must not hold locks which wold prevent - * completion of the timer's handler. Upon exit the timer is not queued and - * the handler is not running on any CPU. - * - * The function returns whether it has deactivated a pending timer or not. 
- */ -int del_singleshot_timer_sync(struct timer_list *timer) -{ - int ret = del_timer(timer); - - if (!ret) { - ret = del_timer_sync(timer); - BUG_ON(ret); - } - - return ret; -} -EXPORT_SYMBOL(del_singleshot_timer_sync); +EXPORT_SYMBOL(del_timer_sync); #endif static int cascade(tvec_base_t *base, tvec_t *tv, int index) @@ -415,7 +426,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index) struct timer_list *tmp; tmp = list_entry(curr, struct timer_list, entry); - BUG_ON(tmp->base != base); + BUG_ON(tmp->base != &base->t_base); curr = curr->next; internal_add_timer(base, tmp); } @@ -437,7 +448,7 @@ static inline void __run_timers(tvec_base_t *base) { struct timer_list *timer; - spin_lock_irq(&base->lock); + spin_lock_irq(&base->t_base.lock); while (time_after_eq(jiffies, base->timer_jiffies)) { struct list_head work_list = LIST_HEAD_INIT(work_list); struct list_head *head = &work_list; @@ -453,8 +464,7 @@ static inline void __run_timers(tvec_base_t *base) cascade(base, &base->tv5, INDEX(3)); ++base->timer_jiffies; list_splice_init(base->tv1.vec + index, &work_list); -repeat: - if (!list_empty(head)) { + while (!list_empty(head)) { void (*fn)(unsigned long); unsigned long data; @@ -462,11 +472,9 @@ repeat: fn = timer->function; data = timer->data; - list_del(&timer->entry); set_running_timer(base, timer); - smp_wmb(); - timer->base = NULL; - spin_unlock_irq(&base->lock); + detach_timer(timer, 1); + spin_unlock_irq(&base->t_base.lock); { u32 preempt_count = preempt_count(); fn(data); @@ -475,12 +483,11 @@ repeat: BUG(); } } - spin_lock_irq(&base->lock); - goto repeat; + spin_lock_irq(&base->t_base.lock); } } set_running_timer(base, NULL); - spin_unlock_irq(&base->lock); + spin_unlock_irq(&base->t_base.lock); } #ifdef CONFIG_NO_IDLE_HZ @@ -499,7 +506,7 @@ unsigned long next_timer_interrupt(void) int i, j; base = &__get_cpu_var(tvec_bases); - spin_lock(&base->lock); + spin_lock(&base->t_base.lock); expires = base->timer_jiffies + (LONG_MAX >> 1); list = 0; @@ -547,7 +554,7 @@ found: expires = nte->expires; } } - spin_unlock(&base->lock); + spin_unlock(&base->t_base.lock); return expires; } #endif @@ -1286,9 +1293,9 @@ static void __devinit init_timers_cpu(int cpu) { int j; tvec_base_t *base; - + base = &per_cpu(tvec_bases, cpu); - spin_lock_init(&base->lock); + spin_lock_init(&base->t_base.lock); for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); INIT_LIST_HEAD(base->tv4.vec + j); @@ -1302,22 +1309,16 @@ static void __devinit init_timers_cpu(int cpu) } #ifdef CONFIG_HOTPLUG_CPU -static int migrate_timer_list(tvec_base_t *new_base, struct list_head *head) +static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) { struct timer_list *timer; while (!list_empty(head)) { timer = list_entry(head->next, struct timer_list, entry); - /* We're locking backwards from __mod_timer order here, - beware deadlock. */ - if (!spin_trylock(&timer->lock)) - return 0; - list_del(&timer->entry); + detach_timer(timer, 0); + timer->base = &new_base->t_base; internal_add_timer(new_base, timer); - timer->base = new_base; - spin_unlock(&timer->lock); } - return 1; } static void __devinit migrate_timers(int cpu) @@ -1331,39 +1332,24 @@ static void __devinit migrate_timers(int cpu) new_base = &get_cpu_var(tvec_bases); local_irq_disable(); -again: - /* Prevent deadlocks via ordering by old_base < new_base. 
*/ - if (old_base < new_base) { - spin_lock(&new_base->lock); - spin_lock(&old_base->lock); - } else { - spin_lock(&old_base->lock); - spin_lock(&new_base->lock); - } + spin_lock(&new_base->t_base.lock); + spin_lock(&old_base->t_base.lock); - if (old_base->running_timer) + if (old_base->t_base.running_timer) BUG(); for (i = 0; i < TVR_SIZE; i++) - if (!migrate_timer_list(new_base, old_base->tv1.vec + i)) - goto unlock_again; - for (i = 0; i < TVN_SIZE; i++) - if (!migrate_timer_list(new_base, old_base->tv2.vec + i) - || !migrate_timer_list(new_base, old_base->tv3.vec + i) - || !migrate_timer_list(new_base, old_base->tv4.vec + i) - || !migrate_timer_list(new_base, old_base->tv5.vec + i)) - goto unlock_again; - spin_unlock(&old_base->lock); - spin_unlock(&new_base->lock); + migrate_timer_list(new_base, old_base->tv1.vec + i); + for (i = 0; i < TVN_SIZE; i++) { + migrate_timer_list(new_base, old_base->tv2.vec + i); + migrate_timer_list(new_base, old_base->tv3.vec + i); + migrate_timer_list(new_base, old_base->tv4.vec + i); + migrate_timer_list(new_base, old_base->tv5.vec + i); + } + + spin_unlock(&old_base->t_base.lock); + spin_unlock(&new_base->t_base.lock); local_irq_enable(); put_cpu_var(tvec_bases); - return; - -unlock_again: - /* Avoid deadlock with __mod_timer, by backing off. */ - spin_unlock(&old_base->lock); - spin_unlock(&new_base->lock); - cpu_relax(); - goto again; } #endif /* CONFIG_HOTPLUG_CPU */ -- cgit v1.2.3 From fd450b7318b75343fd76b3d95416853e34e72c95 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 23 Jun 2005 00:08:59 -0700 Subject: [PATCH] timers: introduce try_to_del_timer_sync() This patch splits del_timer_sync() into 2 functions. The new one, try_to_del_timer_sync(), returns -1 when it hits executing timer. It can be used in interrupt context, or when the caller hold locks which can prevent completion of the timer's handler. NOTE. Currently it can't be used in interrupt context in UP case, because ->running_timer is used only with CONFIG_SMP. Should the need arise, it is possible to kill #ifdef CONFIG_SMP in set_running_timer(), it is cheap. Signed-off-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/timer.c | 53 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index 8aadc62efd65..1f986c16d89f 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -365,6 +365,34 @@ int del_timer(struct timer_list *timer) EXPORT_SYMBOL(del_timer); #ifdef CONFIG_SMP +/* + * This function tries to deactivate a timer. Upon successful (ret >= 0) + * exit the timer is not queued and the handler is not running on any CPU. + * + * It must not be called from interrupt contexts. + */ +int try_to_del_timer_sync(struct timer_list *timer) +{ + timer_base_t *base; + unsigned long flags; + int ret = -1; + + base = lock_timer_base(timer, &flags); + + if (base->running_timer == timer) + goto out; + + ret = 0; + if (timer_pending(timer)) { + detach_timer(timer, 1); + ret = 1; + } +out: + spin_unlock_irqrestore(&base->lock, flags); + + return ret; +} + /*** * del_timer_sync - deactivate a timer and wait for the handler to finish. 
* @timer: the timer to be deactivated @@ -384,28 +412,13 @@ EXPORT_SYMBOL(del_timer); */ int del_timer_sync(struct timer_list *timer) { - timer_base_t *base; - unsigned long flags; - int ret = -1; - check_timer(timer); - do { - base = lock_timer_base(timer, &flags); - - if (base->running_timer == timer) - goto unlock; - - ret = 0; - if (timer_pending(timer)) { - detach_timer(timer, 1); - ret = 1; - } -unlock: - spin_unlock_irqrestore(&base->lock, flags); - } while (ret < 0); - - return ret; + for (;;) { + int ret = try_to_del_timer_sync(timer); + if (ret >= 0) + return ret; + } } EXPORT_SYMBOL(del_timer_sync); -- cgit v1.2.3 From f972be33ce6a08b5f096ba013c7459a3a82f5f39 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 23 Jun 2005 00:09:00 -0700 Subject: [PATCH] posix-timers: use try_to_del_timer_sync() sys_timer_settime/sys_timer_delete needs to delete k_itimer->real.timer synchronously while holding ->it_lock, which is also locked in posix_timer_fn. This patch removes timer_active/set_timer_inactive which plays with timer_list's internals in favour of using try_to_del_timer_sync(), which was introduced in the previous patch. Signed-off-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/posix-timers.c | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index cabb63fc9e16..5b7b4736d82b 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -88,23 +88,6 @@ static kmem_cache_t *posix_timers_cache; static struct idr posix_timers_id; static DEFINE_SPINLOCK(idr_lock); -/* - * Just because the timer is not in the timer list does NOT mean it is - * inactive. It could be in the "fire" routine getting a new expire time. - */ -#define TIMER_INACTIVE 1 - -#ifdef CONFIG_SMP -# define timer_active(tmr) \ - ((tmr)->it.real.timer.entry.prev != (void *)TIMER_INACTIVE) -# define set_timer_inactive(tmr) \ - do { \ - (tmr)->it.real.timer.entry.prev = (void *)TIMER_INACTIVE; \ - } while (0) -#else -# define timer_active(tmr) BARFY // error to use outside of SMP -# define set_timer_inactive(tmr) do { } while (0) -#endif /* * we assume that the new SIGEV_THREAD_ID shares no bits with the other * SIGEV values. Here we put out an error if this assumption fails. @@ -226,7 +209,6 @@ static inline int common_timer_create(struct k_itimer *new_timer) init_timer(&new_timer->it.real.timer); new_timer->it.real.timer.data = (unsigned long) new_timer; new_timer->it.real.timer.function = posix_timer_fn; - set_timer_inactive(new_timer); return 0; } @@ -480,7 +462,6 @@ static void posix_timer_fn(unsigned long __data) int do_notify = 1; spin_lock_irqsave(&timr->it_lock, flags); - set_timer_inactive(timr); if (!list_empty(&timr->it.real.abs_timer_entry)) { spin_lock(&abs_list.lock); do { @@ -983,8 +964,8 @@ common_timer_set(struct k_itimer *timr, int flags, * careful here. If smp we could be in the "fire" routine which will * be spinning as we hold the lock. But this is ONLY an SMP issue. */ + if (try_to_del_timer_sync(&timr->it.real.timer) < 0) { #ifdef CONFIG_SMP - if (timer_active(timr) && !del_timer(&timr->it.real.timer)) /* * It can only be active if on an other cpu. Since * we have cleared the interval stuff above, it should @@ -994,11 +975,9 @@ common_timer_set(struct k_itimer *timr, int flags, * a "retry" exit status. 
*/ return TIMER_RETRY; - - set_timer_inactive(timr); -#else - del_timer(&timr->it.real.timer); #endif + } + remove_from_abslist(timr); timr->it_requeue_pending = (timr->it_requeue_pending + 2) & @@ -1083,8 +1062,9 @@ retry: static inline int common_timer_del(struct k_itimer *timer) { timer->it.real.incr = 0; + + if (try_to_del_timer_sync(&timer->it.real.timer) < 0) { #ifdef CONFIG_SMP - if (timer_active(timer) && !del_timer(&timer->it.real.timer)) /* * It can only be active if on an other cpu. Since * we have cleared the interval stuff above, it should @@ -1094,9 +1074,9 @@ static inline int common_timer_del(struct k_itimer *timer) * a "retry" exit status. */ return TIMER_RETRY; -#else - del_timer(&timer->it.real.timer); #endif + } + remove_from_abslist(timer); return 0; -- cgit v1.2.3 From ab4af03a4054bd78bcabfb2214c9597201beae35 Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Thu, 23 Jun 2005 00:09:05 -0700 Subject: [PATCH] CON_CONSDEV bit not set correctly on last console According to include/linux/console.h, CON_CONSDEV flag should be set on the last console specified on the boot command line: 86 #define CON_PRINTBUFFER (1) 87 #define CON_CONSDEV (2) /* Last on the command line */ 88 #define CON_ENABLED (4) 89 #define CON_BOOT (8) This does not currently happen if there is more than one console specified on the boot commandline. Instead, it gets set on the first console on the command line. This can cause problems for things like kdb that look for the CON_CONSDEV flag to see if the console is valid. Additionaly, it doesn't look like CON_CONSDEV is reassigned to the next preferred console at unregister time if the console being unregistered currently has that bit set. Example (from sn2 ia64): elilo vmlinuz root= console=ttyS0 console=ttySG0 in this case, the flags on ttySG console struct will be 0x4 (should be 0x6). Attached patch against bk fixes both issues for the cases I looked at. It uses selected_console (which gets incremented for each console specified on the command line) as the indicator of which console to set CON_CONSDEV on. When adding the console to the list, if the previous one had CON_CONSDEV set, it masks it out. Tested on ia64 and x86. The problem with the current behavior is it breaks overriding the default from the boot line. In the ia64 case, there may be a global append line defining console=a in elilo.conf. Then you want to boot your kernel, and want to override the default by passing console=b on the boot line. elilo constructs the kernel cmdline by starting with the value of the global append line, then tacks on whatever else you specify, which puts console=b last. 
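Consumers such as kdb that need "the" boot console are expected to look for the CON_CONSDEV bit, so after this fix a lookup would be along the lines of the sketch below. This is illustrative only, not code from the patch; walking console_drivers would normally be done under the console semaphore, which is omitted here for brevity.

        #include <linux/console.h>

        /* Return the console that carries CON_CONSDEV, i.e. the one matching
         * the last console= entry on the boot command line after this fix. */
        static struct console *find_boot_console(void)
        {
                struct console *con;

                for (con = console_drivers; con != NULL; con = con->next)
                        if (con->flags & CON_CONSDEV)
                                return con;
                return NULL;
        }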
Signed-off-by: Greg Edwards Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 01b58d7d17ff..3a442bfb8bee 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -876,8 +876,10 @@ void register_console(struct console * console) break; console->flags |= CON_ENABLED; console->index = console_cmdline[i].index; - if (i == preferred_console) + if (i == selected_console) { console->flags |= CON_CONSDEV; + preferred_console = selected_console; + } break; } @@ -897,6 +899,8 @@ void register_console(struct console * console) if ((console->flags & CON_CONSDEV) || console_drivers == NULL) { console->next = console_drivers; console_drivers = console; + if (console->next) + console->next->flags &= ~CON_CONSDEV; } else { console->next = console_drivers->next; console_drivers->next = console; @@ -937,10 +941,14 @@ int unregister_console(struct console * console) /* If last console is removed, we re-enable picking the first * one that gets registered. Without that, pmac early boot console * would prevent fbcon from taking over. + * + * If this isn't the last console and it has CON_CONSDEV set, we + * need to set it on the next preferred console. */ if (console_drivers == NULL) preferred_console = selected_console; - + else if (console->flags & CON_CONSDEV) + console_drivers->flags |= CON_CONSDEV; release_console_sem(); return res; -- cgit v1.2.3 From be5b4fbd017d12e0d09ea0528a5839ce2ed2c8c8 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 23 Jun 2005 00:09:09 -0700 Subject: [PATCH] preempt_count is int - remove cast and don't assign to unsigned type In kernel/sched.c the return value from preempt_count() is cast to an int. That made sense when preempt_count was defined as different types on is not needed and should go away. The patch removes the cast. In kernel/timer.c the return value from preempt_count() is assigned to a variable of type u32 and then that unsigned value is later compared to preempt_count(). Since preempt_count() returns an int, an int is what should be used to store its return value. Storing the result in an unsigned 32bit integer made a tiny bit of sense back when preempt_count was different types on different archs, but no more - let's not play signed vs unsigned comparison games when we don't have to. The patch modifies the code to use an int to hold the value. While I was around that bit of code I also made two changes to a nearby (related) printk() - I modified it to specify the loglevel explicitly and also broke the line into a few pieces to avoid it being longer than 80 chars and clarified the text a bit. Signed-off-by: Jesper Juhl Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 2 +- kernel/timer.c | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index deca041fc364..6ee4515d5a20 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2576,7 +2576,7 @@ void fastcall add_preempt_count(int val) /* * Underflow? */ - BUG_ON(((int)preempt_count() < 0)); + BUG_ON((preempt_count() < 0)); preempt_count() += val; /* * Spinlock count overflowing soon? 
diff --git a/kernel/timer.c b/kernel/timer.c index 1f986c16d89f..51ff917c9590 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -489,10 +489,14 @@ static inline void __run_timers(tvec_base_t *base) detach_timer(timer, 1); spin_unlock_irq(&base->t_base.lock); { - u32 preempt_count = preempt_count(); + int preempt_count = preempt_count(); fn(data); if (preempt_count != preempt_count()) { - printk("huh, entered %p with %08x, exited with %08x?\n", fn, preempt_count, preempt_count()); + printk(KERN_WARNING "huh, entered %p " + "with preempt_count %08x, exited" + " with %08x?\n", + fn, preempt_count, + preempt_count()); BUG(); } } -- cgit v1.2.3 From 5f45f1a78fbac3cc859ec10c5366e97d20d40fa2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jun 2005 00:09:12 -0700 Subject: [PATCH] remove duplicate get_dentry functions in various places Various filesystem drivers have grown a get_dentry() function that's a duplicate of lookup_one_len, except that it doesn't take a maximum length argument and doesn't check for \0 or / in the passed in filename. Switch all these places to use lookup_one_len. Signed-off-by: Christoph Hellwig Cc: Greg KH Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 00e8f2575512..79dd929f4084 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -228,13 +228,7 @@ static struct dentry_operations cpuset_dops = { static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name) { - struct qstr qstr; - struct dentry *d; - - qstr.name = name; - qstr.len = strlen(name); - qstr.hash = full_name_hash(name, qstr.len); - d = lookup_hash(&qstr, parent); + struct dentry *d = lookup_one_len(name, parent, strlen(name)); if (!IS_ERR(d)) d->d_op = &cpuset_dops; return d; -- cgit v1.2.3 From df164db5fd16888ddbe2a63a47b2f6dda9a428b5 Mon Sep 17 00:00:00 2001 From: Alexander Nyberg Date: Thu, 23 Jun 2005 00:09:13 -0700 Subject: [PATCH] avoid resursive oopses Prevent recursive faults in do_exit() by leaving the task alone and wait for reboot. This may allow a more graceful shutdown and possibly save the original oops. Signed-off-by: Alexander Nyberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 2ef2ad540201..c2bdf6fb61a5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -793,6 +793,17 @@ fastcall NORET_TYPE void do_exit(long code) ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP); } + /* + * We're taking recursive faults here in do_exit. Safest is to just + * leave this task alone and wait for reboot. + */ + if (unlikely(tsk->flags & PF_EXITING)) { + printk(KERN_ALERT + "Fixing recursive fault but reboot is needed!\n"); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); + } + tsk->flags |= PF_EXITING; /* -- cgit v1.2.3 From b94cce926b2b902b79380ccba370d6f9f2980de0 Mon Sep 17 00:00:00 2001 From: Hien Nguyen Date: Thu, 23 Jun 2005 00:09:19 -0700 Subject: [PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. 
Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen Signed-off-by: Prasanna S Panchamukhi Signed-off-by: Frederik Deweerdt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 213 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 208 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 037142b72a49..692fbf75ab49 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -27,6 +27,9 @@ * interface to access function arguments. * 2004-Sep Prasanna S Panchamukhi Changed Kprobes * exceptions notifier to be first on the priority list. + * 2005-May Hien Nguyen , Jim Keniston + * and Prasanna S Panchamukhi + * added function-return probes. 
*/ #include #include @@ -41,6 +44,7 @@ #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; +static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; unsigned int kprobe_cpu = NR_CPUS; static DEFINE_SPINLOCK(kprobe_lock); @@ -78,7 +82,7 @@ struct kprobe *get_kprobe(void *addr) * Aggregate handlers for multiple kprobes support - these handlers * take care of invoking the individual kprobe handlers on p->list */ -int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) +static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe *kp; @@ -92,8 +96,8 @@ int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) return 0; } -void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, - unsigned long flags) +static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, + unsigned long flags) { struct kprobe *kp; @@ -107,7 +111,8 @@ void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, return; } -int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr) +static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, + int trapnr) { /* * if we faulted "during" the execution of a user specified @@ -120,6 +125,135 @@ int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr) return 0; } +struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *) &kretprobe_trampoline, + .pre_handler = trampoline_probe_handler, + .post_handler = trampoline_post_handler +}; + +struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp) +{ + struct hlist_node *node; + struct kretprobe_instance *ri; + hlist_for_each_entry(ri, node, &rp->free_instances, uflist) + return ri; + return NULL; +} + +static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp) +{ + struct hlist_node *node; + struct kretprobe_instance *ri; + hlist_for_each_entry(ri, node, &rp->used_instances, uflist) + return ri; + return NULL; +} + +struct kretprobe_instance *get_rp_inst(void *sara) +{ + struct hlist_head *head; + struct hlist_node *node; + struct task_struct *tsk; + struct kretprobe_instance *ri; + + tsk = arch_get_kprobe_task(sara); + head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; + hlist_for_each_entry(ri, node, head, hlist) { + if (ri->stack_addr == sara) + return ri; + } + return NULL; +} + +void add_rp_inst(struct kretprobe_instance *ri) +{ + struct task_struct *tsk; + /* + * Remove rp inst off the free list - + * Add it back when probed function returns + */ + hlist_del(&ri->uflist); + tsk = arch_get_kprobe_task(ri->stack_addr); + /* Add rp inst onto table */ + INIT_HLIST_NODE(&ri->hlist); + hlist_add_head(&ri->hlist, + &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]); + + /* Also add this rp inst to the used list. 
*/ + INIT_HLIST_NODE(&ri->uflist); + hlist_add_head(&ri->uflist, &ri->rp->used_instances); +} + +void recycle_rp_inst(struct kretprobe_instance *ri) +{ + /* remove rp inst off the rprobe_inst_table */ + hlist_del(&ri->hlist); + if (ri->rp) { + /* remove rp inst off the used list */ + hlist_del(&ri->uflist); + /* put rp inst back onto the free list */ + INIT_HLIST_NODE(&ri->uflist); + hlist_add_head(&ri->uflist, &ri->rp->free_instances); + } else + /* Unregistering */ + kfree(ri); +} + +struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk) +{ + return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; +} + +struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk) +{ + struct task_struct *tsk; + struct hlist_head *head; + struct hlist_node *node; + struct kretprobe_instance *ri; + + head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)]; + + hlist_for_each_entry(ri, node, head, hlist) { + tsk = arch_get_kprobe_task(ri->stack_addr); + if (tsk == tk) + return ri; + } + return NULL; +} + +/* + * This function is called from do_exit or do_execv when task tk's stack is + * about to be recycled. Recycle any function-return probe instances + * associated with this task. These represent probed functions that have + * been called but may never return. + */ +void kprobe_flush_task(struct task_struct *tk) +{ + arch_kprobe_flush_task(tk, &kprobe_lock); +} + +/* + * This kprobe pre_handler is registered with every kretprobe. When probe + * hits it will set up the return probe. + */ +static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) +{ + struct kretprobe *rp = container_of(p, struct kretprobe, kp); + + /*TODO: consider to only swap the RA after the last pre_handler fired */ + arch_prepare_kretprobe(rp, regs); + return 0; +} + +static inline void free_rp_inst(struct kretprobe *rp) +{ + struct kretprobe_instance *ri; + while ((ri = get_free_rp_inst(rp)) != NULL) { + hlist_del(&ri->uflist); + kfree(ri); + } +} + /* * Fill in the required fields of the "manager kprobe". 
Replace the * earlier kprobe in the hlist with the manager kprobe @@ -257,16 +391,82 @@ void unregister_jprobe(struct jprobe *jp) unregister_kprobe(&jp->kp); } +#ifdef ARCH_SUPPORTS_KRETPROBES + +int register_kretprobe(struct kretprobe *rp) +{ + int ret = 0; + struct kretprobe_instance *inst; + int i; + + rp->kp.pre_handler = pre_handler_kretprobe; + + /* Pre-allocate memory for max kretprobe instances */ + if (rp->maxactive <= 0) { +#ifdef CONFIG_PREEMPT + rp->maxactive = max(10, 2 * NR_CPUS); +#else + rp->maxactive = NR_CPUS; +#endif + } + INIT_HLIST_HEAD(&rp->used_instances); + INIT_HLIST_HEAD(&rp->free_instances); + for (i = 0; i < rp->maxactive; i++) { + inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL); + if (inst == NULL) { + free_rp_inst(rp); + return -ENOMEM; + } + INIT_HLIST_NODE(&inst->uflist); + hlist_add_head(&inst->uflist, &rp->free_instances); + } + + rp->nmissed = 0; + /* Establish function entry probe point */ + if ((ret = register_kprobe(&rp->kp)) != 0) + free_rp_inst(rp); + return ret; +} + +#else /* ARCH_SUPPORTS_KRETPROBES */ + +int register_kretprobe(struct kretprobe *rp) +{ + return -ENOSYS; +} + +#endif /* ARCH_SUPPORTS_KRETPROBES */ + +void unregister_kretprobe(struct kretprobe *rp) +{ + unsigned long flags; + struct kretprobe_instance *ri; + + unregister_kprobe(&rp->kp); + /* No race here */ + spin_lock_irqsave(&kprobe_lock, flags); + free_rp_inst(rp); + while ((ri = get_used_rp_inst(rp)) != NULL) { + ri->rp = NULL; + hlist_del(&ri->uflist); + } + spin_unlock_irqrestore(&kprobe_lock, flags); +} + static int __init init_kprobes(void) { int i, err = 0; /* FIXME allocate the probe table, currently defined statically */ /* initialize all list heads */ - for (i = 0; i < KPROBE_TABLE_SIZE; i++) + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { INIT_HLIST_HEAD(&kprobe_table[i]); + INIT_HLIST_HEAD(&kretprobe_inst_table[i]); + } err = register_die_notifier(&kprobe_exceptions_nb); + /* Register the trampoline probe for return probe */ + register_kprobe(&trampoline_p); return err; } @@ -277,3 +477,6 @@ EXPORT_SYMBOL_GPL(unregister_kprobe); EXPORT_SYMBOL_GPL(register_jprobe); EXPORT_SYMBOL_GPL(unregister_jprobe); EXPORT_SYMBOL_GPL(jprobe_return); +EXPORT_SYMBOL_GPL(register_kretprobe); +EXPORT_SYMBOL_GPL(unregister_kretprobe); + -- cgit v1.2.3 From 7e1048b11c5afe79aac46a42e3ccec86b8365c6d Mon Sep 17 00:00:00 2001 From: Rusty Lynch Date: Thu, 23 Jun 2005 00:09:25 -0700 Subject: [PATCH] Move kprobe [dis]arming into arch specific code The architecture independent code of the current kprobes implementation is arming and disarming kprobes at registration time. The problem is that the code is assuming that arming and disarming is a just done by a simple write of some magic value to an address. This is problematic for ia64 where our instructions look more like structures, and we can not insert break points by just doing something like: *p->addr = BREAKPOINT_INSTRUCTION; The following patch to 2.6.12-rc4-mm2 adds two new architecture dependent functions: * void arch_arm_kprobe(struct kprobe *p) * void arch_disarm_kprobe(struct kprobe *p) and then adds the new functions for each of the architectures that already implement kprobes (spar64/ppc64/i386/x86_64). I thought arch_[dis]arm_kprobe was the most descriptive of what was really happening, but each of the architectures already had a disarm_kprobe() function that was really a "disarm and do some other clean-up items as needed when you stumble across a recursive kprobe." So... 
I took the liberty of changing the code that was calling disarm_kprobe() to call arch_disarm_kprobe(), and then do the cleanup in the block of code dealing with the recursive kprobe case. So far this patch as been tested on i386, x86_64, and ppc64, but still needs to be tested in sparc64. Signed-off-by: Rusty Lynch Signed-off-by: Anil S Keshavamurthy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 692fbf75ab49..e8e0ae8a6e14 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -261,7 +261,7 @@ static inline void free_rp_inst(struct kretprobe *rp) static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) { ap->addr = p->addr; - ap->opcode = p->opcode; + memcpy(&ap->opcode, &p->opcode, sizeof(kprobe_opcode_t)); memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn)); ap->pre_handler = aggr_pre_handler; @@ -304,10 +304,8 @@ static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p) /* kprobe removal house-keeping routines */ static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags) { - *p->addr = p->opcode; + arch_disarm_kprobe(p); hlist_del(&p->hlist); - flush_icache_range((unsigned long) p->addr, - (unsigned long) p->addr + sizeof(kprobe_opcode_t)); spin_unlock_irqrestore(&kprobe_lock, flags); arch_remove_kprobe(p); } @@ -344,10 +342,8 @@ int register_kprobe(struct kprobe *p) hlist_add_head(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); - p->opcode = *p->addr; - *p->addr = BREAKPOINT_INSTRUCTION; - flush_icache_range((unsigned long) p->addr, - (unsigned long) p->addr + sizeof(kprobe_opcode_t)); + arch_arm_kprobe(p); + out: spin_unlock_irqrestore(&kprobe_lock, flags); rm_kprobe: -- cgit v1.2.3 From 0aa55e4d7db822059fe8132fe9f2b7773c48216c Mon Sep 17 00:00:00 2001 From: Hien Nguyen Date: Thu, 23 Jun 2005 00:09:26 -0700 Subject: [PATCH] kprobes: moves lock-unlock to non-arch kprobe_flush_task This patch moves the lock/unlock of the arch specific kprobe_flush_task() to the non-arch specific kprobe_flusk_task(). Signed-off-by: Hien Nguyen Acked-by: Prasanna S Panchamukhi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index e8e0ae8a6e14..dd42e717dd35 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -229,7 +229,10 @@ struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk) */ void kprobe_flush_task(struct task_struct *tk) { - arch_kprobe_flush_task(tk, &kprobe_lock); + unsigned long flags = 0; + spin_lock_irqsave(&kprobe_lock, flags); + arch_kprobe_flush_task(tk); + spin_unlock_irqrestore(&kprobe_lock, flags); } /* -- cgit v1.2.3 From ea32c65cc2d2294c04e9f81d0578a6f51febfdbf Mon Sep 17 00:00:00 2001 From: Prasanna S Panchamukhi Date: Thu, 23 Jun 2005 00:09:36 -0700 Subject: [PATCH] kprobes: Temporary disarming of reentrant probe In situations where a kprobes handler calls a routine which has a probe on it, then kprobes_handler() disarms the new probe forever. This patch removes the above limitation by temporarily disarming the new probe. When the another probe hits while handling the old probe, the kprobes_handler() saves previous kprobes state and handles the new probe without calling the new kprobes registered handlers. 
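Returning to the arch_arm_kprobe()/arch_disarm_kprobe() split above: on an architecture where arming really is a single opcode write (i386 being the model case), the new hooks can simply absorb the lines removed from the generic code. A hypothetical sketch, not the literal arch implementation; BREAKPOINT_INSTRUCTION and flush_icache_range() come from the arch headers, and p->opcode is assumed to have been saved when the probe was prepared:

    void arch_arm_kprobe(struct kprobe *p)
    {
            *p->addr = BREAKPOINT_INSTRUCTION;
            flush_icache_range((unsigned long) p->addr,
                               (unsigned long) p->addr + sizeof(kprobe_opcode_t));
    }

    void arch_disarm_kprobe(struct kprobe *p)
    {
            *p->addr = p->opcode;   /* restore the original instruction bytes */
            flush_icache_range((unsigned long) p->addr,
                               (unsigned long) p->addr + sizeof(kprobe_opcode_t));
    }

Architectures such as ia64, whose instruction bundles cannot be patched with a single store, implement these hooks very differently, which is exactly why the generic code can no longer do the write itself.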
kprobe_post_handler() restores back the previous kprobes state and the normal execution continues. However on x86_64 architecture, re-rentrancy is provided only through pre_handler(). If a routine having probe is referenced through post_handler(), then the probes on that routine are disarmed forever, since the exception stack is gets changed after the processor single steps the instruction of the new probe. This patch includes generic changes to support temporary disarming on reentrancy of probes. Signed-of-by: Prasanna S Panchamukhi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index dd42e717dd35..456ecedff2d4 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -335,6 +335,7 @@ int register_kprobe(struct kprobe *p) } spin_lock_irqsave(&kprobe_lock, flags); old_p = get_kprobe(p->addr); + p->nmissed = 0; if (old_p) { ret = register_aggr_kprobe(old_p, p); goto out; -- cgit v1.2.3 From 8b0914ea7475615c7c8965c1ac8fe4069270f25c Mon Sep 17 00:00:00 2001 From: Prasanna S Panchamukhi Date: Thu, 23 Jun 2005 00:09:41 -0700 Subject: [PATCH] jprobes: allow a jprobe to coexist with muliple kprobes Presently either multiple kprobes or only one jprobe could be inserted. This patch removes the above limitation and allows one jprobe and multiple kprobes to coexist at the same address. However multiple jprobes cannot coexist with multiple kprobes. Currently I am working on the prototype to allow multiple jprobes coexist with multiple kprobes. Signed-off-by: Ananth N Mavinakayanhalli Signed-off-by: Prasanna S Panchamukhi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 51 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 456ecedff2d4..334f37472c56 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -89,9 +89,10 @@ static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) list_for_each_entry(kp, &p->list, list) { if (kp->pre_handler) { curr_kprobe = kp; - kp->pre_handler(kp, regs); - curr_kprobe = NULL; + if (kp->pre_handler(kp, regs)) + return 1; } + curr_kprobe = NULL; } return 0; } @@ -125,6 +126,19 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, return 0; } +static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe *kp = curr_kprobe; + if (curr_kprobe && kp->break_handler) { + if (kp->break_handler(kp, regs)) { + curr_kprobe = NULL; + return 1; + } + } + curr_kprobe = NULL; + return 0; +} + struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *) &kretprobe_trampoline, .pre_handler = trampoline_probe_handler, @@ -257,19 +271,46 @@ static inline void free_rp_inst(struct kretprobe *rp) } } +/* + * Keep all fields in the kprobe consistent + */ +static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) +{ + memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t)); + memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn)); +} + +/* +* Add the new probe to old_p->list. 
Fail if this is the +* second jprobe at the address - two jprobes can't coexist +*/ +static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p) +{ + struct kprobe *kp; + + if (p->break_handler) { + list_for_each_entry(kp, &old_p->list, list) { + if (kp->break_handler) + return -EEXIST; + } + list_add_tail(&p->list, &old_p->list); + } else + list_add(&p->list, &old_p->list); + return 0; +} + /* * Fill in the required fields of the "manager kprobe". Replace the * earlier kprobe in the hlist with the manager kprobe */ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) { + copy_kprobe(p, ap); ap->addr = p->addr; - memcpy(&ap->opcode, &p->opcode, sizeof(kprobe_opcode_t)); - memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn)); - ap->pre_handler = aggr_pre_handler; ap->post_handler = aggr_post_handler; ap->fault_handler = aggr_fault_handler; + ap->break_handler = aggr_break_handler; INIT_LIST_HEAD(&ap->list); list_add(&p->list, &ap->list); @@ -290,16 +331,16 @@ static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p) int ret = 0; struct kprobe *ap; - if (old_p->break_handler || p->break_handler) { - ret = -EEXIST; /* kprobe and jprobe can't (yet) coexist */ - } else if (old_p->pre_handler == aggr_pre_handler) { - list_add(&p->list, &old_p->list); + if (old_p->pre_handler == aggr_pre_handler) { + copy_kprobe(old_p, p); + ret = add_new_kprobe(old_p, p); } else { ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC); if (!ap) return -ENOMEM; add_aggr_kprobe(ap, old_p); - list_add(&p->list, &ap->list); + copy_kprobe(ap, p); + ret = add_new_kprobe(ap, p); } return ret; } -- cgit v1.2.3 From d6e711448137ca3301512cec41a2c2ce852b3d0a Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Thu, 23 Jun 2005 00:09:43 -0700 Subject: [PATCH] setuid core dump Add a new `suid_dumpable' sysctl: This value can be used to query and set the core dump mode for setuid or otherwise protected/tainted binaries. The modes are 0 - (default) - traditional behaviour. Any process which has changed privilege levels or is execute only will not be dumped 1 - (debug) - all processes dump core when possible. The core dump is owned by the current user and no security is applied. This is intended for system debugging situations only. Ptrace is unchecked. 2 - (suidsafe) - any binary which normally would not be dumped is dumped readable by root only. This allows the end user to remove such a dump but not access it directly. For security reasons core dumps in this mode will not overwrite one another or other files. This mode is appropriate when adminstrators are attempting to debug problems in a normal environment. (akpm: > > +EXPORT_SYMBOL(suid_dumpable); > > EXPORT_SYMBOL_GPL? No problem to me. > > if (current->euid == current->uid && current->egid == current->gid) > > current->mm->dumpable = 1; > > Should this be SUID_DUMP_USER? Actually the feedback I had from last time was that the SUID_ defines should go because its clearer to follow the numbers. They can go everywhere (and there are lots of places where dumpable is tested/used as a bool in untouched code) > Maybe this should be renamed to `dump_policy' or something. Doing that > would help us catch any code which isn't using the #defines, too. Fair comment. The patch was designed to be easy to maintain for Red Hat rather than for merging. Changing that field would create a gigantic diff because it is used all over the place. 
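From userspace the new mode is visible through the suid_dumpable sysctl and through the widened PR_SET_DUMPABLE range. A small illustrative program, assuming the sysctl lands under /proc/sys/fs/ as the fs_table entry below suggests:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/fs/suid_dumpable", "r");
            int mode = -1;

            if (f) {
                    fscanf(f, "%d", &mode);
                    fclose(f);
            }
            printf("system-wide suid_dumpable mode: %d\n", mode);

            /* PR_SET_DUMPABLE now accepts 0, 1 or 2 for this process */
            if (prctl(PR_SET_DUMPABLE, 2, 0, 0, 0) != 0)
                    perror("prctl(PR_SET_DUMPABLE, 2)");
            printf("process dumpable flag: %d\n",
                   prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));
            return 0;
    }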
) Signed-off-by: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 22 +++++++++++----------- kernel/sysctl.c | 9 +++++++++ 2 files changed, 20 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index f006632c2ba7..0a2c8cda9638 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -525,7 +525,7 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) } if (new_egid != old_egid) { - current->mm->dumpable = 0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } if (rgid != (gid_t) -1 || @@ -556,7 +556,7 @@ asmlinkage long sys_setgid(gid_t gid) { if(old_egid != gid) { - current->mm->dumpable=0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->gid = current->egid = current->sgid = current->fsgid = gid; @@ -565,7 +565,7 @@ asmlinkage long sys_setgid(gid_t gid) { if(old_egid != gid) { - current->mm->dumpable=0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->egid = current->fsgid = gid; @@ -596,7 +596,7 @@ static int set_user(uid_t new_ruid, int dumpclear) if(dumpclear) { - current->mm->dumpable = 0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->uid = new_ruid; @@ -653,7 +653,7 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) if (new_euid != old_euid) { - current->mm->dumpable=0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->fsuid = current->euid = new_euid; @@ -703,7 +703,7 @@ asmlinkage long sys_setuid(uid_t uid) if (old_euid != uid) { - current->mm->dumpable = 0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->fsuid = current->euid = uid; @@ -748,7 +748,7 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) if (euid != (uid_t) -1) { if (euid != current->euid) { - current->mm->dumpable = 0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->euid = euid; @@ -798,7 +798,7 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) if (egid != (gid_t) -1) { if (egid != current->egid) { - current->mm->dumpable = 0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->egid = egid; @@ -845,7 +845,7 @@ asmlinkage long sys_setfsuid(uid_t uid) { if (uid != old_fsuid) { - current->mm->dumpable = 0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->fsuid = uid; @@ -875,7 +875,7 @@ asmlinkage long sys_setfsgid(gid_t gid) { if (gid != old_fsgid) { - current->mm->dumpable = 0; + current->mm->dumpable = suid_dumpable; smp_wmb(); } current->fsgid = gid; @@ -1652,7 +1652,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, error = 1; break; case PR_SET_DUMPABLE: - if (arg2 != 0 && arg2 != 1) { + if (arg2 < 0 || arg2 > 2) { error = -EINVAL; break; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 701d12c63068..24a4d12d5aa9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -58,6 +58,7 @@ extern int sysctl_overcommit_ratio; extern int max_threads; extern int sysrq_enabled; extern int core_uses_pid; +extern int suid_dumpable; extern char core_pattern[]; extern int cad_pid; extern int pid_max; @@ -950,6 +951,14 @@ static ctl_table fs_table[] = { .proc_handler = &proc_dointvec, }, #endif + { + .ctl_name = KERN_SETUID_DUMPABLE, + .procname = "suid_dumpable", + .data = &suid_dumpable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, { .ctl_name = 0 } }; -- cgit v1.2.3 From 4fea2838aa00b9e59efde974dcdb455608192811 Mon Sep 17 00:00:00 2001 From: Kirill Korotaev Date: Thu, 23 Jun 2005 00:09:51 -0700 Subject: [PATCH] Software suspend and 
recalc sigpending bug fix This patch fixes recalc_sigpending() to work correctly with tasks which are being freezed. The problem is that freeze_processes() sets PF_FREEZE and TIF_SIGPENDING flags on tasks, but recalc_sigpending() called from e.g. sys_rt_sigtimedwait or any other kernel place will clear TIF_SIGPENDING due to no pending signals queued and the tasks won't be freezed until it recieves a real signal or freezed_processes() fail due to timeout. Signed-Off-By: Kirill Korotaev Signed-Off-By: Alexey Kuznetsov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/signal.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index c89821b69ae3..d1258729a5f9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -213,6 +213,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) fastcall void recalc_sigpending_tsk(struct task_struct *t) { if (t->signal->group_stop_count > 0 || + (t->flags & PF_FREEZE) || PENDING(&t->pending, &t->blocked) || PENDING(&t->signal->shared_pending, &t->blocked)) set_tsk_thread_flag(t, TIF_SIGPENDING); -- cgit v1.2.3 From 71a2224d7d1cefc23a1ac80bba421cc069cc3257 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 23 Jun 2005 00:10:05 -0700 Subject: [PATCH] Optimize sys_times for a single thread process Avoid taking the tasklist_lock in sys_times if the process is single threaded. In a NUMA system taking the tasklist_lock may cause a bouncing cacheline if multiple independent processes continually call sys_times to measure their performance. Signed-off-by: Christoph Lameter Signed-off-by: Shai Fultheim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 5 ++++ kernel/sys.c | 86 +++++++++++++++++++++++++++++++++++++++++------------------ 2 files changed, 65 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index c2bdf6fb61a5..3ebcd60a19c6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -72,6 +72,11 @@ repeat: BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children)); __exit_signal(p); __exit_sighand(p); + /* + * Note that the fastpath in sys_times depends on __exit_signal having + * updated the counters before a task is removed from the tasklist of + * the process by __unhash_process. + */ __unhash_process(p); /* diff --git a/kernel/sys.c b/kernel/sys.c index 0a2c8cda9638..5a9d6b075016 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -894,35 +894,69 @@ asmlinkage long sys_times(struct tms __user * tbuf) */ if (tbuf) { struct tms tmp; - struct task_struct *tsk = current; - struct task_struct *t; cputime_t utime, stime, cutime, cstime; - read_lock(&tasklist_lock); - utime = tsk->signal->utime; - stime = tsk->signal->stime; - t = tsk; - do { - utime = cputime_add(utime, t->utime); - stime = cputime_add(stime, t->stime); - t = next_thread(t); - } while (t != tsk); - - /* - * While we have tasklist_lock read-locked, no dying thread - * can be updating current->signal->[us]time. Instead, - * we got their counts included in the live thread loop. - * However, another thread can come in right now and - * do a wait call that updates current->signal->c[us]time. - * To make sure we always see that pair updated atomically, - * we take the siglock around fetching them. 
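For reference, the userspace contract that both the single-threaded fast path and the locked multi-threaded path must preserve; an illustrative check built on the glibc times() wrapper:

    #include <stdio.h>
    #include <sys/times.h>
    #include <unistd.h>

    int main(void)
    {
            struct tms t;
            clock_t now = times(&t);
            long hz = sysconf(_SC_CLK_TCK);

            printf("utime=%ld stime=%ld cutime=%ld cstime=%ld (ticks, %ld per sec)\n",
                   (long)t.tms_utime, (long)t.tms_stime,
                   (long)t.tms_cutime, (long)t.tms_cstime, hz);
            printf("times() returned %ld\n", (long)now);
            return 0;
    }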
- */ - spin_lock_irq(&tsk->sighand->siglock); - cutime = tsk->signal->cutime; - cstime = tsk->signal->cstime; - spin_unlock_irq(&tsk->sighand->siglock); - read_unlock(&tasklist_lock); +#ifdef CONFIG_SMP + if (thread_group_empty(current)) { + /* + * Single thread case without the use of any locks. + * + * We may race with release_task if two threads are + * executing. However, release task first adds up the + * counters (__exit_signal) before removing the task + * from the process tasklist (__unhash_process). + * __exit_signal also acquires and releases the + * siglock which results in the proper memory ordering + * so that the list modifications are always visible + * after the counters have been updated. + * + * If the counters have been updated by the second thread + * but the thread has not yet been removed from the list + * then the other branch will be executing which will + * block on tasklist_lock until the exit handling of the + * other task is finished. + * + * This also implies that the sighand->siglock cannot + * be held by another processor. So we can also + * skip acquiring that lock. + */ + utime = cputime_add(current->signal->utime, current->utime); + stime = cputime_add(current->signal->utime, current->stime); + cutime = current->signal->cutime; + cstime = current->signal->cstime; + } else +#endif + { + + /* Process with multiple threads */ + struct task_struct *tsk = current; + struct task_struct *t; + read_lock(&tasklist_lock); + utime = tsk->signal->utime; + stime = tsk->signal->stime; + t = tsk; + do { + utime = cputime_add(utime, t->utime); + stime = cputime_add(stime, t->stime); + t = next_thread(t); + } while (t != tsk); + + /* + * While we have tasklist_lock read-locked, no dying thread + * can be updating current->signal->[us]time. Instead, + * we got their counts included in the live thread loop. + * However, another thread can come in right now and + * do a wait call that updates current->signal->c[us]time. + * To make sure we always see that pair updated atomically, + * we take the siglock around fetching them. + */ + spin_lock_irq(&tsk->sighand->siglock); + cutime = tsk->signal->cutime; + cstime = tsk->signal->cstime; + spin_unlock_irq(&tsk->sighand->siglock); + read_unlock(&tasklist_lock); + } tmp.tms_utime = cputime_to_clock_t(utime); tmp.tms_stime = cputime_to_clock_t(stime); tmp.tms_cutime = cputime_to_clock_t(cutime); -- cgit v1.2.3 From c43dc2fd885b5658cfd7cedb7bcca20910c517a4 Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Thu, 23 Jun 2005 00:10:27 -0700 Subject: [PATCH] aio: make wait_queue ->task ->private In the upcoming aio_down patch, it is useful to store a private data pointer in the kiocb's wait_queue. Since we provide our own wake up function and do not require the task_struct pointer, it makes sense to convert the task pointer into a generic private pointer. 
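With the wait_queue ->task pointer generalised to ->private, a custom wake function can carry an arbitrary payload instead of a task pointer. A hypothetical in-kernel sketch (the payload struct and function names are illustrative):

    #include <linux/wait.h>

    struct my_payload {
            int fired;
    };

    static int my_wake(wait_queue_t *wait, unsigned mode, int sync, void *key)
    {
            struct my_payload *p = wait->private;  /* was wait->task */

            p->fired = 1;
            return 1;               /* report this waiter as woken */
    }

    static void my_add_waiter(wait_queue_head_t *wq, wait_queue_t *wait,
                              struct my_payload *p)
    {
            init_waitqueue_func_entry(wait, my_wake);
            wait->private = p;
            add_wait_queue(wq, wait);
    }

default_wake_function(), shown in the diff above, is simply the special case where ->private still points at a task_struct.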
Signed-off-by: Benjamin LaHaise Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 6ee4515d5a20..76080d142e3d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2869,7 +2869,7 @@ need_resched: int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key) { - task_t *p = curr->task; + task_t *p = curr->private; return try_to_wake_up(p, mode, sync); } -- cgit v1.2.3 From 7888e7ff4ee579442128d7d12a9c9dbf2cf7de6a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 23 Jun 2005 22:00:51 -0700 Subject: [PATCH] Keys: Pass session keyring to call_usermodehelper() The attached patch makes it possible to pass a session keyring through to the process spawned by call_usermodehelper(). This allows patch 3/3 to pass an authorisation key through to /sbin/request-key, thus permitting better access controls when doing just-in-time key creation. Signed-Off-By: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kmod.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/kmod.c b/kernel/kmod.c index eed53d4f5230..44166e3bb8af 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -120,6 +120,7 @@ struct subprocess_info { char *path; char **argv; char **envp; + struct key *ring; int wait; int retval; }; @@ -130,16 +131,21 @@ struct subprocess_info { static int ____call_usermodehelper(void *data) { struct subprocess_info *sub_info = data; + struct key *old_session; int retval; - /* Unblock all signals. */ + /* Unblock all signals and set the session keyring. */ + key_get(sub_info->ring); flush_signals(current); spin_lock_irq(¤t->sighand->siglock); + old_session = __install_session_keyring(current, sub_info->ring); flush_signal_handlers(current, 1); sigemptyset(¤t->blocked); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); + key_put(old_session); + /* We can run anywhere, unlike our parent keventd(). */ set_cpus_allowed(current, CPU_MASK_ALL); @@ -211,10 +217,11 @@ static void __call_usermodehelper(void *data) } /** - * call_usermodehelper - start a usermode application + * call_usermodehelper_keys - start a usermode application * @path: pathname for the application * @argv: null-terminated argument list * @envp: null-terminated environment list + * @session_keyring: session keyring for process (NULL for an empty keyring) * @wait: wait for the application to finish and return status. * * Runs a user-space application. The application is started @@ -224,7 +231,8 @@ static void __call_usermodehelper(void *data) * Must be called from process context. Returns a negative error code * if program was not execed successfully, or 0. 
*/ -int call_usermodehelper(char *path, char **argv, char **envp, int wait) +int call_usermodehelper_keys(char *path, char **argv, char **envp, + struct key *session_keyring, int wait) { DECLARE_COMPLETION(done); struct subprocess_info sub_info = { @@ -232,6 +240,7 @@ int call_usermodehelper(char *path, char **argv, char **envp, int wait) .path = path, .argv = argv, .envp = envp, + .ring = session_keyring, .wait = wait, .retval = 0, }; @@ -247,7 +256,7 @@ int call_usermodehelper(char *path, char **argv, char **envp, int wait) wait_for_completion(&done); return sub_info.retval; } -EXPORT_SYMBOL(call_usermodehelper); +EXPORT_SYMBOL(call_usermodehelper_keys); void __init usermodehelper_init(void) { -- cgit v1.2.3 From 3e30148c3d524a9c1c63ca28261bc24c457eb07a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 23 Jun 2005 22:00:56 -0700 Subject: [PATCH] Keys: Make request-key create an authorisation key The attached patch makes the following changes: (1) There's a new special key type called ".request_key_auth". This is an authorisation key for when one process requests a key and another process is started to construct it. This type of key cannot be created by the user; nor can it be requested by kernel services. Authorisation keys hold two references: (a) Each refers to a key being constructed. When the key being constructed is instantiated the authorisation key is revoked, rendering it of no further use. (b) The "authorising process". This is either: (i) the process that called request_key(), or: (ii) if the process that called request_key() itself had an authorisation key in its session keyring, then the authorising process referred to by that authorisation key will also be referred to by the new authorisation key. This means that the process that initiated a chain of key requests will authorise the lot of them, and will, by default, wind up with the keys obtained from them in its keyrings. (2) request_key() creates an authorisation key which is then passed to /sbin/request-key in as part of a new session keyring. (3) When request_key() is searching for a key to hand back to the caller, if it comes across an authorisation key in the session keyring of the calling process, it will also search the keyrings of the process specified therein and it will use the specified process's credentials (fsuid, fsgid, groups) to do that rather than the calling process's credentials. This allows a process started by /sbin/request-key to find keys belonging to the authorising process. (4) A key can be read, even if the process executing KEYCTL_READ doesn't have direct read or search permission if that key is contained within the keyrings of a process specified by an authorisation key found within the calling process's session keyring, and is searchable using the credentials of the authorising process. This allows a process started by /sbin/request-key to read keys belonging to the authorising process. (5) The magic KEY_SPEC_*_KEYRING key IDs when passed to KEYCTL_INSTANTIATE or KEYCTL_NEGATE will specify a keyring of the authorising process, rather than the process doing the instantiation. (6) One of the process keyrings can be nominated as the default to which request_key() should attach new keys if not otherwise specified. This is done with KEYCTL_SET_REQKEY_KEYRING and one of the KEY_REQKEY_DEFL_* constants. The current setting can also be read using this call. (7) request_key() is partially interruptible. If it is waiting for another process to finish constructing a key, it can be interrupted. 
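As for the call_usermodehelper_keys() change above: existing callers need not change if the old name survives as a thin wrapper that passes a NULL (empty) session keyring. A plausible sketch, since this diff does not show where the wrapper lives:

    static inline int call_usermodehelper(char *path, char **argv, char **envp,
                                          int wait)
    {
            return call_usermodehelper_keys(path, argv, envp, NULL, wait);
    }

A caller such as the hotplug helper can then keep its existing call_usermodehelper(path, argv, envp, 0) invocation unchanged.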
This permits a request-key cycle to be broken without recourse to rebooting. Signed-Off-By: David Howells Signed-Off-By: Benoit Boissinot Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index 5a9d6b075016..da24bc1292db 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1259,7 +1259,7 @@ static void groups_sort(struct group_info *group_info) } /* a simple bsearch */ -static int groups_search(struct group_info *group_info, gid_t grp) +int groups_search(struct group_info *group_info, gid_t grp) { int left, right; -- cgit v1.2.3 From c988d2b2845495373f666a381d354a7f80981d62 Mon Sep 17 00:00:00 2001 From: Matt Domsch Date: Thu, 23 Jun 2005 22:05:15 -0700 Subject: [PATCH] modules: add version and srcversion to sysfs This patch adds version and srcversion files to /sys/module/${modulename} containing the version and srcversion fields of the module's modinfo section (if present). /sys/module/e1000 |-- srcversion `-- version This patch differs slightly from the version posted in January, as it now uses the new kstrdup() call in -mm. Why put this in sysfs? a) Tools like DKMS, which deal with changing out individual kernel modules without replacing the whole kernel, can behave smarter if they can tell the version of a given module. The autoinstaller feature, for example, which determines if your system has a "good" version of a driver (i.e. if the one provided by DKMS has a newer verson than that provided by the kernel package installed), and to automatically compile and install a newer version if DKMS has it but your kernel doesn't yet have that version. b) Because sysadmins manually, or with tools like DKMS, can switch out modules on the file system, you can't count on 'modinfo foo.ko', which looks at /lib/modules/${kernelver}/... actually matching what is loaded into the kernel already. Hence asking sysfs for this. c) as the unbind-driver-from-device work takes shape, it will be possible to rebind a driver that's built-in (no .ko to modinfo for the version) to a newly loaded module. sysfs will have the currently-built-in version info, for comparison. d) tech support scripts can then easily grab the version info for what's running presently - a question I get often. There has been renewed interest in this patch on linux-scsi by driver authors. As the idea originated from GregKH, I leave his Signed-off-by: intact, though the implementation is nearly completely new. Compiled and run on x86 and x86_64. 
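On the module author's side nothing new is required beyond the usual modinfo tags: a module carrying the (made-up) version below would expose it as /sys/module/<name>/version, while srcversion is typically generated by modpost rather than written in the source. Note that, per the #ifdefs in the diff, the attributes are only created when CONFIG_MODULE_UNLOAD is set.

    #include <linux/module.h>
    #include <linux/init.h>

    MODULE_LICENSE("GPL");
    MODULE_VERSION("1.2.3");        /* shows up as /sys/module/<name>/version */

    static int __init demo_init(void)
    {
            return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);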
From: Matthew Dobson build fix From: Thierry Vignaud build fix From: Matthew Dobson warning fix Signed-off-by: Greg Kroah-Hartman Signed-off-by: Matt Domsch Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/module.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index a566745dde62..0494c89a0d26 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -370,6 +371,43 @@ static inline void percpu_modcopy(void *pcpudst, const void *src, #endif /* CONFIG_SMP */ #ifdef CONFIG_MODULE_UNLOAD +#define MODINFO_ATTR(field) \ +static void setup_modinfo_##field(struct module *mod, const char *s) \ +{ \ + mod->field = kstrdup(s, GFP_KERNEL); \ +} \ +static ssize_t show_modinfo_##field(struct module_attribute *mattr, \ + struct module *mod, char *buffer) \ +{ \ + return sprintf(buffer, "%s\n", mod->field); \ +} \ +static int modinfo_##field##_exists(struct module *mod) \ +{ \ + return mod->field != NULL; \ +} \ +static void free_modinfo_##field(struct module *mod) \ +{ \ + kfree(mod->field); \ + mod->field = NULL; \ +} \ +static struct module_attribute modinfo_##field = { \ + .attr = { .name = __stringify(field), .mode = 0444, \ + .owner = THIS_MODULE }, \ + .show = show_modinfo_##field, \ + .setup = setup_modinfo_##field, \ + .test = modinfo_##field##_exists, \ + .free = free_modinfo_##field, \ +}; + +MODINFO_ATTR(version); +MODINFO_ATTR(srcversion); + +static struct module_attribute *modinfo_attrs[] = { + &modinfo_version, + &modinfo_srcversion, + NULL, +}; + /* Init the unload section of the module. */ static void module_unload_init(struct module *mod) { @@ -1031,6 +1069,32 @@ static void module_remove_refcnt_attr(struct module *mod) } #endif +#ifdef CONFIG_MODULE_UNLOAD +static int module_add_modinfo_attrs(struct module *mod) +{ + struct module_attribute *attr; + int error = 0; + int i; + + for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) { + if (!attr->test || + (attr->test && attr->test(mod))) + error = sysfs_create_file(&mod->mkobj.kobj,&attr->attr); + } + return error; +} + +static void module_remove_modinfo_attrs(struct module *mod) +{ + struct module_attribute *attr; + int i; + + for (i = 0; (attr = modinfo_attrs[i]); i++) { + sysfs_remove_file(&mod->mkobj.kobj,&attr->attr); + attr->free(mod); + } +} +#endif static int mod_sysfs_setup(struct module *mod, struct kernel_param *kparam, @@ -1056,6 +1120,12 @@ static int mod_sysfs_setup(struct module *mod, if (err) goto out_unreg; +#ifdef CONFIG_MODULE_UNLOAD + err = module_add_modinfo_attrs(mod); + if (err) + goto out_unreg; +#endif + return 0; out_unreg: @@ -1066,6 +1136,9 @@ out: static void mod_kobject_remove(struct module *mod) { +#ifdef CONFIG_MODULE_UNLOAD + module_remove_modinfo_attrs(mod); +#endif module_remove_refcnt_attr(mod); module_param_sysfs_remove(mod); @@ -1311,6 +1384,23 @@ static char *get_modinfo(Elf_Shdr *sechdrs, return NULL; } +#ifdef CONFIG_MODULE_UNLOAD +static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs, + unsigned int infoindex) +{ + struct module_attribute *attr; + int i; + + for (i = 0; (attr = modinfo_attrs[i]); i++) { + if (attr->setup) + attr->setup(mod, + get_modinfo(sechdrs, + infoindex, + attr->attr.name)); + } +} +#endif + #ifdef CONFIG_KALLSYMS int is_exported(const char *name, const struct module *mod) { @@ -1615,6 +1705,11 @@ static struct module *load_module(void __user *umod, /* 
Set up license info based on the info section */ set_license(mod, get_modinfo(sechdrs, infoindex, "license")); +#ifdef CONFIG_MODULE_UNLOAD + /* Set up MODINFO_ATTR fields */ + setup_modinfo(mod, sechdrs, infoindex); +#endif + /* Fix up syms, so that st_value is a pointer to location. */ err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex, mod); -- cgit v1.2.3 From 52c1da39534fb382c061de58b65f678ad74b59f5 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Thu, 23 Jun 2005 22:05:33 -0700 Subject: [PATCH] make various thing static Another rollup of patches which give various symbols static scope Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/spurious.c | 2 +- kernel/module.c | 2 +- kernel/power/swsusp.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index f6297c306905..ba039e827d58 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -45,7 +45,7 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret) } } -void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret) +static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret) { static int count = 100; diff --git a/kernel/module.c b/kernel/module.c index 0494c89a0d26..068e271ab3a5 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -730,7 +730,7 @@ static int obsparm_copy_string(const char *val, struct kernel_param *kp) return 0; } -int set_obsolete(const char *val, struct kernel_param *kp) +static int set_obsolete(const char *val, struct kernel_param *kp) { unsigned int min, max; unsigned int size, maxsize; diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 90b3b68dee3f..53f9f8720ee4 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -81,7 +81,7 @@ static int nr_copy_pages_check; extern char resume_file[]; /* Local variables that should not be affected by save */ -unsigned int nr_copy_pages __nosavedata = 0; +static unsigned int nr_copy_pages __nosavedata = 0; /* Suspend pagedir is allocated before final copy, therefore it must be freed after resume -- cgit v1.2.3 From f370513640492641b4046bfd9a6e4714f6ae530d Mon Sep 17 00:00:00 2001 From: Zwane Mwaikambo Date: Sat, 25 Jun 2005 14:54:50 -0700 Subject: [PATCH] i386 CPU hotplug (The i386 CPU hotplug patch provides infrastructure for some work which Pavel is doing as well as for ACPI S3 (suspend-to-RAM) work which Li Shaohua is doing) The following provides i386 architecture support for safely unregistering and registering processors during runtime, updated for the current -mm tree. In order to avoid dumping cpu hotplug code into kernel/irq/* i dropped the cpu_online check in do_IRQ() by modifying fixup_irqs(). The difference being that on cpu offline, fixup_irqs() is called before we clear the cpu from cpu_online_map and a long delay in order to ensure that we never have any queued external interrupts on the APICs. There are additional changes to s390 and ppc64 to account for this change. 1) Add CONFIG_HOTPLUG_CPU 2) disable local APIC timer on dead cpus. 3) Disable preempt around irq balancing to prevent CPUs going down. 4) Print irq stats for all possible cpus. 5) Debugging check for interrupts on offline cpus. 6) Hacky fixup_irqs() to redirect irqs when cpus go off/online. 7) play_dead() for offline cpus to spin inside. 8) Handle offline cpus set in flush_tlb_others(). 
9) Grab lock earlier in smp_call_function() to prevent CPUs going down. 10) Implement __cpu_disable() and __cpu_die(). 11) Enable local interrupts in cpu_enable() after fixup_irqs() 12) Don't fiddle with NMI on dead cpu, but leave intact on other cpus. 13) Program IRQ affinity whilst cpu is still in cpu_online_map on offline. Signed-off-by: Zwane Mwaikambo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpu.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 628f4ccda127..53d8263ae12e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -63,19 +63,15 @@ static int take_cpu_down(void *unused) { int err; - /* Take offline: makes arch_cpu_down somewhat easier. */ - cpu_clear(smp_processor_id(), cpu_online_map); - /* Ensure this CPU doesn't handle any more interrupts. */ err = __cpu_disable(); if (err < 0) - cpu_set(smp_processor_id(), cpu_online_map); - else - /* Force idle task to run as soon as we yield: it should - immediately notice cpu is offline and die quickly. */ - sched_idle_next(); + return err; - return err; + /* Force idle task to run as soon as we yield: it should + immediately notice cpu is offline and die quickly. */ + sched_idle_next(); + return 0; } int cpu_down(unsigned int cpu) -- cgit v1.2.3 From 5a72e04df5470df0ec646029d31e5528167ab1a7 Mon Sep 17 00:00:00 2001 From: Li Shaohua Date: Sat, 25 Jun 2005 14:55:06 -0700 Subject: [PATCH] suspend/resume SMP support Using CPU hotplug to support suspend/resume SMP. Both S3 and S4 use disable/enable_nonboot_cpus API. The S4 part is based on Pavel's original S4 SMP patch. Signed-off-by: Li Shaohua Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/Kconfig | 6 +++- kernel/power/Makefile | 6 ++-- kernel/power/disk.c | 35 ++++++++++---------- kernel/power/main.c | 16 +++++---- kernel/power/smp.c | 89 ++++++++++++++++++++------------------------------- kernel/power/swsusp.c | 2 ++ 6 files changed, 72 insertions(+), 82 deletions(-) (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 696387ffe49c..fdb377636505 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -28,7 +28,7 @@ config PM_DEBUG config SOFTWARE_SUSPEND bool "Software Suspend (EXPERIMENTAL)" - depends on EXPERIMENTAL && PM && SWAP + depends on EXPERIMENTAL && PM && SWAP && (SUSPEND_SMP || !SMP) ---help--- Enable the possibility of suspending the machine. It doesn't need APM. @@ -72,3 +72,7 @@ config PM_STD_PARTITION suspended image to. It will simply pick the first available swap device. 
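The CPU hotplug infrastructure above also gives drivers a clean way to track CPUs coming and going via the existing hotplug notifier chain, rather than poking at cpu_online_map directly. A rough sketch (the callback body is illustrative):

    #include <linux/cpu.h>
    #include <linux/notifier.h>
    #include <linux/init.h>

    static int my_cpu_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            switch (action) {
            case CPU_ONLINE:
                    /* (re)allocate or reattach per-CPU resources for 'cpu' */
                    break;
            case CPU_DEAD:
                    /* free per-CPU resources; 'cpu' is gone from cpu_online_map */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_cpu_nb = {
            .notifier_call = my_cpu_callback,
    };

    static int __init my_hotplug_init(void)
    {
            return register_cpu_notifier(&my_cpu_nb);
    }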
+config SUSPEND_SMP + bool + depends on HOTPLUG_CPU && X86 && PM + default y diff --git a/kernel/power/Makefile b/kernel/power/Makefile index fbdc634135a7..2f438d0eaa13 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -3,9 +3,9 @@ ifeq ($(CONFIG_PM_DEBUG),y) EXTRA_CFLAGS += -DDEBUG endif -swsusp-smp-$(CONFIG_SMP) += smp.o - obj-y := main.o process.o console.o pm.o -obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o $(swsusp-smp-y) disk.o +obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o + +obj-$(CONFIG_SUSPEND_SMP) += smp.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 02b6764034dc..fb8de63c2919 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -117,8 +117,8 @@ static void finish(void) { device_resume(); platform_finish(); - enable_nonboot_cpus(); thaw_processes(); + enable_nonboot_cpus(); pm_restore_console(); } @@ -131,28 +131,35 @@ static int prepare_processes(void) sys_sync(); + disable_nonboot_cpus(); + if (freeze_processes()) { error = -EBUSY; - return error; + goto thaw; } if (pm_disk_mode == PM_DISK_PLATFORM) { if (pm_ops && pm_ops->prepare) { if ((error = pm_ops->prepare(PM_SUSPEND_DISK))) - return error; + goto thaw; } } /* Free memory before shutting down devices. */ free_some_memory(); - return 0; +thaw: + thaw_processes(); + enable_nonboot_cpus(); + pm_restore_console(); + return error; } static void unprepare_processes(void) { - enable_nonboot_cpus(); + platform_finish(); thaw_processes(); + enable_nonboot_cpus(); pm_restore_console(); } @@ -160,15 +167,9 @@ static int prepare_devices(void) { int error; - disable_nonboot_cpus(); - if ((error = device_suspend(PMSG_FREEZE))) { + if ((error = device_suspend(PMSG_FREEZE))) printk("Some devices failed to suspend\n"); - platform_finish(); - enable_nonboot_cpus(); - return error; - } - - return 0; + return error; } /** @@ -185,9 +186,9 @@ int pm_suspend_disk(void) int error; error = prepare_processes(); - if (!error) { - error = prepare_devices(); - } + if (error) + return error; + error = prepare_devices(); if (error) { unprepare_processes(); @@ -250,7 +251,7 @@ static int software_resume(void) if ((error = prepare_processes())) { swsusp_close(); - goto Cleanup; + goto Done; } pr_debug("PM: Reading swsusp image.\n"); diff --git a/kernel/power/main.c b/kernel/power/main.c index 4cdebc972ff2..c94cb9e95090 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -55,6 +55,13 @@ static int suspend_prepare(suspend_state_t state) pm_prepare_console(); + disable_nonboot_cpus(); + + if (num_online_cpus() != 1) { + error = -EPERM; + goto Enable_cpu; + } + if (freeze_processes()) { error = -EAGAIN; goto Thaw; @@ -75,6 +82,8 @@ static int suspend_prepare(suspend_state_t state) pm_ops->finish(state); Thaw: thaw_processes(); + Enable_cpu: + enable_nonboot_cpus(); pm_restore_console(); return error; } @@ -113,6 +122,7 @@ static void suspend_finish(suspend_state_t state) if (pm_ops && pm_ops->finish) pm_ops->finish(state); thaw_processes(); + enable_nonboot_cpus(); pm_restore_console(); } @@ -150,12 +160,6 @@ static int enter_state(suspend_state_t state) goto Unlock; } - /* Suspend is hard to get right on SMP. 
*/ - if (num_online_cpus() != 1) { - error = -EPERM; - goto Unlock; - } - pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); if ((error = suspend_prepare(state))) goto Unlock; diff --git a/kernel/power/smp.c b/kernel/power/smp.c index 457c2302ed42..bbe23079c62c 100644 --- a/kernel/power/smp.c +++ b/kernel/power/smp.c @@ -13,73 +13,52 @@ #include #include #include +#include #include #include -static atomic_t cpu_counter, freeze; - - -static void smp_pause(void * data) -{ - struct saved_context ctxt; - __save_processor_state(&ctxt); - printk("Sleeping in:\n"); - dump_stack(); - atomic_inc(&cpu_counter); - while (atomic_read(&freeze)) { - /* FIXME: restore takes place at random piece inside this. - This should probably be written in assembly, and - preserve general-purpose registers, too - - What about stack? We may need to move to new stack here. - - This should better be ran with interrupts disabled. - */ - cpu_relax(); - barrier(); - } - atomic_dec(&cpu_counter); - __restore_processor_state(&ctxt); -} - -static cpumask_t oldmask; +/* This is protected by pm_sem semaphore */ +static cpumask_t frozen_cpus; void disable_nonboot_cpus(void) { - oldmask = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(0)); - printk("Freezing CPUs (at %d)", raw_smp_processor_id()); - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(HZ); - printk("..."); - BUG_ON(raw_smp_processor_id() != 0); - - /* FIXME: for this to work, all the CPUs must be running - * "idle" thread (or we deadlock). Is that guaranteed? */ + int cpu, error; - atomic_set(&cpu_counter, 0); - atomic_set(&freeze, 1); - smp_call_function(smp_pause, NULL, 0, 0); - while (atomic_read(&cpu_counter) < (num_online_cpus() - 1)) { - cpu_relax(); - barrier(); + error = 0; + cpus_clear(frozen_cpus); + printk("Freezing cpus ...\n"); + for_each_online_cpu(cpu) { + if (cpu == 0) + continue; + error = cpu_down(cpu); + if (!error) { + cpu_set(cpu, frozen_cpus); + printk("CPU%d is down\n", cpu); + continue; + } + printk("Error taking cpu %d down: %d\n", cpu, error); } - printk("ok\n"); + BUG_ON(smp_processor_id() != 0); + if (error) + panic("cpus not sleeping"); } void enable_nonboot_cpus(void) { - printk("Restarting CPUs"); - atomic_set(&freeze, 0); - while (atomic_read(&cpu_counter)) { - cpu_relax(); - barrier(); - } - printk("..."); - set_cpus_allowed(current, oldmask); - schedule(); - printk("ok\n"); + int cpu, error; + printk("Thawing cpus ...\n"); + for_each_cpu_mask(cpu, frozen_cpus) { + error = smp_prepare_cpu(cpu); + if (!error) + error = cpu_up(cpu); + if (!error) { + printk("CPU%d is up\n", cpu); + continue; + } + printk("Error taking cpu %d up: %d\n", cpu, error); + panic("Not enough cpus"); + } + cpus_clear(frozen_cpus); } - diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 53f9f8720ee4..339b5c3735bd 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -1193,8 +1193,10 @@ static const char * sanity_check(void) return "version"; if (strcmp(swsusp_info.uts.machine,system_utsname.machine)) return "machine"; +#if 0 if(swsusp_info.cpus != num_online_cpus()) return "number of cpus"; +#endif return NULL; } -- cgit v1.2.3 From 620b03276488c3cf103caf1e326bd21f00d3df84 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Sat, 25 Jun 2005 14:55:11 -0700 Subject: [PATCH] properly stop devices before poweroff Without this patch, Linux provokes emergency disk shutdowns and similar nastiness. It was in SuSE kernels for some time, IIRC. 
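The reboot paths touched below already run the reboot notifier chain before devices are suspended and shut down; a driver that must quiesce hardware at halt, power-off or restart time hooks that chain roughly as follows (the handler body is illustrative):

    #include <linux/notifier.h>
    #include <linux/reboot.h>
    #include <linux/init.h>

    static int my_reboot_event(struct notifier_block *nb,
                               unsigned long event, void *cmd)
    {
            /* event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
            /* flush caches, stop DMA, park the hardware here */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_reboot_nb = {
            .notifier_call = my_reboot_event,
    };

    static int __init my_init(void)
    {
            return register_reboot_notifier(&my_reboot_nb);
    }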
Signed-off-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index da24bc1292db..dac10161ca23 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -405,6 +405,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user case LINUX_REBOOT_CMD_HALT: notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL); system_state = SYSTEM_HALT; + device_suspend(PMSG_SUSPEND); device_shutdown(); printk(KERN_EMERG "System halted.\n"); machine_halt(); @@ -415,6 +416,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user case LINUX_REBOOT_CMD_POWER_OFF: notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL); system_state = SYSTEM_POWER_OFF; + device_suspend(PMSG_SUSPEND); device_shutdown(); printk(KERN_EMERG "Power down.\n"); machine_power_off(); @@ -431,6 +433,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer); system_state = SYSTEM_RESTART; + device_suspend(PMSG_FREEZE); device_shutdown(); printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer); machine_restart(buffer); -- cgit v1.2.3 From 8f9bdf15c059c5d84db9c395705bf79b30762420 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Sat, 25 Jun 2005 14:55:12 -0700 Subject: [PATCH] swsusp: kill unneccessary does_collide_order The following patch removes the unnecessary function does_collide_order(). This function is no longer necessary, as currently there are only 0-order allocations in swsusp, and the use of it is confusing. Signed-off-by: Rafael J. Wysocki Signed-off-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/swsusp.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 339b5c3735bd..7747a8c43e84 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -929,21 +929,6 @@ int swsusp_resume(void) return error; } -/* More restore stuff */ - -/* - * Returns true if given address/order collides with any orig_address - */ -static int does_collide_order(unsigned long addr, int order) -{ - int i; - - for (i=0; i < (1< Date: Sat, 25 Jun 2005 14:55:12 -0700 Subject: [PATCH] swsusp: cleanup whitespace The following patch cleans up whitespace in swsusp.c (a bit): - removes any trailing whitespace - adds spaces after if, for, for_each_pbe, for_each_zone etc., wherever necessary. Signed-off-by: Rafael J. Wysocki Signed-off-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/swsusp.c | 66 +++++++++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 7747a8c43e84..9a3ca659a436 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -10,12 +10,12 @@ * This file is released under the GPLv2. * * I'd like to thank the following people for their work: - * + * * Pavel Machek : * Modifications, defectiveness pointing, being with me at the very beginning, * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17. * - * Steve Doddi : + * Steve Doddi : * Support the possibility of hardware state restoring. 
* * Raph : @@ -84,11 +84,11 @@ extern char resume_file[]; static unsigned int nr_copy_pages __nosavedata = 0; /* Suspend pagedir is allocated before final copy, therefore it - must be freed after resume + must be freed after resume Warning: this is evil. There are actually two pagedirs at time of resume. One is "pagedir_save", which is empty frame allocated at - time of suspend, that must be freed. Second is "pagedir_nosave", + time of suspend, that must be freed. Second is "pagedir_nosave", allocated at time of resume, that travels through memory not to collide with anything. @@ -132,7 +132,7 @@ static int mark_swapfiles(swp_entry_t prev) { int error; - rw_swap_page_sync(READ, + rw_swap_page_sync(READ, swp_entry(root_swap, 0), virt_to_page((unsigned long)&swsusp_header)); if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) || @@ -140,7 +140,7 @@ static int mark_swapfiles(swp_entry_t prev) memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); memcpy(swsusp_header.sig,SWSUSP_SIG, 10); swsusp_header.swsusp_info = prev; - error = rw_swap_page_sync(WRITE, + error = rw_swap_page_sync(WRITE, swp_entry(root_swap, 0), virt_to_page((unsigned long) &swsusp_header)); @@ -174,22 +174,22 @@ static int is_resume_device(const struct swap_info_struct *swap_info) static int swsusp_swap_check(void) /* This is called before saving image */ { int i, len; - + len=strlen(resume_file); root_swap = 0xFFFF; - + swap_list_lock(); - for(i=0; iaddress, &(p->swap_address)))) @@ -335,7 +335,7 @@ static int close_swap(void) dump_info(); error = write_page((unsigned long)&swsusp_info, &entry); - if (!error) { + if (!error) { printk( "S" ); error = mark_swapfiles(entry); printk( "|\n" ); @@ -370,7 +370,7 @@ static int write_pagedir(void) struct pbe * pbe; printk( "Writing pagedir..."); - for_each_pb_page(pbe, pagedir_nosave) { + for_each_pb_page (pbe, pagedir_nosave) { if ((error = write_page((unsigned long)pbe, &swsusp_info.pagedir[n++]))) return error; } @@ -472,7 +472,7 @@ static int save_highmem(void) int res = 0; pr_debug("swsusp: Saving Highmem\n"); - for_each_zone(zone) { + for_each_zone (zone) { if (is_highmem(zone)) res = save_highmem_zone(zone); if (res) @@ -547,7 +547,7 @@ static void count_data_pages(void) nr_copy_pages = 0; - for_each_zone(zone) { + for_each_zone (zone) { if (is_highmem(zone)) continue; mark_free_pages(zone); @@ -562,9 +562,9 @@ static void copy_data_pages(void) struct zone *zone; unsigned long zone_pfn; struct pbe * pbe = pagedir_nosave; - + pr_debug("copy_data_pages(): pages to copy: %d\n", nr_copy_pages); - for_each_zone(zone) { + for_each_zone (zone) { if (is_highmem(zone)) continue; mark_free_pages(zone); @@ -702,7 +702,7 @@ static void free_image_pages(void) { struct pbe * p; - for_each_pbe(p, pagedir_save) { + for_each_pbe (p, pagedir_save) { if (p->address) { ClearPageNosave(virt_to_page(p->address)); free_page(p->address); @@ -719,7 +719,7 @@ static int alloc_image_pages(void) { struct pbe * p; - for_each_pbe(p, pagedir_save) { + for_each_pbe (p, pagedir_save) { p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD); if (!p->address) return -ENOMEM; @@ -740,7 +740,7 @@ void swsusp_free(void) /** * enough_free_mem - Make sure we enough free memory to snapshot. * - * Returns TRUE or FALSE after checking the number of available + * Returns TRUE or FALSE after checking the number of available * free pages. */ @@ -758,11 +758,11 @@ static int enough_free_mem(void) /** * enough_swap - Make sure we have enough swap to save the image. 
* - * Returns TRUE or FALSE after checking the total amount of swap + * Returns TRUE or FALSE after checking the total amount of swap * space avaiable. * * FIXME: si_swapinfo(&i) returns all swap devices information. - * We should only consider resume_device. + * We should only consider resume_device. */ static int enough_swap(void) @@ -827,8 +827,8 @@ static int suspend_prepare_image(void) error = swsusp_alloc(); if (error) return error; - - /* During allocating of suspend pagedir, new cold pages may appear. + + /* During allocating of suspend pagedir, new cold pages may appear. * Kill them. */ drain_local_pages(); @@ -1030,7 +1030,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist) /* Set page flags */ - for_each_zone(zone) { + for_each_zone (zone) { for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) SetPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn)); -- cgit v1.2.3 From c61978b30322c83a94d7e4857fa5b9996b7d7931 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Sat, 25 Jun 2005 14:55:14 -0700 Subject: [PATCH] swsusp: fix nr_copy_pages The following patch moves the recalculation of nr_copy_pages so that the right number is used in the calculation of the size of memory and swap needed. It prevents swsusp from attempting to suspend if there is not enough memory and/or swap (which is unlikely anyway). Signed-off-by: Rafael J. Wysocki Signed-off-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/swsusp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 9a3ca659a436..c285fc5a2320 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -781,18 +781,18 @@ static int swsusp_alloc(void) { int error; + pagedir_nosave = NULL; + nr_copy_pages = calc_nr(nr_copy_pages); + pr_debug("suspend: (pages needed: %d + %d free: %d)\n", nr_copy_pages, PAGES_FOR_IO, nr_free_pages()); - pagedir_nosave = NULL; if (!enough_free_mem()) return -ENOMEM; if (!enough_swap()) return -ENOSPC; - nr_copy_pages = calc_nr(nr_copy_pages); - if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) { printk(KERN_ERR "suspend: Allocating pagedir failed.\n"); return -ENOMEM; -- cgit v1.2.3 From ac25575203c11145066ea5cb583354cb5f0a8ade Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Sat, 25 Jun 2005 14:55:15 -0700 Subject: [PATCH] CPU hotplug printk fix In the cpu hotplug case, per-cpu data possibly isn't initialized even the system state is 'running'. As the comments say in the original code, some console drivers assume per-cpu resources have been allocated. radeon fb is one such driver, which uses kmalloc. After a CPU is down, the per-cpu data of slab is freed, so the system crashed when printing some info. Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 3a442bfb8bee..5092397fac29 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -588,8 +588,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) log_level_unknown = 1; } - if (!cpu_online(smp_processor_id()) && - system_state != SYSTEM_RUNNING) { + if (!cpu_online(smp_processor_id())) { /* * Some console drivers may assume that per-cpu resources have * been allocated. 
So don't allow them to be called by this -- cgit v1.2.3 From 19c324397a55edf122822f829779b46b9cb385dd Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Sat, 25 Jun 2005 14:55:17 -0700 Subject: [PATCH] swsusp: only allow it when it makes sense Show swsuspend only on .config where it can compile. I got this on PPC32 && SMP: kernel/power/smp.c:24: error: storage size of `ctxt' isn't known Also mark swsusp as no longer experimental. Signed-off-by: Olaf Hering Signed-off-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index fdb377636505..2c7121d9bff1 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -27,8 +27,8 @@ config PM_DEBUG like suspend support. config SOFTWARE_SUSPEND - bool "Software Suspend (EXPERIMENTAL)" - depends on EXPERIMENTAL && PM && SWAP && (SUSPEND_SMP || !SMP) + bool "Software Suspend" + depends on EXPERIMENTAL && PM && SWAP && ((X86 && SMP) || ((FVR || PPC32 || X86) && !SMP)) ---help--- Enable the possibility of suspending the machine. It doesn't need APM. -- cgit v1.2.3 From e0f364f4069f76a3613a797c388832822d179076 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:06 -0700 Subject: [PATCH] sched: cleanup wake_idle New sched-domains code means we don't get spans with offline CPUs in them. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 76080d142e3d..86be13ee5006 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -927,14 +927,14 @@ static int wake_idle(int cpu, task_t *p) for_each_domain(cpu, sd) { if (sd->flags & SD_WAKE_IDLE) { - cpus_and(tmp, sd->span, cpu_online_map); - cpus_and(tmp, tmp, p->cpus_allowed); + cpus_and(tmp, sd->span, p->cpus_allowed); for_each_cpu_mask(i, tmp) { if (idle_cpu(i)) return i; } } - else break; + else + break; } return cpu; } -- cgit v1.2.3 From 8102679447da7fcbcb5226ee0207c3a034bc6d5f Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:07 -0700 Subject: [PATCH] sched: improve load balancing pinned tasks John Hawkes explained the problem best: A large number of processes that are pinned to a single CPU results in every other CPU's load_balance() seeing this overloaded CPU as "busiest", yet move_tasks() never finds a task to pull-migrate. This condition occurs during module unload, but can also occur as a denial-of-service using sys_sched_setaffinity(). Several hundred CPUs performing this fruitless load_balance() will livelock on the busiest CPU's runqueue lock. A smaller number of CPUs will livelock if the pinned task count gets high. Expanding slightly on John's patch, this one attempts to work out whether the balancing failure has been due to too many tasks pinned on the runqueue. This allows it to be basically invisible to the regular blancing paths (ie. when there are no pinned tasks). We can use this extra knowledge to shut down the balancing faster, and ensure the migration threads don't start running which is another problem observed in the wild. 
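As a rough standalone restatement of the idea (the real change follows in the diff below): keep an "everything was pinned" flag that is cleared the first time any candidate task passes its affinity check. The stub type and helper names here are invented for the sketch and are not kernel APIs.

struct task_stub { unsigned long cpus_allowed; };	/* hypothetical stand-in */

static int can_migrate(struct task_stub *p, int this_cpu, int *pinned)
{
	if (!((p->cpus_allowed >> this_cpu) & 1))
		return 0;	/* not allowed here; *pinned stays set */
	*pinned = 0;		/* at least one task could have been moved */
	return 1;
}

static int move_some_tasks(struct task_stub *tasks, int n, int this_cpu,
			   int *all_pinned)
{
	int i, moved = 0, pinned = 1;	/* assume everything is pinned... */

	for (i = 0; i < n; i++)
		if (can_migrate(&tasks[i], this_cpu, &pinned))
			moved++;	/* ...until one task proves otherwise */

	if (all_pinned)
		*all_pinned = pinned;
	return moved;
}

With that flag, the balancer can take its "balanced" exit instead of bumping the failure counter and waking the migration thread when every task on the busiest runqueue is simply not allowed to run here.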
Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 62 ++++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 86be13ee5006..2794c79b9197 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1632,7 +1632,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, */ static inline int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, - struct sched_domain *sd, enum idle_type idle) + struct sched_domain *sd, enum idle_type idle, int *all_pinned) { /* * We do not migrate tasks that are: @@ -1640,10 +1640,12 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, * 2) cannot be migrated to this CPU due to cpus_allowed, or * 3) are cache-hot on their current CPU. */ - if (task_running(rq, p)) - return 0; if (!cpu_isset(this_cpu, p->cpus_allowed)) return 0; + *all_pinned = 0; + + if (task_running(rq, p)) + return 0; /* * Aggressive migration if: @@ -1656,7 +1658,7 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, return 1; if (task_hot(p, rq->timestamp_last_tick, sd)) - return 0; + return 0; return 1; } @@ -1669,16 +1671,18 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, */ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, unsigned long max_nr_move, struct sched_domain *sd, - enum idle_type idle) + enum idle_type idle, int *all_pinned) { prio_array_t *array, *dst_array; struct list_head *head, *curr; - int idx, pulled = 0; + int idx, pulled = 0, pinned = 0; task_t *tmp; - if (max_nr_move <= 0 || busiest->nr_running <= 1) + if (max_nr_move == 0) goto out; + pinned = 1; + /* * We first consider expired tasks. Those will likely not be * executed in the near future, and they are most likely to @@ -1717,7 +1721,7 @@ skip_queue: curr = curr->prev; - if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) { + if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { if (curr != head) goto skip_queue; idx++; @@ -1746,6 +1750,9 @@ out: * inside pull_task(). 
*/ schedstat_add(sd, lb_gained[idle], pulled); + + if (all_pinned) + *all_pinned = pinned; return pulled; } @@ -1917,7 +1924,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, struct sched_group *group; runqueue_t *busiest; unsigned long imbalance; - int nr_moved; + int nr_moved, all_pinned; + int active_balance = 0; spin_lock(&this_rq->lock); schedstat_inc(sd, lb_cnt[idle]); @@ -1956,9 +1964,15 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, */ double_lock_balance(this_rq, busiest); nr_moved = move_tasks(this_rq, this_cpu, busiest, - imbalance, sd, idle); + imbalance, sd, idle, + &all_pinned); spin_unlock(&busiest->lock); + + /* All tasks on this runqueue were pinned by CPU affinity */ + if (unlikely(all_pinned)) + goto out_balanced; } + spin_unlock(&this_rq->lock); if (!nr_moved) { @@ -1966,16 +1980,15 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, sd->nr_balance_failed++; if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { - int wake = 0; spin_lock(&busiest->lock); if (!busiest->active_balance) { busiest->active_balance = 1; busiest->push_cpu = this_cpu; - wake = 1; + active_balance = 1; } spin_unlock(&busiest->lock); - if (wake) + if (active_balance) wake_up_process(busiest->migration_thread); /* @@ -1984,18 +1997,21 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, */ sd->nr_balance_failed = sd->cache_nice_tries; } - - /* - * We were unbalanced, but unsuccessful in move_tasks(), - * so bump the balance_interval to lessen the lock contention. - */ - if (sd->balance_interval < sd->max_interval) - sd->balance_interval++; - } else { + } else sd->nr_balance_failed = 0; + if (likely(!active_balance)) { /* We were unbalanced, so reset the balancing interval */ sd->balance_interval = sd->min_interval; + } else { + /* + * If we've begun active balancing, start to back off. This + * case may not be covered by the all_pinned logic if there + * is only 1 task on the busy runqueue (because we don't call + * move_tasks). + */ + if (sd->balance_interval < sd->max_interval) + sd->balance_interval *= 2; } return nr_moved; @@ -2047,7 +2063,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance); nr_moved = move_tasks(this_rq, this_cpu, busiest, - imbalance, sd, NEWLY_IDLE); + imbalance, sd, NEWLY_IDLE, NULL); if (!nr_moved) schedstat_inc(sd, lb_failed[NEWLY_IDLE]); @@ -2126,7 +2142,7 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) /* move a task from busiest_rq to target_rq */ double_lock_balance(busiest_rq, target_rq); if (move_tasks(target_rq, cpu, busiest_rq, - 1, sd, SCHED_IDLE)) { + 1, sd, SCHED_IDLE, NULL)) { schedstat_inc(sd, alb_pushed); } else { schedstat_inc(sd, alb_failed); -- cgit v1.2.3 From 16cfb1c04c3cbe3759f339d3333e7e1e7d59712a Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:08 -0700 Subject: [PATCH] sched: reduce active load balancing Fix up active load balancing a bit so it doesn't get called when it shouldn't. Reset the nr_balance_failed counter at more points where we have found conditions to be balanced. This reduces too aggressive active balancing seen on some workloads. 
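A minimal sketch of the counter relationship being tuned here, with invented stub names; only nr_balance_failed and the cache_nice_tries+2 threshold mirror the real code in the diffs above and below.

struct dom_stub { unsigned int nr_balance_failed, cache_nice_tries; };

static void note_balanced(struct dom_stub *sd)
{
	/* every path that concludes "balanced" now clears the failure count */
	sd->nr_balance_failed = 0;
}

static int want_active_balance(struct dom_stub *sd)
{
	/* active balancing is reserved for repeated genuine failures */
	return sd->nr_balance_failed > sd->cache_nice_tries + 2;
}

Clearing the counter in the out_balanced paths keeps transient imbalances from accumulating into an active_load_balance() kick.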
Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 2794c79b9197..03d737791c1a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2021,6 +2021,7 @@ out_balanced: schedstat_inc(sd, lb_balanced[idle]); + sd->nr_balance_failed = 0; /* tune up the balancing interval */ if (sd->balance_interval < sd->max_interval) sd->balance_interval *= 2; @@ -2046,16 +2047,14 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE); if (!group) { - schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]); - goto out; + goto out_balanced; } busiest = find_busiest_queue(group); if (!busiest || busiest == this_rq) { - schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); - goto out; + goto out_balanced; } /* Attempt to move tasks */ @@ -2066,11 +2065,16 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, imbalance, sd, NEWLY_IDLE, NULL); if (!nr_moved) schedstat_inc(sd, lb_failed[NEWLY_IDLE]); + else + sd->nr_balance_failed = 0; spin_unlock(&busiest->lock); - -out: return nr_moved; + +out_balanced: + schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); + sd->nr_balance_failed = 0; + return 0; } /* -- cgit v1.2.3 From 3950745131e23472fb5ace2ee4a2093e7590ec69 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:09 -0700 Subject: [PATCH] sched: fix SMT scheduling problems SMT balancing has a couple of problems. Firstly, active_load_balance is too complex - basically it should be a dumb helper for when the periodic balancer has determined there is an imbalance, but gets stuck because the task is running. So rip out all its "smarts", and just make it move one task to the target CPU. Second, the busy CPU's sched-domain tree was being used for active balancing. This means that it may not see that nr_balance_failed has reached a critical level. So use the target CPU's sched-domain tree for this. We can do this because we hold its runqueue lock. Lastly, reset nr_balance_failed to a point where we allow cache hot migration. This will help ensure active load balancing is successful. Thanks to Suresh Siddha for pointing out these issues. Signed-off-by: Nick Piggin Signed-off-by: Suresh Siddha Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 76 ++++++++++++++++++++++++---------------------------------- 1 file changed, 31 insertions(+), 45 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 03d737791c1a..41e69b5ee652 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1995,7 +1995,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, * We've kicked active balancing, reset the failure * counter. 
*/ - sd->nr_balance_failed = sd->cache_nice_tries; + sd->nr_balance_failed = sd->cache_nice_tries+1; } } else sd->nr_balance_failed = 0; @@ -2106,56 +2106,42 @@ static inline void idle_balance(int this_cpu, runqueue_t *this_rq) static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) { struct sched_domain *sd; - struct sched_group *cpu_group; runqueue_t *target_rq; - cpumask_t visited_cpus; - int cpu; + int target_cpu = busiest_rq->push_cpu; + + if (busiest_rq->nr_running <= 1) + /* no task to move */ + return; + + target_rq = cpu_rq(target_cpu); /* - * Search for suitable CPUs to push tasks to in successively higher - * domains with SD_LOAD_BALANCE set. + * This condition is "impossible", if it occurs + * we need to fix it. Originally reported by + * Bjorn Helgaas on a 128-cpu setup. */ - visited_cpus = CPU_MASK_NONE; - for_each_domain(busiest_cpu, sd) { - if (!(sd->flags & SD_LOAD_BALANCE)) - /* no more domains to search */ - break; + BUG_ON(busiest_rq == target_rq); - schedstat_inc(sd, alb_cnt); + /* move a task from busiest_rq to target_rq */ + double_lock_balance(busiest_rq, target_rq); - cpu_group = sd->groups; - do { - for_each_cpu_mask(cpu, cpu_group->cpumask) { - if (busiest_rq->nr_running <= 1) - /* no more tasks left to move */ - return; - if (cpu_isset(cpu, visited_cpus)) - continue; - cpu_set(cpu, visited_cpus); - if (!cpu_and_siblings_are_idle(cpu) || cpu == busiest_cpu) - continue; - - target_rq = cpu_rq(cpu); - /* - * This condition is "impossible", if it occurs - * we need to fix it. Originally reported by - * Bjorn Helgaas on a 128-cpu setup. - */ - BUG_ON(busiest_rq == target_rq); - - /* move a task from busiest_rq to target_rq */ - double_lock_balance(busiest_rq, target_rq); - if (move_tasks(target_rq, cpu, busiest_rq, - 1, sd, SCHED_IDLE, NULL)) { - schedstat_inc(sd, alb_pushed); - } else { - schedstat_inc(sd, alb_failed); - } - spin_unlock(&target_rq->lock); - } - cpu_group = cpu_group->next; - } while (cpu_group != sd->groups); - } + /* Search for an sd spanning us and the target CPU. */ + for_each_domain(target_cpu, sd) + if ((sd->flags & SD_LOAD_BALANCE) && + cpu_isset(busiest_cpu, sd->span)) + break; + + if (unlikely(sd == NULL)) + goto out; + + schedstat_inc(sd, alb_cnt); + + if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL)) + schedstat_inc(sd, alb_pushed); + else + schedstat_inc(sd, alb_failed); +out: + spin_unlock(&target_rq->lock); } /* -- cgit v1.2.3 From db935dbd43c4290d710304662cc908f733afea06 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:11 -0700 Subject: [PATCH] sched: add debugging These conditions should now be impossible, and we need to fix them if they happen. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 41e69b5ee652..8b035a8b3c30 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1942,15 +1942,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, goto out_balanced; } - /* - * This should be "impossible", but since load - * balancing is inherently racy and statistical, - * it could happen in theory. 
- */ - if (unlikely(busiest == this_rq)) { - WARN_ON(1); - goto out_balanced; - } + BUG_ON(busiest == this_rq); schedstat_add(sd, lb_imbalance[idle], imbalance); @@ -2052,11 +2044,13 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, } busiest = find_busiest_queue(group); - if (!busiest || busiest == this_rq) { + if (!busiest) { schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); goto out_balanced; } + BUG_ON(busiest == this_rq); + /* Attempt to move tasks */ double_lock_balance(this_rq, busiest); -- cgit v1.2.3 From 99b61ccf0bf0e9a85823d39a5db6a1519caeb13d Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:12 -0700 Subject: [PATCH] sched: less aggressive idle balancing Remove the special casing for idle CPU balancing. Things like this are hurting for example on SMT, where are single sibling being idle doesn't really warrant a really aggressive pull over the NUMA domain, for example. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 8b035a8b3c30..f665de34ed82 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1877,15 +1877,9 @@ nextgroup: /* Get rid of the scaling factor, rounding down as we divide */ *imbalance = *imbalance / SCHED_LOAD_SCALE; - return busiest; out_balanced: - if (busiest && (idle == NEWLY_IDLE || - (idle == SCHED_IDLE && max_load > SCHED_LOAD_SCALE)) ) { - *imbalance = 1; - return busiest; - } *imbalance = 0; return NULL; -- cgit v1.2.3 From 7897986bad8f6cd50d6149345aca7f6480f49464 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:13 -0700 Subject: [PATCH] sched: balance timers Do CPU load averaging over a number of different intervals. Allow each interval to be chosen by sending a parameter to source_load and target_load. 0 is instantaneous, idx > 0 returns a decaying average with the most recent sample weighted at 2^(idx-1). To a maximum of 3 (could be easily increased). So generally a higher number will result in more conservative balancing. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 138 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 74 insertions(+), 64 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index f665de34ed82..b597b07e7911 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -206,7 +206,7 @@ struct runqueue { */ unsigned long nr_running; #ifdef CONFIG_SMP - unsigned long cpu_load; + unsigned long cpu_load[3]; #endif unsigned long long nr_switches; @@ -886,23 +886,27 @@ void kick_process(task_t *p) * We want to under-estimate the load of migration sources, to * balance conservatively. 
*/ -static inline unsigned long source_load(int cpu) +static inline unsigned long source_load(int cpu, int type) { runqueue_t *rq = cpu_rq(cpu); unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE; + if (type == 0) + return load_now; - return min(rq->cpu_load, load_now); + return min(rq->cpu_load[type-1], load_now); } /* * Return a high guess at the load of a migration-target cpu */ -static inline unsigned long target_load(int cpu) +static inline unsigned long target_load(int cpu, int type) { runqueue_t *rq = cpu_rq(cpu); unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE; + if (type == 0) + return load_now; - return max(rq->cpu_load, load_now); + return max(rq->cpu_load[type-1], load_now); } #endif @@ -967,7 +971,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync) runqueue_t *rq; #ifdef CONFIG_SMP unsigned long load, this_load; - struct sched_domain *sd; + struct sched_domain *sd, *this_sd = NULL; int new_cpu; #endif @@ -986,72 +990,64 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync) if (unlikely(task_running(rq, p))) goto out_activate; -#ifdef CONFIG_SCHEDSTATS + new_cpu = cpu; + schedstat_inc(rq, ttwu_cnt); if (cpu == this_cpu) { schedstat_inc(rq, ttwu_local); - } else { - for_each_domain(this_cpu, sd) { - if (cpu_isset(cpu, sd->span)) { - schedstat_inc(sd, ttwu_wake_remote); - break; - } + goto out_set_cpu; + } + + for_each_domain(this_cpu, sd) { + if (cpu_isset(cpu, sd->span)) { + schedstat_inc(sd, ttwu_wake_remote); + this_sd = sd; + break; } } -#endif - new_cpu = cpu; - if (cpu == this_cpu || unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) + if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) goto out_set_cpu; - load = source_load(cpu); - this_load = target_load(this_cpu); - /* - * If sync wakeup then subtract the (maximum possible) effect of - * the currently running task from the load of the current CPU: + * Check for affine wakeup and passive balancing possibilities. */ - if (sync) - this_load -= SCHED_LOAD_SCALE; - - /* Don't pull the task off an idle CPU to a busy one */ - if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2) - goto out_set_cpu; + if (this_sd) { + int idx = this_sd->wake_idx; + unsigned int imbalance; - new_cpu = this_cpu; /* Wake to this CPU if we can */ + load = source_load(cpu, idx); + this_load = target_load(this_cpu, idx); - /* - * Scan domains for affine wakeup and passive balancing - * possibilities. - */ - for_each_domain(this_cpu, sd) { - unsigned int imbalance; /* - * Start passive balancing when half the imbalance_pct - * limit is reached. + * If sync wakeup then subtract the (maximum possible) effect of + * the currently running task from the load of the current CPU: */ - imbalance = sd->imbalance_pct + (sd->imbalance_pct - 100) / 2; + if (sync) + this_load -= SCHED_LOAD_SCALE; + + /* Don't pull the task off an idle CPU to a busy one */ + if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2) + goto out_set_cpu; - if ((sd->flags & SD_WAKE_AFFINE) && - !task_hot(p, rq->timestamp_last_tick, sd)) { + new_cpu = this_cpu; /* Wake to this CPU if we can */ + + if ((this_sd->flags & SD_WAKE_AFFINE) && + !task_hot(p, rq->timestamp_last_tick, this_sd)) { /* * This domain has SD_WAKE_AFFINE and p is cache cold * in this domain. 
*/ - if (cpu_isset(cpu, sd->span)) { - schedstat_inc(sd, ttwu_move_affine); - goto out_set_cpu; - } - } else if ((sd->flags & SD_WAKE_BALANCE) && + schedstat_inc(this_sd, ttwu_move_affine); + goto out_set_cpu; + } else if ((this_sd->flags & SD_WAKE_BALANCE) && imbalance*this_load <= 100*load) { /* * This domain has SD_WAKE_BALANCE and there is * an imbalance. */ - if (cpu_isset(cpu, sd->span)) { - schedstat_inc(sd, ttwu_move_balance); - goto out_set_cpu; - } + schedstat_inc(this_sd, ttwu_move_balance); + goto out_set_cpu; } } @@ -1509,7 +1505,7 @@ static int find_idlest_cpu(struct task_struct *p, int this_cpu, cpus_and(mask, sd->span, p->cpus_allowed); for_each_cpu_mask(i, mask) { - load = target_load(i); + load = target_load(i, sd->wake_idx); if (load < min_load) { min_cpu = i; @@ -1522,7 +1518,7 @@ static int find_idlest_cpu(struct task_struct *p, int this_cpu, } /* add +1 to account for the new task */ - this_load = source_load(this_cpu) + SCHED_LOAD_SCALE; + this_load = source_load(this_cpu, sd->wake_idx) + SCHED_LOAD_SCALE; /* * Would with the addition of the new task to the @@ -1767,8 +1763,15 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, { struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; unsigned long max_load, avg_load, total_load, this_load, total_pwr; + int load_idx; max_load = this_load = total_load = total_pwr = 0; + if (idle == NOT_IDLE) + load_idx = sd->busy_idx; + else if (idle == NEWLY_IDLE) + load_idx = sd->newidle_idx; + else + load_idx = sd->idle_idx; do { unsigned long load; @@ -1783,9 +1786,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, for_each_cpu_mask(i, group->cpumask) { /* Bias balancing toward cpus of our domain */ if (local_group) - load = target_load(i); + load = target_load(i, load_idx); else - load = source_load(i); + load = source_load(i, load_idx); avg_load += load; } @@ -1895,7 +1898,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group) int i; for_each_cpu_mask(i, group->cpumask) { - load = source_load(i); + load = source_load(i, 0); if (load > max_load) { max_load = load; @@ -2150,18 +2153,23 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq, unsigned long old_load, this_load; unsigned long j = jiffies + CPU_OFFSET(this_cpu); struct sched_domain *sd; + int i; - /* Update our load */ - old_load = this_rq->cpu_load; this_load = this_rq->nr_running * SCHED_LOAD_SCALE; - /* - * Round up the averaging division if load is increasing. This - * prevents us from getting stuck on 9 if the load is 10, for - * example. - */ - if (this_load > old_load) - old_load++; - this_rq->cpu_load = (old_load + this_load) / 2; + /* Update our load */ + for (i = 0; i < 3; i++) { + unsigned long new_load = this_load; + int scale = 1 << i; + old_load = this_rq->cpu_load[i]; + /* + * Round up the averaging division if load is increasing. This + * prevents us from getting stuck on 9 if the load is 10, for + * example. 
+ */ + if (new_load > old_load) + new_load += scale-1; + this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale; + } for_each_domain(this_cpu, sd) { unsigned long interval; @@ -4921,13 +4929,15 @@ void __init sched_init(void) rq = cpu_rq(i); spin_lock_init(&rq->lock); + rq->nr_running = 0; rq->active = rq->arrays; rq->expired = rq->arrays + 1; rq->best_expired_prio = MAX_PRIO; #ifdef CONFIG_SMP rq->sd = &sched_domain_dummy; - rq->cpu_load = 0; + for (j = 1; j < 3; j++) + rq->cpu_load[j] = 0; rq->active_balance = 0; rq->push_cpu = 0; rq->migration_thread = NULL; -- cgit v1.2.3 From a3f21bce1fefdf92a4d1705e888d390b10f3ac6f Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:15 -0700 Subject: [PATCH] sched: tweak affine wakeups Do less affine wakeups. We're trying to reduce dbt2-pgsql idle time regressions here... make sure we don't don't move tasks the wrong way in an imbalance condition. Also, remove the cache coldness requirement from the calculation - this seems to induce sharp cutoff points where behaviour will suddenly change on some workloads if the load creeps slightly over or under some point. It is good for periodic balancing because in that case have otherwise have no other context to determine what task to move. But also make a minor tweak to "wake balancing" - the imbalance tolerance is now set at half the domain's imbalance, so we get the opportunity to do wake balancing before the more random periodic rebalancing gets preformed. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 57 ++++++++++++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index b597b07e7911..5ae3568eed0b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1016,38 +1016,45 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync) int idx = this_sd->wake_idx; unsigned int imbalance; + imbalance = 100 + (this_sd->imbalance_pct - 100) / 2; + load = source_load(cpu, idx); this_load = target_load(this_cpu, idx); - /* - * If sync wakeup then subtract the (maximum possible) effect of - * the currently running task from the load of the current CPU: - */ - if (sync) - this_load -= SCHED_LOAD_SCALE; - - /* Don't pull the task off an idle CPU to a busy one */ - if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2) - goto out_set_cpu; - new_cpu = this_cpu; /* Wake to this CPU if we can */ - if ((this_sd->flags & SD_WAKE_AFFINE) && - !task_hot(p, rq->timestamp_last_tick, this_sd)) { - /* - * This domain has SD_WAKE_AFFINE and p is cache cold - * in this domain. - */ - schedstat_inc(this_sd, ttwu_move_affine); - goto out_set_cpu; - } else if ((this_sd->flags & SD_WAKE_BALANCE) && - imbalance*this_load <= 100*load) { + if (this_sd->flags & SD_WAKE_AFFINE) { + unsigned long tl = this_load; /* - * This domain has SD_WAKE_BALANCE and there is - * an imbalance. + * If sync wakeup then subtract the (maximum possible) + * effect of the currently running task from the load + * of the current CPU: */ - schedstat_inc(this_sd, ttwu_move_balance); - goto out_set_cpu; + if (sync) + tl -= SCHED_LOAD_SCALE; + + if ((tl <= load && + tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) || + 100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) { + /* + * This domain has SD_WAKE_AFFINE and + * p is cache cold in this domain, and + * there is no bad imbalance. 
+ */ + schedstat_inc(this_sd, ttwu_move_affine); + goto out_set_cpu; + } + } + + /* + * Start passive balancing when half the imbalance_pct + * limit is reached. + */ + if (this_sd->flags & SD_WAKE_BALANCE) { + if (imbalance*this_load <= 100*load) { + schedstat_inc(this_sd, ttwu_move_balance); + goto out_set_cpu; + } } } -- cgit v1.2.3 From cafb20c1f9976a70d633bb1e1c8c24eab00e4e80 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:17 -0700 Subject: [PATCH] sched: no aggressive idle balancing Remove the very aggressive idle stuff that has recently gone into 2.6 - it is going against the direction we are trying to go. Hopefully we can regain performance through other methods. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 5ae3568eed0b..396724a2519f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -414,22 +414,6 @@ static inline runqueue_t *this_rq_lock(void) return rq; } -#ifdef CONFIG_SCHED_SMT -static int cpu_and_siblings_are_idle(int cpu) -{ - int sib; - for_each_cpu_mask(sib, cpu_sibling_map[cpu]) { - if (idle_cpu(sib)) - continue; - return 0; - } - - return 1; -} -#else -#define cpu_and_siblings_are_idle(A) idle_cpu(A) -#endif - #ifdef CONFIG_SCHEDSTATS /* * Called when a process is dequeued from the active array and given @@ -1652,12 +1636,11 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, /* * Aggressive migration if: - * 1) the [whole] cpu is idle, or + * 1) task is cache cold, or * 2) too many balance attempts have failed. */ - if (cpu_and_siblings_are_idle(this_cpu) || \ - sd->nr_balance_failed > sd->cache_nice_tries) + if (sd->nr_balance_failed > sd->cache_nice_tries) return 1; if (task_hot(p, rq->timestamp_last_tick, sd)) -- cgit v1.2.3 From 147cbb4bbe991452698f0772d8292f22825710ba Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:19 -0700 Subject: [PATCH] sched: balance on fork Reimplement the balance on exec balancing to be sched-domains aware. Use this to also do balance on fork balancing. Make x86_64 do balance on fork over the NUMA domain. The problem that the non sched domains aware blancing became apparent on dual core, multi socket opterons. What we want is for the new tasks to be sent to a different socket, but more often than not, we would first load up our sibling core, or fill two cores of a single remote socket before selecting a new one. This gives large improvements to STREAM on such systems. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 164 ++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 109 insertions(+), 55 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 396724a2519f..7ecc237e2aab 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -893,6 +893,79 @@ static inline unsigned long target_load(int cpu, int type) return max(rq->cpu_load[type-1], load_now); } +/* + * find_idlest_group finds and returns the least busy CPU group within the + * domain. 
+ */ +static struct sched_group * +find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) +{ + struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; + unsigned long min_load = ULONG_MAX, this_load = 0; + int load_idx = sd->forkexec_idx; + int imbalance = 100 + (sd->imbalance_pct-100)/2; + + do { + unsigned long load, avg_load; + int local_group; + int i; + + local_group = cpu_isset(this_cpu, group->cpumask); + /* XXX: put a cpus allowed check */ + + /* Tally up the load of all CPUs in the group */ + avg_load = 0; + + for_each_cpu_mask(i, group->cpumask) { + /* Bias balancing toward cpus of our domain */ + if (local_group) + load = source_load(i, load_idx); + else + load = target_load(i, load_idx); + + avg_load += load; + } + + /* Adjust by relative CPU power of the group */ + avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; + + if (local_group) { + this_load = avg_load; + this = group; + } else if (avg_load < min_load) { + min_load = avg_load; + idlest = group; + } + group = group->next; + } while (group != sd->groups); + + if (!idlest || 100*this_load < imbalance*min_load) + return NULL; + return idlest; +} + +/* + * find_idlest_queue - find the idlest runqueue among the cpus in group. + */ +static int find_idlest_cpu(struct sched_group *group, int this_cpu) +{ + unsigned long load, min_load = ULONG_MAX; + int idlest = -1; + int i; + + for_each_cpu_mask(i, group->cpumask) { + load = source_load(i, 0); + + if (load < min_load || (load == min_load && i == this_cpu)) { + min_load = load; + idlest = i; + } + } + + return idlest; +} + + #endif /* @@ -1107,11 +1180,6 @@ int fastcall wake_up_state(task_t *p, unsigned int state) return try_to_wake_up(p, state, 0); } -#ifdef CONFIG_SMP -static int find_idlest_cpu(struct task_struct *p, int this_cpu, - struct sched_domain *sd); -#endif - /* * Perform scheduler related setup for a newly forked process p. * p is forked by current. @@ -1181,12 +1249,38 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) unsigned long flags; int this_cpu, cpu; runqueue_t *rq, *this_rq; +#ifdef CONFIG_SMP + struct sched_domain *tmp, *sd = NULL; +#endif rq = task_rq_lock(p, &flags); - cpu = task_cpu(p); + BUG_ON(p->state != TASK_RUNNING); this_cpu = smp_processor_id(); + cpu = task_cpu(p); - BUG_ON(p->state != TASK_RUNNING); +#ifdef CONFIG_SMP + for_each_domain(cpu, tmp) + if (tmp->flags & SD_BALANCE_FORK) + sd = tmp; + + if (sd) { + struct sched_group *group; + + cpu = task_cpu(p); + group = find_idlest_group(sd, p, cpu); + if (group) { + int new_cpu; + new_cpu = find_idlest_cpu(group, cpu); + if (new_cpu != -1 && new_cpu != cpu && + cpu_isset(new_cpu, p->cpus_allowed)) { + set_task_cpu(p, new_cpu); + task_rq_unlock(rq, &flags); + rq = task_rq_lock(p, &flags); + cpu = task_cpu(p); + } + } + } +#endif /* * We decrease the sleep average of forking parents @@ -1480,51 +1574,6 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) } } -/* - * find_idlest_cpu - find the least busy runqueue. 
- */ -static int find_idlest_cpu(struct task_struct *p, int this_cpu, - struct sched_domain *sd) -{ - unsigned long load, min_load, this_load; - int i, min_cpu; - cpumask_t mask; - - min_cpu = UINT_MAX; - min_load = ULONG_MAX; - - cpus_and(mask, sd->span, p->cpus_allowed); - - for_each_cpu_mask(i, mask) { - load = target_load(i, sd->wake_idx); - - if (load < min_load) { - min_cpu = i; - min_load = load; - - /* break out early on an idle CPU: */ - if (!min_load) - break; - } - } - - /* add +1 to account for the new task */ - this_load = source_load(this_cpu, sd->wake_idx) + SCHED_LOAD_SCALE; - - /* - * Would with the addition of the new task to the - * current CPU there be an imbalance between this - * CPU and the idlest CPU? - * - * Use half of the balancing threshold - new-context is - * a good opportunity to balance. - */ - if (min_load*(100 + (sd->imbalance_pct-100)/2) < this_load*100) - return min_cpu; - - return this_cpu; -} - /* * If dest_cpu is allowed for this process, migrate the task to it. * This is accomplished by forcing the cpu_allowed mask to only @@ -1578,8 +1627,15 @@ void sched_exec(void) sd = tmp; if (sd) { + struct sched_group *group; schedstat_inc(sd, sbe_attempts); - new_cpu = find_idlest_cpu(current, this_cpu, sd); + group = find_idlest_group(sd, current, this_cpu); + if (!group) + goto out; + new_cpu = find_idlest_cpu(group, this_cpu); + if (new_cpu == -1) + goto out; + if (new_cpu != this_cpu) { schedstat_inc(sd, sbe_pushed); put_cpu(); @@ -1792,12 +1848,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (local_group) { this_load = avg_load; this = group; - goto nextgroup; } else if (avg_load > max_load) { max_load = avg_load; busiest = group; } -nextgroup: group = group->next; } while (group != sd->groups); -- cgit v1.2.3 From 68767a0ae428801649d510d9a65bb71feed44dd1 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:20 -0700 Subject: [PATCH] sched: schedstats update for balance on fork Add SCHEDSTAT statistics for sched-balance-fork. 
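A hedged restatement of the accounting being added (the real counters live in struct sched_domain and are bumped with schedstat_inc(); the stub below only illustrates the bookkeeping): sbf_cnt counts balance-on-fork attempts, sbf_balanced the attempts that left the child on the forking CPU, and sbf_pushed the attempts that placed it elsewhere.

struct sbf_stub { unsigned long cnt, balanced, pushed; };	/* illustrative */

static void account_fork_balance(struct sbf_stub *s, int new_cpu, int cpu)
{
	s->cnt++;				/* every attempt is counted */
	if (new_cpu == -1 || new_cpu == cpu)
		s->balanced++;			/* no better CPU was found */
	else
		s->pushed++;			/* child placed on another CPU
						 * (the real code also re-checks
						 * cpus_allowed before pushing) */
}

Since the per-domain output format grows by these fields, SCHEDSTAT_VERSION is bumped to 12 so /proc/schedstat parsers can adapt or abort.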
Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 63 +++++++++++++++++++++++++++++++++------------------------- 1 file changed, 36 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 7ecc237e2aab..2711130cd973 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -309,7 +309,7 @@ static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) * bump this up when changing the output format or the meaning of an existing * format, so that tools can adapt (or abort) */ -#define SCHEDSTAT_VERSION 11 +#define SCHEDSTAT_VERSION 12 static int show_schedstat(struct seq_file *seq, void *v) { @@ -356,9 +356,10 @@ static int show_schedstat(struct seq_file *seq, void *v) sd->lb_nobusyq[itype], sd->lb_nobusyg[itype]); } - seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu\n", + seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", sd->alb_cnt, sd->alb_failed, sd->alb_pushed, - sd->sbe_pushed, sd->sbe_attempts, + sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed, + sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed, sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance); } #endif @@ -1264,24 +1265,34 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) sd = tmp; if (sd) { + int new_cpu; struct sched_group *group; + schedstat_inc(sd, sbf_cnt); cpu = task_cpu(p); group = find_idlest_group(sd, p, cpu); - if (group) { - int new_cpu; - new_cpu = find_idlest_cpu(group, cpu); - if (new_cpu != -1 && new_cpu != cpu && - cpu_isset(new_cpu, p->cpus_allowed)) { - set_task_cpu(p, new_cpu); - task_rq_unlock(rq, &flags); - rq = task_rq_lock(p, &flags); - cpu = task_cpu(p); - } + if (!group) { + schedstat_inc(sd, sbf_balanced); + goto no_forkbalance; + } + + new_cpu = find_idlest_cpu(group, cpu); + if (new_cpu == -1 || new_cpu == cpu) { + schedstat_inc(sd, sbf_balanced); + goto no_forkbalance; + } + + if (cpu_isset(new_cpu, p->cpus_allowed)) { + schedstat_inc(sd, sbf_pushed); + set_task_cpu(p, new_cpu); + task_rq_unlock(rq, &flags); + rq = task_rq_lock(p, &flags); + cpu = task_cpu(p); } } -#endif +no_forkbalance: +#endif /* * We decrease the sleep average of forking parents * and children as well, to keep max-interactive tasks @@ -1618,30 +1629,28 @@ void sched_exec(void) struct sched_domain *tmp, *sd = NULL; int new_cpu, this_cpu = get_cpu(); - /* Prefer the current CPU if there's only this task running */ - if (this_rq()->nr_running <= 1) - goto out; - for_each_domain(this_cpu, tmp) if (tmp->flags & SD_BALANCE_EXEC) sd = tmp; if (sd) { struct sched_group *group; - schedstat_inc(sd, sbe_attempts); + schedstat_inc(sd, sbe_cnt); group = find_idlest_group(sd, current, this_cpu); - if (!group) + if (!group) { + schedstat_inc(sd, sbe_balanced); goto out; + } new_cpu = find_idlest_cpu(group, this_cpu); - if (new_cpu == -1) + if (new_cpu == -1 || new_cpu == this_cpu) { + schedstat_inc(sd, sbe_balanced); goto out; - - if (new_cpu != this_cpu) { - schedstat_inc(sd, sbe_pushed); - put_cpu(); - sched_migrate_task(current, new_cpu); - return; } + + schedstat_inc(sd, sbe_pushed); + put_cpu(); + sched_migrate_task(current, new_cpu); + return; } out: put_cpu(); -- cgit v1.2.3 From 48c08d3f8ff94fa118187e4d8d4a5707bb85e59d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 25 Jun 2005 14:57:22 -0700 Subject: [PATCH] sched: uninline task_timeslice "Chen, Kenneth W" uninline task_timeslice() - reduces code footprint noticeably, and it's slowpath code. 
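The function being uninlined is the nice-to-timeslice mapping visible in the context of the hunk below. As a worked example, this userspace sketch re-derives the familiar 5 ms / 100 ms / 800 ms timeslices, assuming the constants sched.c used in this era (MAX_PRIO 140, MAX_USER_PRIO 40, DEF_TIMESLICE 100 ms, MIN_TIMESLICE 5 ms) and working in milliseconds rather than jiffies.

#include <stdio.h>

#define MAX_PRIO	140
#define MAX_USER_PRIO	40
#define DEF_TIMESLICE	100	/* ms */
#define MIN_TIMESLICE	5	/* ms */
#define NICE_TO_PRIO(n)	(120 + (n))

static int scale_prio(int x, int prio)
{
	int t = x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2);
	return t > MIN_TIMESLICE ? t : MIN_TIMESLICE;
}

static unsigned int task_timeslice_ms(int static_prio)
{
	if (static_prio < NICE_TO_PRIO(0))
		return scale_prio(DEF_TIMESLICE * 4, static_prio);
	return scale_prio(DEF_TIMESLICE, static_prio);
}

int main(void)
{
	/* nice -20 -> 800 ms, nice 0 -> 100 ms, nice 19 -> 5 ms */
	printf("%u %u %u\n", task_timeslice_ms(NICE_TO_PRIO(-20)),
	       task_timeslice_ms(NICE_TO_PRIO(0)),
	       task_timeslice_ms(NICE_TO_PRIO(19)));
	return 0;
}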
Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 2711130cd973..98bf1c091da5 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -166,7 +166,7 @@ #define SCALE_PRIO(x, prio) \ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE) -static inline unsigned int task_timeslice(task_t *p) +static unsigned int task_timeslice(task_t *p) { if (p->static_prio < NICE_TO_PRIO(0)) return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio); -- cgit v1.2.3 From 4866cde064afbb6c2a488c265e696879de616daa Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:23 -0700 Subject: [PATCH] sched: cleanup context switch locking Instead of requiring architecture code to interact with the scheduler's locking implementation, provide a couple of defines that can be used by the architecture to request runqueue unlocked context switches, and ask for interrupts to be enabled over the context switch. Also replaces the "switch_lock" used by these architectures with an oncpu flag (note, not a potentially slow bitflag). This eliminates one bus locked memory operation when context switching, and simplifies the task_running function. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 132 ++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 108 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 98bf1c091da5..b1410577f9a8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -268,14 +268,71 @@ static DEFINE_PER_CPU(struct runqueue, runqueues); #define task_rq(p) cpu_rq(task_cpu(p)) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) -/* - * Default context-switch locking: - */ #ifndef prepare_arch_switch -# define prepare_arch_switch(rq, next) do { } while (0) -# define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock) -# define task_running(rq, p) ((rq)->curr == (p)) +# define prepare_arch_switch(next) do { } while (0) +#endif +#ifndef finish_arch_switch +# define finish_arch_switch(prev) do { } while (0) +#endif + +#ifndef __ARCH_WANT_UNLOCKED_CTXSW +static inline int task_running(runqueue_t *rq, task_t *p) +{ + return rq->curr == p; +} + +static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) +{ +} + +static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) +{ + spin_unlock_irq(&rq->lock); +} + +#else /* __ARCH_WANT_UNLOCKED_CTXSW */ +static inline int task_running(runqueue_t *rq, task_t *p) +{ +#ifdef CONFIG_SMP + return p->oncpu; +#else + return rq->curr == p; +#endif +} + +static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) +{ +#ifdef CONFIG_SMP + /* + * We can optimise this out completely for !SMP, because the + * SMP rebalancing from interrupt is the only thing that cares + * here. + */ + next->oncpu = 1; +#endif +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + spin_unlock_irq(&rq->lock); +#else + spin_unlock(&rq->lock); #endif +} + +static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) +{ +#ifdef CONFIG_SMP + /* + * After ->oncpu is cleared, the task can be moved to a different CPU. + * We must ensure this doesn't happen until the switch is completely + * finished. 
+ */ + smp_wmb(); + prev->oncpu = 0; +#endif +#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_enable(); +#endif +} +#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ /* * task_rq_lock - lock the runqueue a given task resides on and disable @@ -1196,17 +1253,14 @@ void fastcall sched_fork(task_t *p) p->state = TASK_RUNNING; INIT_LIST_HEAD(&p->run_list); p->array = NULL; - spin_lock_init(&p->switch_lock); #ifdef CONFIG_SCHEDSTATS memset(&p->sched_info, 0, sizeof(p->sched_info)); #endif +#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) + p->oncpu = 0; +#endif #ifdef CONFIG_PREEMPT - /* - * During context-switch we hold precisely one spinlock, which - * schedule_tail drops. (in the common case it's this_rq()->lock, - * but it also can be p->switch_lock.) So we compensate with a count - * of 1. Also, we want to start with kernel preemption disabled. - */ + /* Want to start with kernel preemption disabled. */ p->thread_info->preempt_count = 1; #endif /* @@ -1387,23 +1441,41 @@ void fastcall sched_exit(task_t * p) task_rq_unlock(rq, &flags); } +/** + * prepare_task_switch - prepare to switch tasks + * @rq: the runqueue preparing to switch + * @next: the task we are going to switch to. + * + * This is called with the rq lock held and interrupts off. It must + * be paired with a subsequent finish_task_switch after the context + * switch. + * + * prepare_task_switch sets up locking and calls architecture specific + * hooks. + */ +static inline void prepare_task_switch(runqueue_t *rq, task_t *next) +{ + prepare_lock_switch(rq, next); + prepare_arch_switch(next); +} + /** * finish_task_switch - clean up after a task-switch * @prev: the thread we just switched away from. * - * We enter this with the runqueue still locked, and finish_arch_switch() - * will unlock it along with doing any other architecture-specific cleanup - * actions. + * finish_task_switch must be called after the context switch, paired + * with a prepare_task_switch call before the context switch. + * finish_task_switch will reconcile locking set up by prepare_task_switch, + * and do any other architecture-specific cleanup actions. * * Note that we may have delayed dropping an mm in context_switch(). If * so, we finish that here outside of the runqueue lock. (Doing it * with the lock held can cause deadlocks; see schedule() for * details.) 
*/ -static inline void finish_task_switch(task_t *prev) +static inline void finish_task_switch(runqueue_t *rq, task_t *prev) __releases(rq->lock) { - runqueue_t *rq = this_rq(); struct mm_struct *mm = rq->prev_mm; unsigned long prev_task_flags; @@ -1421,7 +1493,8 @@ static inline void finish_task_switch(task_t *prev) * Manfred Spraul */ prev_task_flags = prev->flags; - finish_arch_switch(rq, prev); + finish_arch_switch(prev); + finish_lock_switch(rq, prev); if (mm) mmdrop(mm); if (unlikely(prev_task_flags & PF_DEAD)) @@ -1435,8 +1508,12 @@ static inline void finish_task_switch(task_t *prev) asmlinkage void schedule_tail(task_t *prev) __releases(rq->lock) { - finish_task_switch(prev); - + runqueue_t *rq = this_rq(); + finish_task_switch(rq, prev); +#ifdef __ARCH_WANT_UNLOCKED_CTXSW + /* In this case, finish_task_switch does not reenable preemption */ + preempt_enable(); +#endif if (current->set_child_tid) put_user(current->pid, current->set_child_tid); } @@ -2816,11 +2893,15 @@ switch_tasks: rq->curr = next; ++*switch_count; - prepare_arch_switch(rq, next); + prepare_task_switch(rq, next); prev = context_switch(rq, prev, next); barrier(); - - finish_task_switch(prev); + /* + * this_rq must be evaluated again because prev may have moved + * CPUs since it called schedule(), thus the 'rq' on its stack + * frame will be invalid. + */ + finish_task_switch(this_rq(), prev); } else spin_unlock_irq(&rq->lock); @@ -4085,6 +4166,9 @@ void __devinit init_idle(task_t *idle, int cpu) spin_lock_irqsave(&rq->lock, flags); rq->curr = rq->idle = idle; +#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) + idle->oncpu = 1; +#endif set_tsk_need_resched(idle); spin_unlock_irqrestore(&rq->lock, flags); -- cgit v1.2.3 From 41c7ce9ad9a859871dffbe7dbc8b1f9571724e3c Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:24 -0700 Subject: [PATCH] sched: null domains Fix the last 2 places that directly access a runqueue's sched-domain and assume it cannot be NULL. That allows the use of NULL for domain, instead of a dummy domain, to signify no balancing is to happen. No functional changes. 
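A hedged sketch of the convention this introduces, with invented stub types: callers stop assuming rq->sd points at a real domain and instead walk a possibly empty tree, treating "no domain with the flag" as "nothing to do".

struct dom_stub { struct dom_stub *parent; unsigned int flags; };
#define STUB_SHARE_CPUPOWER	0x1	/* illustrative flag bit */

static struct dom_stub *find_cpupower_domain(struct dom_stub *base)
{
	struct dom_stub *sd, *found = NULL;

	for (sd = base; sd; sd = sd->parent)	/* base may legitimately be NULL */
		if (sd->flags & STUB_SHARE_CPUPOWER)
			found = sd;
	return found;	/* NULL simply means: skip the SMT-dependent logic */
}

This mirrors the wake_sleeping_dependent()/dependent_sleeper() changes in the diff below, and lets isolated or hot-unplugged CPUs run with no domain at all instead of a zeroed dummy one.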
Signed-off-by: Nick Piggin Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index b1410577f9a8..77c07c2928b9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2579,11 +2579,15 @@ out: #ifdef CONFIG_SCHED_SMT static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) { - struct sched_domain *sd = this_rq->sd; + struct sched_domain *tmp, *sd = NULL; cpumask_t sibling_map; int i; - if (!(sd->flags & SD_SHARE_CPUPOWER)) + for_each_domain(this_cpu, tmp) + if (tmp->flags & SD_SHARE_CPUPOWER) + sd = tmp; + + if (!sd) return; /* @@ -2624,13 +2628,17 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) { - struct sched_domain *sd = this_rq->sd; + struct sched_domain *tmp, *sd = NULL; cpumask_t sibling_map; prio_array_t *array; int ret = 0, i; task_t *p; - if (!(sd->flags & SD_SHARE_CPUPOWER)) + for_each_domain(this_cpu, tmp) + if (tmp->flags & SD_SHARE_CPUPOWER) + sd = tmp; + + if (!sd) return 0; /* @@ -4617,6 +4625,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) { int level = 0; + if (!sd) { + printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); + return; + } + printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); do { @@ -4874,7 +4887,7 @@ static void __devinit arch_init_sched_domains(void) cpus_and(cpu_default_map, cpu_default_map, cpu_online_map); /* - * Set up domains. Isolated domains just stay on the dummy domain. + * Set up domains. Isolated domains just stay on the NULL domain. */ for_each_cpu_mask(i, cpu_default_map) { int group; @@ -4987,18 +5000,11 @@ static void __devinit arch_destroy_sched_domains(void) #endif /* ARCH_HAS_SCHED_DOMAIN */ -/* - * Initial dummy domain for early boot and for hotplug cpu. Being static, - * it is initialized to zero, so all balancing flags are cleared which is - * what we want. - */ -static struct sched_domain sched_domain_dummy; - #ifdef CONFIG_HOTPLUG_CPU /* * Force a reinitialization of the sched domains hierarchy. The domains * and groups cannot be updated in place without racing with the balancing - * code, so we temporarily attach all running cpus to a "dummy" domain + * code, so we temporarily attach all running cpus to the NULL domain * which will prevent rebalancing while the sched domains are recalculated. */ static int update_sched_domains(struct notifier_block *nfb, @@ -5010,7 +5016,7 @@ static int update_sched_domains(struct notifier_block *nfb, case CPU_UP_PREPARE: case CPU_DOWN_PREPARE: for_each_online_cpu(i) - cpu_attach_domain(&sched_domain_dummy, i); + cpu_attach_domain(NULL, i); arch_destroy_sched_domains(); return NOTIFY_OK; @@ -5072,7 +5078,7 @@ void __init sched_init(void) rq->best_expired_prio = MAX_PRIO; #ifdef CONFIG_SMP - rq->sd = &sched_domain_dummy; + rq->sd = NULL; for (j = 1; j < 3; j++) rq->cpu_load[j] = 0; rq->active_balance = 0; -- cgit v1.2.3 From 245af2c7870bd5940f7bfad19a0a03b32751fbc5 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Sat, 25 Jun 2005 14:57:25 -0700 Subject: [PATCH] sched: remove degenerate domains Remove degenerate scheduler domains during the sched-domain init. For example on x86_64, we always have NUMA configured in. On Intel EM64T systems, top most sched domain will be of NUMA and with only one sched_group in it. 
With fork/exec balances(recent Nick's fixes in -mm tree), we always endup taking wrong decisions because of this topmost domain (as it contains only one group and find_idlest_group always returns NULL). We will endup loading HT package completely first, letting active load balance kickin and correct it. In general, this patch also makes sense with out recent Nick's fixes in -mm. From: Nick Piggin Modified to account for more than just sched_groups when scanning for degenerate domains by Nick Piggin. And allow a runqueue's sd to go NULL rather than keep a single degenerate domain around (this happens when you run with maxcpus=1). Signed-off-by: Suresh Siddha Signed-off-by: Nick Piggin Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 77c07c2928b9..e75b301b5340 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4712,6 +4712,57 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) #define sched_domain_debug(sd, cpu) {} #endif +static int __devinit sd_degenerate(struct sched_domain *sd) +{ + if (cpus_weight(sd->span) == 1) + return 1; + + /* Following flags need at least 2 groups */ + if (sd->flags & (SD_LOAD_BALANCE | + SD_BALANCE_NEWIDLE | + SD_BALANCE_FORK | + SD_BALANCE_EXEC)) { + if (sd->groups != sd->groups->next) + return 0; + } + + /* Following flags don't use groups */ + if (sd->flags & (SD_WAKE_IDLE | + SD_WAKE_AFFINE | + SD_WAKE_BALANCE)) + return 0; + + return 1; +} + +static int __devinit sd_parent_degenerate(struct sched_domain *sd, + struct sched_domain *parent) +{ + unsigned long cflags = sd->flags, pflags = parent->flags; + + if (sd_degenerate(parent)) + return 1; + + if (!cpus_equal(sd->span, parent->span)) + return 0; + + /* Does parent contain flags not in child? */ + /* WAKE_BALANCE is a subset of WAKE_AFFINE */ + if (cflags & SD_WAKE_AFFINE) + pflags &= ~SD_WAKE_BALANCE; + /* Flags needing groups don't count if only 1 group in parent */ + if (parent->groups == parent->groups->next) { + pflags &= ~(SD_LOAD_BALANCE | + SD_BALANCE_NEWIDLE | + SD_BALANCE_FORK | + SD_BALANCE_EXEC); + } + if (~cflags & pflags) + return 0; + + return 1; +} + /* * Attach the domain 'sd' to 'cpu' as its base domain. Callers must * hold the hotplug lock. @@ -4722,6 +4773,19 @@ void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu) unsigned long flags; runqueue_t *rq = cpu_rq(cpu); int local = 1; + struct sched_domain *tmp; + + /* Remove the sched domains which do not contribute to scheduling. */ + for (tmp = sd; tmp; tmp = tmp->parent) { + struct sched_domain *parent = tmp->parent; + if (!parent) + break; + if (sd_parent_degenerate(tmp, parent)) + tmp->parent = parent->parent; + } + + if (sd && sd_degenerate(sd)) + sd = sd->parent; sched_domain_debug(sd, cpu); -- cgit v1.2.3 From 3dbd5342074a1e570ec84edf859deb9be588006d Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:26 -0700 Subject: [PATCH] sched: multilevel sbe sbf The fundamental problem that Suresh has with balance on exec and fork is that it only tries to balance the top level domain with the flag set. This was worked around by removing degenerate domains, but is still a problem if people want to start using more complex sched-domains, especially multilevel NUMA that ia64 is already using. 
This patch makes balance on fork and exec try balancing over not just the top most domain with the flag set, but all the way down the domain tree. Signed-off-by: Nick Piggin Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 45 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index e75b301b5340..ef32389ee768 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1319,21 +1319,24 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) sd = tmp; if (sd) { + cpumask_t span; int new_cpu; struct sched_group *group; +again: schedstat_inc(sd, sbf_cnt); + span = sd->span; cpu = task_cpu(p); group = find_idlest_group(sd, p, cpu); if (!group) { schedstat_inc(sd, sbf_balanced); - goto no_forkbalance; + goto nextlevel; } new_cpu = find_idlest_cpu(group, cpu); if (new_cpu == -1 || new_cpu == cpu) { schedstat_inc(sd, sbf_balanced); - goto no_forkbalance; + goto nextlevel; } if (cpu_isset(new_cpu, p->cpus_allowed)) { @@ -1343,9 +1346,21 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) rq = task_rq_lock(p, &flags); cpu = task_cpu(p); } + + /* Now try balancing at a lower domain level */ +nextlevel: + sd = NULL; + for_each_domain(cpu, tmp) { + if (cpus_subset(span, tmp->span)) + break; + if (tmp->flags & SD_BALANCE_FORK) + sd = tmp; + } + + if (sd) + goto again; } -no_forkbalance: #endif /* * We decrease the sleep average of forking parents @@ -1711,25 +1726,41 @@ void sched_exec(void) sd = tmp; if (sd) { + cpumask_t span; struct sched_group *group; +again: schedstat_inc(sd, sbe_cnt); + span = sd->span; group = find_idlest_group(sd, current, this_cpu); if (!group) { schedstat_inc(sd, sbe_balanced); - goto out; + goto nextlevel; } new_cpu = find_idlest_cpu(group, this_cpu); if (new_cpu == -1 || new_cpu == this_cpu) { schedstat_inc(sd, sbe_balanced); - goto out; + goto nextlevel; } schedstat_inc(sd, sbe_pushed); put_cpu(); sched_migrate_task(current, new_cpu); - return; + + /* Now try balancing at a lower domain level */ + this_cpu = get_cpu(); +nextlevel: + sd = NULL; + for_each_domain(this_cpu, tmp) { + if (cpus_subset(span, tmp->span)) + break; + if (tmp->flags & SD_BALANCE_EXEC) + sd = tmp; + } + + if (sd) + goto again; } -out: + put_cpu(); } -- cgit v1.2.3 From 674311d5b411e9042df4fdf7aef0b3c8217b6240 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:27 -0700 Subject: [PATCH] sched: RCU domains One of the problems with the multilevel balance-on-fork/exec is that it needs to jump through hoops to satisfy sched-domain's locking semantics (that is, you may traverse your own domain when not preemptable, and you may traverse others' domains when holding their runqueue lock). balance-on-exec had to potentially migrate between more than one CPU before finding a final CPU to migrate to, and balance-on-fork needed to potentially take multiple runqueue locks. So bite the bullet and make sched-domains go completely RCU. This actually simplifies the code quite a bit. From: Ingo Molnar schedstats RCU fix, and a nice comment on for_each_domain, from Ingo. 
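(A hedged reader-side sketch of the new rule, not part of the patch; it relies only on the for_each_domain() macro and the rcu_assign_pointer() publication shown in the hunks below. Disabling preemption is what makes a remote CPU's domain tree safe to walk, because the destroy path waits for a scheduler grace period before the domains are torn down.)

/*
 * Example reader: any CPU may inspect another CPU's domain tree as
 * long as preemption stays disabled across the walk.
 */
static int cpu_domain_levels(int cpu)
{
	struct sched_domain *sd;
	int levels = 0;

	preempt_disable();
	for_each_domain(cpu, sd)
		levels++;
	preempt_enable();

	return levels;
}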
Signed-off-by: Ingo Molnar Signed-off-by: Nick Piggin Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 60 +++++++++++++++------------------------------------------- 1 file changed, 15 insertions(+), 45 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index ef32389ee768..54ce787b6207 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -260,8 +260,15 @@ struct runqueue { static DEFINE_PER_CPU(struct runqueue, runqueues); +/* + * The domain tree (rq->sd) is protected by RCU's quiescent state transition. + * See update_sched_domains: synchronize_kernel for details. + * + * The domain tree of any CPU may only be accessed from within + * preempt-disabled sections. + */ #define for_each_domain(cpu, domain) \ - for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent) +for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent) #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) #define this_rq() (&__get_cpu_var(runqueues)) @@ -395,6 +402,7 @@ static int show_schedstat(struct seq_file *seq, void *v) #ifdef CONFIG_SMP /* domain-specific stats */ + preempt_disable(); for_each_domain(cpu, sd) { enum idle_type itype; char mask_str[NR_CPUS]; @@ -419,6 +427,7 @@ static int show_schedstat(struct seq_file *seq, void *v) sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed, sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance); } + preempt_enable(); #endif } return 0; @@ -824,22 +833,12 @@ inline int task_curr(const task_t *p) } #ifdef CONFIG_SMP -enum request_type { - REQ_MOVE_TASK, - REQ_SET_DOMAIN, -}; - typedef struct { struct list_head list; - enum request_type type; - /* For REQ_MOVE_TASK */ task_t *task; int dest_cpu; - /* For REQ_SET_DOMAIN */ - struct sched_domain *sd; - struct completion done; } migration_req_t; @@ -861,7 +860,6 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) } init_completion(&req->done); - req->type = REQ_MOVE_TASK; req->task = p; req->dest_cpu = dest_cpu; list_add(&req->list, &rq->migration_queue); @@ -4378,17 +4376,9 @@ static int migration_thread(void * data) req = list_entry(head->next, migration_req_t, list); list_del_init(head->next); - if (req->type == REQ_MOVE_TASK) { - spin_unlock(&rq->lock); - __migrate_task(req->task, cpu, req->dest_cpu); - local_irq_enable(); - } else if (req->type == REQ_SET_DOMAIN) { - rq->sd = req->sd; - spin_unlock_irq(&rq->lock); - } else { - spin_unlock_irq(&rq->lock); - WARN_ON(1); - } + spin_unlock(&rq->lock); + __migrate_task(req->task, cpu, req->dest_cpu); + local_irq_enable(); complete(&req->done); } @@ -4619,7 +4609,6 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, migration_req_t *req; req = list_entry(rq->migration_queue.next, migration_req_t, list); - BUG_ON(req->type != REQ_MOVE_TASK); list_del_init(&req->list); complete(&req->done); } @@ -4800,10 +4789,7 @@ static int __devinit sd_parent_degenerate(struct sched_domain *sd, */ void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu) { - migration_req_t req; - unsigned long flags; runqueue_t *rq = cpu_rq(cpu); - int local = 1; struct sched_domain *tmp; /* Remove the sched domains which do not contribute to scheduling. 
*/ @@ -4820,24 +4806,7 @@ void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu) sched_domain_debug(sd, cpu); - spin_lock_irqsave(&rq->lock, flags); - - if (cpu == smp_processor_id() || !cpu_online(cpu)) { - rq->sd = sd; - } else { - init_completion(&req.done); - req.type = REQ_SET_DOMAIN; - req.sd = sd; - list_add(&req.list, &rq->migration_queue); - local = 0; - } - - spin_unlock_irqrestore(&rq->lock, flags); - - if (!local) { - wake_up_process(rq->migration_thread); - wait_for_completion(&req.done); - } + rcu_assign_pointer(rq->sd, sd); } /* cpus with isolated domains */ @@ -5112,6 +5081,7 @@ static int update_sched_domains(struct notifier_block *nfb, case CPU_DOWN_PREPARE: for_each_online_cpu(i) cpu_attach_domain(NULL, i); + synchronize_kernel(); arch_destroy_sched_domains(); return NOTIFY_OK; -- cgit v1.2.3 From 476d139c218e44e045e4bc6d4cc02b010b343939 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:29 -0700 Subject: [PATCH] sched: consolidate sbe sbf Consolidate balance-on-exec with balance-on-fork. This is made easy by the sched-domains RCU patches. As well as the general goodness of code reduction, this allows the runqueues to be unlocked during balance-on-fork. schedstats is a problem. Maybe just have balance-on-event instead of distinguishing fork and exec? Signed-off-by: Nick Piggin Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 21 ++++--- kernel/sched.c | 174 ++++++++++++++++++++++----------------------------------- 2 files changed, 80 insertions(+), 115 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index a28d11e10877..2c7806873bfd 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1003,9 +1003,6 @@ static task_t *copy_process(unsigned long clone_flags, p->pdeath_signal = 0; p->exit_state = 0; - /* Perform scheduler related setup */ - sched_fork(p); - /* * Ok, make it visible to the rest of the system. * We dont wake it up yet. @@ -1014,18 +1011,24 @@ static task_t *copy_process(unsigned long clone_flags, INIT_LIST_HEAD(&p->ptrace_children); INIT_LIST_HEAD(&p->ptrace_list); + /* Perform scheduler related setup. Assign this task to a CPU. */ + sched_fork(p, clone_flags); + /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* - * The task hasn't been attached yet, so cpus_allowed mask cannot - * have changed. The cpus_allowed mask of the parent may have - * changed after it was copied first time, and it may then move to - * another CPU - so we re-copy it here and set the child's CPU to - * the parent's CPU. This avoids alot of nasty races. + * The task hasn't been attached yet, so its cpus_allowed mask will + * not be changed, nor will its assigned CPU. + * + * The cpus_allowed mask of the parent may have changed after it was + * copied first time - so re-copy it here, then check the child's CPU + * to ensure it is on a valid CPU (and if not, just force it back to + * parent's CPU). This avoids alot of nasty races. */ p->cpus_allowed = current->cpus_allowed; - set_task_cpu(p, smp_processor_id()); + if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed))) + set_task_cpu(p, smp_processor_id()); /* * Check for pending SIGKILL! 
The new thread should not be allowed diff --git a/kernel/sched.c b/kernel/sched.c index 54ce787b6207..579da278e72f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1021,8 +1021,59 @@ static int find_idlest_cpu(struct sched_group *group, int this_cpu) return idlest; } +/* + * sched_balance_self: balance the current task (running on cpu) in domains + * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and + * SD_BALANCE_EXEC. + * + * Balance, ie. select the least loaded group. + * + * Returns the target CPU number, or the same CPU if no balancing is needed. + * + * preempt must be disabled. + */ +static int sched_balance_self(int cpu, int flag) +{ + struct task_struct *t = current; + struct sched_domain *tmp, *sd = NULL; -#endif + for_each_domain(cpu, tmp) + if (tmp->flags & flag) + sd = tmp; + + while (sd) { + cpumask_t span; + struct sched_group *group; + int new_cpu; + int weight; + + span = sd->span; + group = find_idlest_group(sd, t, cpu); + if (!group) + goto nextlevel; + + new_cpu = find_idlest_cpu(group, cpu); + if (new_cpu == -1 || new_cpu == cpu) + goto nextlevel; + + /* Now try balancing at a lower domain level */ + cpu = new_cpu; +nextlevel: + sd = NULL; + weight = cpus_weight(span); + for_each_domain(cpu, tmp) { + if (weight <= cpus_weight(tmp->span)) + break; + if (tmp->flags & flag) + sd = tmp; + } + /* while loop will break here if sd == NULL */ + } + + return cpu; +} + +#endif /* CONFIG_SMP */ /* * wake_idle() will wake a task on an idle cpu if task->cpu is @@ -1240,8 +1291,15 @@ int fastcall wake_up_state(task_t *p, unsigned int state) * Perform scheduler related setup for a newly forked process p. * p is forked by current. */ -void fastcall sched_fork(task_t *p) +void fastcall sched_fork(task_t *p, int clone_flags) { + int cpu = get_cpu(); + +#ifdef CONFIG_SMP + cpu = sched_balance_self(cpu, SD_BALANCE_FORK); +#endif + set_task_cpu(p, cpu); + /* * We mark the process as running here, but have not actually * inserted it onto the runqueue yet. This guarantees that @@ -1282,12 +1340,10 @@ void fastcall sched_fork(task_t *p) * runqueue lock is not a problem. 
*/ current->time_slice = 1; - preempt_disable(); scheduler_tick(); - local_irq_enable(); - preempt_enable(); - } else - local_irq_enable(); + } + local_irq_enable(); + put_cpu(); } /* @@ -1302,64 +1358,12 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) unsigned long flags; int this_cpu, cpu; runqueue_t *rq, *this_rq; -#ifdef CONFIG_SMP - struct sched_domain *tmp, *sd = NULL; -#endif rq = task_rq_lock(p, &flags); BUG_ON(p->state != TASK_RUNNING); this_cpu = smp_processor_id(); cpu = task_cpu(p); -#ifdef CONFIG_SMP - for_each_domain(cpu, tmp) - if (tmp->flags & SD_BALANCE_FORK) - sd = tmp; - - if (sd) { - cpumask_t span; - int new_cpu; - struct sched_group *group; - -again: - schedstat_inc(sd, sbf_cnt); - span = sd->span; - cpu = task_cpu(p); - group = find_idlest_group(sd, p, cpu); - if (!group) { - schedstat_inc(sd, sbf_balanced); - goto nextlevel; - } - - new_cpu = find_idlest_cpu(group, cpu); - if (new_cpu == -1 || new_cpu == cpu) { - schedstat_inc(sd, sbf_balanced); - goto nextlevel; - } - - if (cpu_isset(new_cpu, p->cpus_allowed)) { - schedstat_inc(sd, sbf_pushed); - set_task_cpu(p, new_cpu); - task_rq_unlock(rq, &flags); - rq = task_rq_lock(p, &flags); - cpu = task_cpu(p); - } - - /* Now try balancing at a lower domain level */ -nextlevel: - sd = NULL; - for_each_domain(cpu, tmp) { - if (cpus_subset(span, tmp->span)) - break; - if (tmp->flags & SD_BALANCE_FORK) - sd = tmp; - } - - if (sd) - goto again; - } - -#endif /* * We decrease the sleep average of forking parents * and children as well, to keep max-interactive tasks @@ -1708,58 +1712,16 @@ out: } /* - * sched_exec(): find the highest-level, exec-balance-capable - * domain and try to migrate the task to the least loaded CPU. - * - * execve() is a valuable balancing opportunity, because at this point - * the task has the smallest effective memory and cache footprint. + * sched_exec - execve() is a valuable balancing opportunity, because at + * this point the task has the smallest effective memory and cache footprint. */ void sched_exec(void) { - struct sched_domain *tmp, *sd = NULL; int new_cpu, this_cpu = get_cpu(); - - for_each_domain(this_cpu, tmp) - if (tmp->flags & SD_BALANCE_EXEC) - sd = tmp; - - if (sd) { - cpumask_t span; - struct sched_group *group; -again: - schedstat_inc(sd, sbe_cnt); - span = sd->span; - group = find_idlest_group(sd, current, this_cpu); - if (!group) { - schedstat_inc(sd, sbe_balanced); - goto nextlevel; - } - new_cpu = find_idlest_cpu(group, this_cpu); - if (new_cpu == -1 || new_cpu == this_cpu) { - schedstat_inc(sd, sbe_balanced); - goto nextlevel; - } - - schedstat_inc(sd, sbe_pushed); - put_cpu(); - sched_migrate_task(current, new_cpu); - - /* Now try balancing at a lower domain level */ - this_cpu = get_cpu(); -nextlevel: - sd = NULL; - for_each_domain(this_cpu, tmp) { - if (cpus_subset(span, tmp->span)) - break; - if (tmp->flags & SD_BALANCE_EXEC) - sd = tmp; - } - - if (sd) - goto again; - } - + new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC); put_cpu(); + if (new_cpu != this_cpu) + sched_migrate_task(current, new_cpu); } /* -- cgit v1.2.3 From 77391d71681d05d2f4502f91ad62618522abf624 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:30 -0700 Subject: [PATCH] sched: relax pinned balancing The maximum rebalance interval allowed by the multiprocessor balancing backoff is often not large enough to handle corner cases where there are lots of tasks pinned on a CPU. 
Suresh reported: I see system livelock's if for example I have 7000 processes pinned onto one cpu (this is on the fastest 8-way system I have access to). After this patch, the machine is reported to go well above this number. Signed-off-by: Nick Piggin Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 579da278e72f..6e452eb95ac3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2030,6 +2030,12 @@ static runqueue_t *find_busiest_queue(struct sched_group *group) return busiest; } +/* + * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but + * so long as it is large enough. + */ +#define MAX_PINNED_INTERVAL 512 + /* * Check this_cpu to ensure it is balanced within domain. Attempt to move * tasks if there is an imbalance. @@ -2042,7 +2048,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, struct sched_group *group; runqueue_t *busiest; unsigned long imbalance; - int nr_moved, all_pinned; + int nr_moved, all_pinned = 0; int active_balance = 0; spin_lock(&this_rq->lock); @@ -2133,7 +2139,8 @@ out_balanced: sd->nr_balance_failed = 0; /* tune up the balancing interval */ - if (sd->balance_interval < sd->max_interval) + if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || + (sd->balance_interval < sd->max_interval)) sd->balance_interval *= 2; return 0; -- cgit v1.2.3 From a3464a102a69a4e00efb0a763e274ce290995b4b Mon Sep 17 00:00:00 2001 From: Chen Shang Date: Sat, 25 Jun 2005 14:57:31 -0700 Subject: [PATCH] sched: micro-optimize task requeueing in schedule() micro-optimize task requeueing in schedule() & clean up recalc_task_prio(). Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 6e452eb95ac3..a3d1c8e43d34 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -673,7 +673,7 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq) rq->nr_running++; } -static void recalc_task_prio(task_t *p, unsigned long long now) +static int recalc_task_prio(task_t *p, unsigned long long now) { /* Caller must always ensure 'now >= p->timestamp' */ unsigned long long __sleep_time = now - p->timestamp; @@ -732,7 +732,7 @@ static void recalc_task_prio(task_t *p, unsigned long long now) } } - p->prio = effective_prio(p); + return effective_prio(p); } /* @@ -755,7 +755,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local) } #endif - recalc_task_prio(p, now); + p->prio = recalc_task_prio(p, now); /* * This checks to make sure it's not an uninterruptible task @@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void) struct list_head *queue; unsigned long long now; unsigned long run_time; - int cpu, idx; + int cpu, idx, new_prio; /* * Test if we are atomic. 
Since do_exit() needs to call into @@ -2873,9 +2873,14 @@ go_idle: delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128; array = next->array; - dequeue_task(next, array); - recalc_task_prio(next, next->timestamp + delta); - enqueue_task(next, array); + new_prio = recalc_task_prio(next, next->timestamp + delta); + + if (unlikely(next->prio != new_prio)) { + dequeue_task(next, array); + next->prio = new_prio; + enqueue_task(next, array); + } else + requeue_task(next, array); } next->activated = 0; switch_tasks: -- cgit v1.2.3 From 37e4ab3f0cba13adf3535d373fd98e5ee47b5410 Mon Sep 17 00:00:00 2001 From: Olivier Croquette Date: Sat, 25 Jun 2005 14:57:32 -0700 Subject: [PATCH] Changing RT priority without CAP_SYS_NICE Presently, a process without the capability CAP_SYS_NICE can not change its own policy, which is OK. But it can also not decrease its RT priority (if scheduled with policy SCHED_RR or SCHED_FIFO), which is what this patch changes. The rationale is the same as for the nice value: a process should be able to require less priority for itself. Increasing the priority is still not allowed. This is for example useful if you give a multithreaded user process a RT priority, and the process would like to organize its internal threads using priorities also. Then you can give the process the highest priority needed N, and the process starts its threads with lower priorities: N-1, N-2... The POSIX norm says that the permissions are implementation specific, so I think we can do that. In a sense, it makes the permissions consistent whatever the policy is: with this patch, process scheduled by SCHED_FIFO, SCHED_RR and SCHED_OTHER can all decrease their priority. From: Ingo Molnar cleaned up and merged to -mm. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index a3d1c8e43d34..d3d81b82e378 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3531,13 +3531,24 @@ recheck: if ((policy == SCHED_NORMAL) != (param->sched_priority == 0)) return -EINVAL; - if ((policy == SCHED_FIFO || policy == SCHED_RR) && - param->sched_priority > p->signal->rlim[RLIMIT_RTPRIO].rlim_cur && - !capable(CAP_SYS_NICE)) - return -EPERM; - if ((current->euid != p->euid) && (current->euid != p->uid) && - !capable(CAP_SYS_NICE)) - return -EPERM; + /* + * Allow unprivileged RT tasks to decrease priority: + */ + if (!capable(CAP_SYS_NICE)) { + /* can't change policy */ + if (policy != p->policy) + return -EPERM; + /* can't increase priority */ + if (policy != SCHED_NORMAL && + param->sched_priority > p->rt_priority && + param->sched_priority > + p->signal->rlim[RLIMIT_RTPRIO].rlim_cur) + return -EPERM; + /* can't change other user's priorities */ + if ((current->euid != p->euid) && + (current->euid != p->uid)) + return -EPERM; + } retval = security_task_setscheduler(p, policy, param); if (retval) -- cgit v1.2.3 From 1a20ff27ef75d866730ee796acd811a925af762f Mon Sep 17 00:00:00 2001 From: Dinakar Guniguntala Date: Sat, 25 Jun 2005 14:57:33 -0700 Subject: [PATCH] Dynamic sched domains: sched changes The following patches add dynamic sched domains functionality that was extensively discussed on lkml and lse-tech. 
I would like to see this added to -mm o The main advantage with this feature is that it ensures that the scheduler load balacing code only balances against the cpus that are in the sched domain as defined by an exclusive cpuset and not all of the cpus in the system. This removes any overhead due to load balancing code trying to pull tasks outside of the cpu exclusive cpuset only to be prevented by the tasks' cpus_allowed mask. o cpu exclusive cpusets are useful for servers running orthogonal workloads such as RT applications requiring low latency and HPC applications that are throughput sensitive o It provides a new API partition_sched_domains in sched.c that makes dynamic sched domains possible. o cpu_exclusive cpusets sets are now associated with a sched domain. Which means that the users can dynamically modify the sched domains through the cpuset file system interface o ia64 sched domain code has been updated to support this feature as well o Currently, this does not support hotplug. (However some of my tests indicate hotplug+preempt is currently broken) o I have tested it extensively on x86. o This should have very minimal impact on performance as none of the fast paths are affected Signed-off-by: Dinakar Guniguntala Acked-by: Paul Jackson Acked-by: Nick Piggin Acked-by: Matthew Dobson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 132 +++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 86 insertions(+), 46 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index d3d81b82e378..dee96b22635e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -262,7 +262,7 @@ static DEFINE_PER_CPU(struct runqueue, runqueues); /* * The domain tree (rq->sd) is protected by RCU's quiescent state transition. - * See update_sched_domains: synchronize_kernel for details. + * See detach_destroy_domains: synchronize_sched for details. * * The domain tree of any CPU may only be accessed from within * preempt-disabled sections. @@ -4624,7 +4624,7 @@ int __init migration_init(void) #endif #ifdef CONFIG_SMP -#define SCHED_DOMAIN_DEBUG +#undef SCHED_DOMAIN_DEBUG #ifdef SCHED_DOMAIN_DEBUG static void sched_domain_debug(struct sched_domain *sd, int cpu) { @@ -4717,7 +4717,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) #define sched_domain_debug(sd, cpu) {} #endif -static int __devinit sd_degenerate(struct sched_domain *sd) +static int sd_degenerate(struct sched_domain *sd) { if (cpus_weight(sd->span) == 1) return 1; @@ -4740,7 +4740,7 @@ static int __devinit sd_degenerate(struct sched_domain *sd) return 1; } -static int __devinit sd_parent_degenerate(struct sched_domain *sd, +static int sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) { unsigned long cflags = sd->flags, pflags = parent->flags; @@ -4772,7 +4772,7 @@ static int __devinit sd_parent_degenerate(struct sched_domain *sd, * Attach the domain 'sd' to 'cpu' as its base domain. Callers must * hold the hotplug lock. */ -void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu) +void cpu_attach_domain(struct sched_domain *sd, int cpu) { runqueue_t *rq = cpu_rq(cpu); struct sched_domain *tmp; @@ -4823,7 +4823,7 @@ __setup ("isolcpus=", isolated_cpu_setup); * covered by the given span, and will set each group's ->cpumask correctly, * and ->cpu_power to 0. 
*/ -void __devinit init_sched_build_groups(struct sched_group groups[], +void init_sched_build_groups(struct sched_group groups[], cpumask_t span, int (*group_fn)(int cpu)) { struct sched_group *first = NULL, *last = NULL; @@ -4859,13 +4859,14 @@ void __devinit init_sched_build_groups(struct sched_group groups[], #ifdef ARCH_HAS_SCHED_DOMAIN -extern void __devinit arch_init_sched_domains(void); -extern void __devinit arch_destroy_sched_domains(void); +extern void build_sched_domains(const cpumask_t *cpu_map); +extern void arch_init_sched_domains(const cpumask_t *cpu_map); +extern void arch_destroy_sched_domains(const cpumask_t *cpu_map); #else #ifdef CONFIG_SCHED_SMT static DEFINE_PER_CPU(struct sched_domain, cpu_domains); static struct sched_group sched_group_cpus[NR_CPUS]; -static int __devinit cpu_to_cpu_group(int cpu) +static int cpu_to_cpu_group(int cpu) { return cpu; } @@ -4873,7 +4874,7 @@ static int __devinit cpu_to_cpu_group(int cpu) static DEFINE_PER_CPU(struct sched_domain, phys_domains); static struct sched_group sched_group_phys[NR_CPUS]; -static int __devinit cpu_to_phys_group(int cpu) +static int cpu_to_phys_group(int cpu) { #ifdef CONFIG_SCHED_SMT return first_cpu(cpu_sibling_map[cpu]); @@ -4886,7 +4887,7 @@ static int __devinit cpu_to_phys_group(int cpu) static DEFINE_PER_CPU(struct sched_domain, node_domains); static struct sched_group sched_group_nodes[MAX_NUMNODES]; -static int __devinit cpu_to_node_group(int cpu) +static int cpu_to_node_group(int cpu) { return cpu_to_node(cpu); } @@ -4917,39 +4918,28 @@ static void check_sibling_maps(void) #endif /* - * Set up scheduler domains and groups. Callers must hold the hotplug lock. + * Build sched domains for a given set of cpus and attach the sched domains + * to the individual cpus */ -static void __devinit arch_init_sched_domains(void) +static void build_sched_domains(const cpumask_t *cpu_map) { int i; - cpumask_t cpu_default_map; - -#if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA) - check_sibling_maps(); -#endif - /* - * Setup mask for cpus without special case scheduling requirements. - * For now this just excludes isolated cpus, but could be used to - * exclude other special cases in the future. - */ - cpus_complement(cpu_default_map, cpu_isolated_map); - cpus_and(cpu_default_map, cpu_default_map, cpu_online_map); /* - * Set up domains. Isolated domains just stay on the NULL domain. + * Set up domains for cpus specified by the cpu_map. 
*/ - for_each_cpu_mask(i, cpu_default_map) { + for_each_cpu_mask(i, *cpu_map) { int group; struct sched_domain *sd = NULL, *p; cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); - cpus_and(nodemask, nodemask, cpu_default_map); + cpus_and(nodemask, nodemask, *cpu_map); #ifdef CONFIG_NUMA sd = &per_cpu(node_domains, i); group = cpu_to_node_group(i); *sd = SD_NODE_INIT; - sd->span = cpu_default_map; + sd->span = *cpu_map; sd->groups = &sched_group_nodes[group]; #endif @@ -4967,7 +4957,7 @@ static void __devinit arch_init_sched_domains(void) group = cpu_to_cpu_group(i); *sd = SD_SIBLING_INIT; sd->span = cpu_sibling_map[i]; - cpus_and(sd->span, sd->span, cpu_default_map); + cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; sd->groups = &sched_group_cpus[group]; #endif @@ -4977,7 +4967,7 @@ static void __devinit arch_init_sched_domains(void) /* Set up CPU (sibling) groups */ for_each_online_cpu(i) { cpumask_t this_sibling_map = cpu_sibling_map[i]; - cpus_and(this_sibling_map, this_sibling_map, cpu_default_map); + cpus_and(this_sibling_map, this_sibling_map, *cpu_map); if (i != first_cpu(this_sibling_map)) continue; @@ -4990,7 +4980,7 @@ static void __devinit arch_init_sched_domains(void) for (i = 0; i < MAX_NUMNODES; i++) { cpumask_t nodemask = node_to_cpumask(i); - cpus_and(nodemask, nodemask, cpu_default_map); + cpus_and(nodemask, nodemask, *cpu_map); if (cpus_empty(nodemask)) continue; @@ -5000,12 +4990,12 @@ static void __devinit arch_init_sched_domains(void) #ifdef CONFIG_NUMA /* Set up node groups */ - init_sched_build_groups(sched_group_nodes, cpu_default_map, + init_sched_build_groups(sched_group_nodes, *cpu_map, &cpu_to_node_group); #endif /* Calculate CPU power for physical packages and nodes */ - for_each_cpu_mask(i, cpu_default_map) { + for_each_cpu_mask(i, *cpu_map) { int power; struct sched_domain *sd; #ifdef CONFIG_SCHED_SMT @@ -5029,7 +5019,7 @@ static void __devinit arch_init_sched_domains(void) } /* Attach the domains */ - for_each_online_cpu(i) { + for_each_cpu_mask(i, *cpu_map) { struct sched_domain *sd; #ifdef CONFIG_SCHED_SMT sd = &per_cpu(cpu_domains, i); @@ -5039,16 +5029,71 @@ static void __devinit arch_init_sched_domains(void) cpu_attach_domain(sd, i); } } +/* + * Set up scheduler domains and groups. Callers must hold the hotplug lock. + */ +static void arch_init_sched_domains(cpumask_t *cpu_map) +{ + cpumask_t cpu_default_map; -#ifdef CONFIG_HOTPLUG_CPU -static void __devinit arch_destroy_sched_domains(void) +#if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA) + check_sibling_maps(); +#endif + /* + * Setup mask for cpus without special case scheduling requirements. + * For now this just excludes isolated cpus, but could be used to + * exclude other special cases in the future. + */ + cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); + + build_sched_domains(&cpu_default_map); +} + +static void arch_destroy_sched_domains(const cpumask_t *cpu_map) { /* Do nothing: everything is statically allocated. */ } -#endif #endif /* ARCH_HAS_SCHED_DOMAIN */ +/* + * Detach sched domains from a group of cpus specified in cpu_map + * These cpus will now be attached to the NULL domain + */ +static inline void detach_destroy_domains(const cpumask_t *cpu_map) +{ + int i; + + for_each_cpu_mask(i, *cpu_map) + cpu_attach_domain(NULL, i); + synchronize_sched(); + arch_destroy_sched_domains(cpu_map); +} + +/* + * Partition sched domains as specified by the cpumasks below. 
+ * This attaches all cpus from the cpumasks to the NULL domain, + * waits for a RCU quiescent period, recalculates sched + * domain information and then attaches them back to the + * correct sched domains + * Call with hotplug lock held + */ +void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) +{ + cpumask_t change_map; + + cpus_and(*partition1, *partition1, cpu_online_map); + cpus_and(*partition2, *partition2, cpu_online_map); + cpus_or(change_map, *partition1, *partition2); + + /* Detach sched domains from all of the affected cpus */ + detach_destroy_domains(&change_map); + if (!cpus_empty(*partition1)) + build_sched_domains(partition1); + if (!cpus_empty(*partition2)) + build_sched_domains(partition2); +} + #ifdef CONFIG_HOTPLUG_CPU /* * Force a reinitialization of the sched domains hierarchy. The domains @@ -5059,15 +5104,10 @@ static void __devinit arch_destroy_sched_domains(void) static int update_sched_domains(struct notifier_block *nfb, unsigned long action, void *hcpu) { - int i; - switch (action) { case CPU_UP_PREPARE: case CPU_DOWN_PREPARE: - for_each_online_cpu(i) - cpu_attach_domain(NULL, i); - synchronize_kernel(); - arch_destroy_sched_domains(); + detach_destroy_domains(&cpu_online_map); return NOTIFY_OK; case CPU_UP_CANCELED: @@ -5083,7 +5123,7 @@ static int update_sched_domains(struct notifier_block *nfb, } /* The hotplug lock is already held by cpu_up/cpu_down */ - arch_init_sched_domains(); + arch_init_sched_domains(&cpu_online_map); return NOTIFY_OK; } @@ -5092,7 +5132,7 @@ static int update_sched_domains(struct notifier_block *nfb, void __init sched_init_smp(void) { lock_cpu_hotplug(); - arch_init_sched_domains(); + arch_init_sched_domains(&cpu_online_map); unlock_cpu_hotplug(); /* XXX: Theoretical race here - CPU may be hotplugged now */ hotcpu_notifier(update_sched_domains, 0); -- cgit v1.2.3 From 85d7b94981e2e919697bc235aad7367b33c3864b Mon Sep 17 00:00:00 2001 From: Dinakar Guniguntala Date: Sat, 25 Jun 2005 14:57:34 -0700 Subject: [PATCH] Dynamic sched domains: cpuset changes Adds the core update_cpu_domains code and updated cpusets documentation Signed-off-by: Dinakar Guniguntala Acked-by: Paul Jackson Acked-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 76 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 79dd929f4084..984c0bf3807f 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -595,10 +595,62 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) return 0; } +/* + * For a given cpuset cur, partition the system as follows + * a. All cpus in the parent cpuset's cpus_allowed that are not part of any + * exclusive child cpusets + * b. All cpus in the current cpuset's cpus_allowed that are not part of any + * exclusive child cpusets + * Build these two partitions by calling partition_sched_domains + * + * Call with cpuset_sem held. May nest a call to the + * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 
+ */ +static void update_cpu_domains(struct cpuset *cur) +{ + struct cpuset *c, *par = cur->parent; + cpumask_t pspan, cspan; + + if (par == NULL || cpus_empty(cur->cpus_allowed)) + return; + + /* + * Get all cpus from parent's cpus_allowed not part of exclusive + * children + */ + pspan = par->cpus_allowed; + list_for_each_entry(c, &par->children, sibling) { + if (is_cpu_exclusive(c)) + cpus_andnot(pspan, pspan, c->cpus_allowed); + } + if (is_removed(cur) || !is_cpu_exclusive(cur)) { + cpus_or(pspan, pspan, cur->cpus_allowed); + if (cpus_equal(pspan, cur->cpus_allowed)) + return; + cspan = CPU_MASK_NONE; + } else { + if (cpus_empty(pspan)) + return; + cspan = cur->cpus_allowed; + /* + * Get all cpus from current cpuset's cpus_allowed not part + * of exclusive children + */ + list_for_each_entry(c, &cur->children, sibling) { + if (is_cpu_exclusive(c)) + cpus_andnot(cspan, cspan, c->cpus_allowed); + } + } + + lock_cpu_hotplug(); + partition_sched_domains(&pspan, &cspan); + unlock_cpu_hotplug(); +} + static int update_cpumask(struct cpuset *cs, char *buf) { struct cpuset trialcs; - int retval; + int retval, cpus_unchanged; trialcs = *cs; retval = cpulist_parse(buf, trialcs.cpus_allowed); @@ -608,9 +660,13 @@ static int update_cpumask(struct cpuset *cs, char *buf) if (cpus_empty(trialcs.cpus_allowed)) return -ENOSPC; retval = validate_change(cs, &trialcs); - if (retval == 0) - cs->cpus_allowed = trialcs.cpus_allowed; - return retval; + if (retval < 0) + return retval; + cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed); + cs->cpus_allowed = trialcs.cpus_allowed; + if (is_cpu_exclusive(cs) && !cpus_unchanged) + update_cpu_domains(cs); + return 0; } static int update_nodemask(struct cpuset *cs, char *buf) @@ -646,7 +702,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) { int turning_on; struct cpuset trialcs; - int err; + int err, cpu_exclusive_changed; turning_on = (simple_strtoul(buf, NULL, 10) != 0); @@ -657,13 +713,18 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) clear_bit(bit, &trialcs.flags); err = validate_change(cs, &trialcs); - if (err == 0) { - if (turning_on) - set_bit(bit, &cs->flags); - else - clear_bit(bit, &cs->flags); - } - return err; + if (err < 0) + return err; + cpu_exclusive_changed = + (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); + if (turning_on) + set_bit(bit, &cs->flags); + else + clear_bit(bit, &cs->flags); + + if (cpu_exclusive_changed) + update_cpu_domains(cs); + return 0; } static int attach_task(struct cpuset *cs, char *buf) @@ -1309,12 +1370,14 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) up(&cpuset_sem); return -EBUSY; } - spin_lock(&cs->dentry->d_lock); parent = cs->parent; set_bit(CS_REMOVED, &cs->flags); + if (is_cpu_exclusive(cs)) + update_cpu_domains(cs); list_del(&cs->sibling); /* delete my sibling from parent->children */ if (list_empty(&parent->children)) check_for_release(parent); + spin_lock(&cs->dentry->d_lock); d = dget(cs->dentry); cs->dentry = NULL; spin_unlock(&d->d_lock); -- cgit v1.2.3 From cc19ca86a023fcd552c78e77a7be6ce271f92a28 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 25 Jun 2005 14:57:36 -0700 Subject: [PATCH] consolidate PREEMPT options into kernel/Kconfig.preempt This patch consolidates the CONFIG_PREEMPT and CONFIG_PREEMPT_BKL preemption options into kernel/Kconfig.preempt. This, besides reducing source-code, also enables more centralized tweaking of preemption related options. 
Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Kconfig.preempt | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 kernel/Kconfig.preempt (limited to 'kernel') diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt new file mode 100644 index 000000000000..587328be8216 --- /dev/null +++ b/kernel/Kconfig.preempt @@ -0,0 +1,24 @@ + +config PREEMPT + bool "Preemptible Kernel" + help + This option reduces the latency of the kernel when reacting to + real-time or interactive events by allowing a low priority process to + be preempted even if it is in kernel mode executing a system call. + This allows applications to run more reliably even when the system is + under load. + + Say Y here if you are building a kernel for a desktop, embedded + or real-time system. Say N if you are unsure. + +config PREEMPT_BKL + bool "Preempt The Big Kernel Lock" + depends on PREEMPT + default y + help + This option reduces the latency of the kernel by making the + big kernel lock preemptible. + + Say Y here if you are building a kernel for a desktop system. + Say N if you are unsure. + -- cgit v1.2.3 From f704f56af95bec3c1ca719d64d0becef74d40899 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 25 Jun 2005 14:57:38 -0700 Subject: [PATCH] enable PREEMPT_BKL on !PREEMPT+SMP too The only sane way to clean up the current 3 lock_kernel() variants seems to be to remove the spinlock-based BKL implementations altogether, and to keep the semaphore-based one only. If we dont want to do that for whatever reason then i'm afraid we have to live with the current complexity. (but i'm open for other cleanup suggestions as well.) To explore this possibility we'll (at a minimum) have to know whether the semaphore-based BKL works fine on plain SMP too. The patch below enables this. The patch may make sense in isolation as well, as it might bring performance benefits: code that would formerly spin on the BKL spinlock will now schedule away and give up the CPU. It might introduce performance regressions as well, if any performance-critical code uses the BKL heavily and gets overscheduled due to the semaphore. I very much hope there is no such performance-critical codepath left though. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Kconfig.preempt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 587328be8216..34c631221aa3 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -13,7 +13,7 @@ config PREEMPT config PREEMPT_BKL bool "Preempt The Big Kernel Lock" - depends on PREEMPT + depends on SMP || PREEMPT default y help This option reduces the latency of the kernel by making the -- cgit v1.2.3 From f8cbd99bd3a023db8d6356d19a5f6f539d367327 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 25 Jun 2005 14:57:39 -0700 Subject: [PATCH] sched: voluntary kernel preemption This patch adds a new preemption model: 'Voluntary Kernel Preemption'. The 3 models can be selected from a new menu: (X) No Forced Preemption (Server) ( ) Voluntary Kernel Preemption (Desktop) ( ) Preemptible Kernel (Low-Latency Desktop) we still default to the stock (Server) preemption model. Voluntary preemption works by adding a cond_resched() (reschedule-if-needed) call to every might_sleep() check. It is lighter than CONFIG_PREEMPT - at the cost of not having as tight latencies. 
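(The Kconfig hunk below only adds the new choice menu; the cond_resched()-in-might_sleep() coupling described above lives in the headers. The following is a simplified, from-memory sketch shown for orientation, not text from this patch; the real header additionally wraps a debug-only __might_sleep() check into might_sleep().)

#ifdef CONFIG_PREEMPT_VOLUNTARY
# define might_resched()	cond_resched()
#else
# define might_resched()	do { } while (0)
#endif

/* every might_sleep() annotation doubles as an explicit preemption point */
#define might_sleep()		do { might_resched(); } while (0)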
It represents a different latency/complexity/overhead tradeoff. It has no runtime impact at all if disabled. Here are size stats that show how the various preemption models impact the kernel's size: text data bss dec hex filename 3618774 547184 179896 4345854 424ffe vmlinux.stock 3626406 547184 179896 4353486 426dce vmlinux.voluntary +0.2% 3748414 548640 179896 4476950 445016 vmlinux.preempt +3.5% voluntary-preempt is +0.2% of .text, preempt is +3.5%. This feature has been tested for many months by lots of people (and it's also included in the RHEL4 distribution and earlier variants were in Fedora as well), and it's intended for users and distributions who dont want to use full-blown CONFIG_PREEMPT for one reason or another. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Kconfig.preempt | 57 +++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 34c631221aa3..0b46a5dff4c0 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -1,15 +1,56 @@ -config PREEMPT - bool "Preemptible Kernel" +choice + prompt "Preemption Model" + default PREEMPT_NONE + +config PREEMPT_NONE + bool "No Forced Preemption (Server)" + help + This is the traditional Linux preemption model, geared towards + throughput. It will still provide good latencies most of the + time, but there are no guarantees and occasional longer delays + are possible. + + Select this option if you are building a kernel for a server or + scientific/computation system, or if you want to maximize the + raw processing power of the kernel, irrespective of scheduling + latencies. + +config PREEMPT_VOLUNTARY + bool "Voluntary Kernel Preemption (Desktop)" help - This option reduces the latency of the kernel when reacting to - real-time or interactive events by allowing a low priority process to - be preempted even if it is in kernel mode executing a system call. - This allows applications to run more reliably even when the system is + This option reduces the latency of the kernel by adding more + "explicit preemption points" to the kernel code. These new + preemption points have been selected to reduce the maximum + latency of rescheduling, providing faster application reactions, + at the cost of slighly lower throughput. + + This allows reaction to interactive events by allowing a + low priority process to voluntarily preempt itself even if it + is in kernel mode executing a system call. This allows + applications to run more 'smoothly' even when the system is under load. - Say Y here if you are building a kernel for a desktop, embedded - or real-time system. Say N if you are unsure. + Select this if you are building a kernel for a desktop system. + +config PREEMPT + bool "Preemptible Kernel (Low-Latency Desktop)" + help + This option reduces the latency of the kernel by making + all kernel code (that is not executing in a critical section) + preemptible. This allows reaction to interactive events by + permitting a low priority process to be preempted involuntarily + even if it is in kernel mode executing a system call and would + otherwise not be about to reach a natural preemption point. + This allows applications to run more 'smoothly' even when the + system is under load, at the cost of slighly lower throughput + and a slight runtime overhead to kernel code. 
+ + Select this if you are building a kernel for a desktop or + embedded system with latency requirements in the milliseconds + range. + +endchoice config PREEMPT_BKL bool "Preempt The Big Kernel Lock" -- cgit v1.2.3 From dc009d92435f99498cbc579ce76bf28e837e2c14 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 25 Jun 2005 14:57:52 -0700 Subject: [PATCH] kexec: add kexec syscalls This patch introduces the architecture independent implementation the sys_kexec_load, the compat_sys_kexec_load system calls. Kexec on panic support has been integrated into the core patch and is relatively clean. In addition the hopefully architecture independent option crashkernel=size@location has been docuemented. It's purpose is to reserve space for the panic kernel to live, and where no DMA transfer will ever be setup to access. Signed-off-by: Eric Biederman Signed-off-by: Alexander Nyberg Signed-off-by: Adrian Bunk Signed-off-by: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Makefile | 1 + kernel/kexec.c | 1036 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/panic.c | 23 +- kernel/sys.c | 20 ++ kernel/sys_ni.c | 2 + 5 files changed, 1080 insertions(+), 2 deletions(-) create mode 100644 kernel/kexec.c (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index b01d26fe8db7..cfc8b0dea950 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_PM) += power/ obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o +obj-$(CONFIG_KEXEC) += kexec.o obj-$(CONFIG_COMPAT) += compat.o obj-$(CONFIG_CPUSETS) += cpuset.o obj-$(CONFIG_IKCONFIG) += configs.o diff --git a/kernel/kexec.c b/kernel/kexec.c new file mode 100644 index 000000000000..def9c73ec9a6 --- /dev/null +++ b/kernel/kexec.c @@ -0,0 +1,1036 @@ +/* + * kexec.c - kexec system call + * Copyright (C) 2002-2004 Eric Biederman + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Location of the reserved area for the crash kernel */ +struct resource crashk_res = { + .name = "Crash kernel", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_MEM +}; + +/* + * When kexec transitions to the new kernel there is a one-to-one + * mapping between physical and virtual addresses. On processors + * where you can disable the MMU this is trivial, and easy. For + * others it is still a simple predictable page table to setup. + * + * In that environment kexec copies the new kernel to its final + * resting place. This means I can only support memory whose + * physical address can fit in an unsigned long. In particular + * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled. + * If the assembly stub has more restrictive requirements + * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be + * defined more restrictively in . + * + * The code for the transition from the current kernel to the + * the new kernel is placed in the control_code_buffer, whose size + * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single + * page of memory is necessary, but some architectures require more. 
+ * Because this memory must be identity mapped in the transition from + * virtual to physical addresses it must live in the range + * 0 - TASK_SIZE, as only the user space mappings are arbitrarily + * modifiable. + * + * The assembly stub in the control code buffer is passed a linked list + * of descriptor pages detailing the source pages of the new kernel, + * and the destination addresses of those source pages. As this data + * structure is not used in the context of the current OS, it must + * be self-contained. + * + * The code has been made to work with highmem pages and will use a + * destination page in its final resting place (if it happens + * to allocate it). The end product of this is that most of the + * physical address space, and most of RAM can be used. + * + * Future directions include: + * - allocating a page table with the control code buffer identity + * mapped, to simplify machine_kexec and make kexec_on_panic more + * reliable. + */ + +/* + * KIMAGE_NO_DEST is an impossible destination address..., for + * allocating pages whose destination address we do not care about. + */ +#define KIMAGE_NO_DEST (-1UL) + +static int kimage_is_destination_range( + struct kimage *image, unsigned long start, unsigned long end); +static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest); + +static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, + unsigned long nr_segments, struct kexec_segment __user *segments) +{ + size_t segment_bytes; + struct kimage *image; + unsigned long i; + int result; + + /* Allocate a controlling structure */ + result = -ENOMEM; + image = kmalloc(sizeof(*image), GFP_KERNEL); + if (!image) { + goto out; + } + memset(image, 0, sizeof(*image)); + image->head = 0; + image->entry = &image->head; + image->last_entry = &image->head; + image->control_page = ~0; /* By default this does not apply */ + image->start = entry; + image->type = KEXEC_TYPE_DEFAULT; + + /* Initialize the list of control pages */ + INIT_LIST_HEAD(&image->control_pages); + + /* Initialize the list of destination pages */ + INIT_LIST_HEAD(&image->dest_pages); + + /* Initialize the list of unuseable pages */ + INIT_LIST_HEAD(&image->unuseable_pages); + + /* Read in the segments */ + image->nr_segments = nr_segments; + segment_bytes = nr_segments * sizeof(*segments); + result = copy_from_user(image->segment, segments, segment_bytes); + if (result) + goto out; + + /* + * Verify we have good destination addresses. The caller is + * responsible for making certain we don't attempt to load + * the new image into invalid or reserved areas of RAM. This + * just verifies it is an address we can use. + * + * Since the kernel does everything in page size chunks ensure + * the destination addreses are page aligned. Too many + * special cases crop of when we don't do this. The most + * insidious is getting overlapping destination addresses + * simply because addresses are changed to page size + * granularity. + */ + result = -EADDRNOTAVAIL; + for (i = 0; i < nr_segments; i++) { + unsigned long mstart, mend; + mstart = image->segment[i].mem; + mend = mstart + image->segment[i].memsz; + if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) + goto out; + if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT) + goto out; + } + + /* Verify our destination addresses do not overlap. + * If we alloed overlapping destination addresses + * through very weird things can happen with no + * easy explanation as one segment stops on another. 
+ */ + result = -EINVAL; + for(i = 0; i < nr_segments; i++) { + unsigned long mstart, mend; + unsigned long j; + mstart = image->segment[i].mem; + mend = mstart + image->segment[i].memsz; + for(j = 0; j < i; j++) { + unsigned long pstart, pend; + pstart = image->segment[j].mem; + pend = pstart + image->segment[j].memsz; + /* Do the segments overlap ? */ + if ((mend > pstart) && (mstart < pend)) + goto out; + } + } + + /* Ensure our buffer sizes are strictly less than + * our memory sizes. This should always be the case, + * and it is easier to check up front than to be surprised + * later on. + */ + result = -EINVAL; + for(i = 0; i < nr_segments; i++) { + if (image->segment[i].bufsz > image->segment[i].memsz) + goto out; + } + + + result = 0; + out: + if (result == 0) { + *rimage = image; + } else { + kfree(image); + } + return result; + +} + +static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, + unsigned long nr_segments, struct kexec_segment __user *segments) +{ + int result; + struct kimage *image; + + /* Allocate and initialize a controlling structure */ + image = NULL; + result = do_kimage_alloc(&image, entry, nr_segments, segments); + if (result) { + goto out; + } + *rimage = image; + + /* + * Find a location for the control code buffer, and add it + * the vector of segments so that it's pages will also be + * counted as destination pages. + */ + result = -ENOMEM; + image->control_code_page = kimage_alloc_control_pages(image, + get_order(KEXEC_CONTROL_CODE_SIZE)); + if (!image->control_code_page) { + printk(KERN_ERR "Could not allocate control_code_buffer\n"); + goto out; + } + + result = 0; + out: + if (result == 0) { + *rimage = image; + } else { + kfree(image); + } + return result; +} + +static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, + unsigned long nr_segments, struct kexec_segment *segments) +{ + int result; + struct kimage *image; + unsigned long i; + + image = NULL; + /* Verify we have a valid entry point */ + if ((entry < crashk_res.start) || (entry > crashk_res.end)) { + result = -EADDRNOTAVAIL; + goto out; + } + + /* Allocate and initialize a controlling structure */ + result = do_kimage_alloc(&image, entry, nr_segments, segments); + if (result) { + goto out; + } + + /* Enable the special crash kernel control page + * allocation policy. + */ + image->control_page = crashk_res.start; + image->type = KEXEC_TYPE_CRASH; + + /* + * Verify we have good destination addresses. Normally + * the caller is responsible for making certain we don't + * attempt to load the new image into invalid or reserved + * areas of RAM. But crash kernels are preloaded into a + * reserved area of ram. We must ensure the addresses + * are in the reserved area otherwise preloading the + * kernel could corrupt things. + */ + result = -EADDRNOTAVAIL; + for (i = 0; i < nr_segments; i++) { + unsigned long mstart, mend; + mstart = image->segment[i].mem; + mend = mstart + image->segment[i].memsz; + /* Ensure we are within the crash kernel limits */ + if ((mstart < crashk_res.start) || (mend > crashk_res.end)) + goto out; + } + + + /* + * Find a location for the control code buffer, and add + * the vector of segments so that it's pages will also be + * counted as destination pages. 
+ */ + result = -ENOMEM; + image->control_code_page = kimage_alloc_control_pages(image, + get_order(KEXEC_CONTROL_CODE_SIZE)); + if (!image->control_code_page) { + printk(KERN_ERR "Could not allocate control_code_buffer\n"); + goto out; + } + + result = 0; + out: + if (result == 0) { + *rimage = image; + } else { + kfree(image); + } + return result; +} + +static int kimage_is_destination_range( + struct kimage *image, unsigned long start, unsigned long end) +{ + unsigned long i; + + for (i = 0; i < image->nr_segments; i++) { + unsigned long mstart, mend; + mstart = image->segment[i].mem; + mend = mstart + image->segment[i].memsz; + if ((end > mstart) && (start < mend)) { + return 1; + } + } + return 0; +} + +static struct page *kimage_alloc_pages(unsigned int gfp_mask, unsigned int order) +{ + struct page *pages; + pages = alloc_pages(gfp_mask, order); + if (pages) { + unsigned int count, i; + pages->mapping = NULL; + pages->private = order; + count = 1 << order; + for(i = 0; i < count; i++) { + SetPageReserved(pages + i); + } + } + return pages; +} + +static void kimage_free_pages(struct page *page) +{ + unsigned int order, count, i; + order = page->private; + count = 1 << order; + for(i = 0; i < count; i++) { + ClearPageReserved(page + i); + } + __free_pages(page, order); +} + +static void kimage_free_page_list(struct list_head *list) +{ + struct list_head *pos, *next; + list_for_each_safe(pos, next, list) { + struct page *page; + + page = list_entry(pos, struct page, lru); + list_del(&page->lru); + + kimage_free_pages(page); + } +} + +static struct page *kimage_alloc_normal_control_pages( + struct kimage *image, unsigned int order) +{ + /* Control pages are special, they are the intermediaries + * that are needed while we copy the rest of the pages + * to their final resting place. As such they must + * not conflict with either the destination addresses + * or memory the kernel is already using. + * + * The only case where we really need more than one of + * these are for architectures where we cannot disable + * the MMU and must instead generate an identity mapped + * page table for all of the memory. + * + * At worst this runs in O(N) of the image size. + */ + struct list_head extra_pages; + struct page *pages; + unsigned int count; + + count = 1 << order; + INIT_LIST_HEAD(&extra_pages); + + /* Loop while I can allocate a page and the page allocated + * is a destination page. + */ + do { + unsigned long pfn, epfn, addr, eaddr; + pages = kimage_alloc_pages(GFP_KERNEL, order); + if (!pages) + break; + pfn = page_to_pfn(pages); + epfn = pfn + count; + addr = pfn << PAGE_SHIFT; + eaddr = epfn << PAGE_SHIFT; + if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || + kimage_is_destination_range(image, addr, eaddr)) + { + list_add(&pages->lru, &extra_pages); + pages = NULL; + } + } while(!pages); + if (pages) { + /* Remember the allocated page... */ + list_add(&pages->lru, &image->control_pages); + + /* Because the page is already in it's destination + * location we will never allocate another page at + * that address. Therefore kimage_alloc_pages + * will not return it (again) and we don't need + * to give it an entry in image->segment[]. + */ + } + /* Deal with the destination pages I have inadvertently allocated. + * + * Ideally I would convert multi-page allocations into single + * page allocations, and add everyting to image->dest_pages. + * + * For now it is simpler to just free the pages. 
+ */ + kimage_free_page_list(&extra_pages); + return pages; + +} + +static struct page *kimage_alloc_crash_control_pages( + struct kimage *image, unsigned int order) +{ + /* Control pages are special, they are the intermediaries + * that are needed while we copy the rest of the pages + * to their final resting place. As such they must + * not conflict with either the destination addresses + * or memory the kernel is already using. + * + * Control pages are also the only pags we must allocate + * when loading a crash kernel. All of the other pages + * are specified by the segments and we just memcpy + * into them directly. + * + * The only case where we really need more than one of + * these are for architectures where we cannot disable + * the MMU and must instead generate an identity mapped + * page table for all of the memory. + * + * Given the low demand this implements a very simple + * allocator that finds the first hole of the appropriate + * size in the reserved memory region, and allocates all + * of the memory up to and including the hole. + */ + unsigned long hole_start, hole_end, size; + struct page *pages; + pages = NULL; + size = (1 << order) << PAGE_SHIFT; + hole_start = (image->control_page + (size - 1)) & ~(size - 1); + hole_end = hole_start + size - 1; + while(hole_end <= crashk_res.end) { + unsigned long i; + if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) { + break; + } + if (hole_end > crashk_res.end) { + break; + } + /* See if I overlap any of the segments */ + for(i = 0; i < image->nr_segments; i++) { + unsigned long mstart, mend; + mstart = image->segment[i].mem; + mend = mstart + image->segment[i].memsz - 1; + if ((hole_end >= mstart) && (hole_start <= mend)) { + /* Advance the hole to the end of the segment */ + hole_start = (mend + (size - 1)) & ~(size - 1); + hole_end = hole_start + size - 1; + break; + } + } + /* If I don't overlap any segments I have found my hole! 
*/ + if (i == image->nr_segments) { + pages = pfn_to_page(hole_start >> PAGE_SHIFT); + break; + } + } + if (pages) { + image->control_page = hole_end; + } + return pages; +} + + +struct page *kimage_alloc_control_pages( + struct kimage *image, unsigned int order) +{ + struct page *pages = NULL; + switch(image->type) { + case KEXEC_TYPE_DEFAULT: + pages = kimage_alloc_normal_control_pages(image, order); + break; + case KEXEC_TYPE_CRASH: + pages = kimage_alloc_crash_control_pages(image, order); + break; + } + return pages; +} + +static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) +{ + if (*image->entry != 0) { + image->entry++; + } + if (image->entry == image->last_entry) { + kimage_entry_t *ind_page; + struct page *page; + page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); + if (!page) { + return -ENOMEM; + } + ind_page = page_address(page); + *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; + image->entry = ind_page; + image->last_entry = + ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); + } + *image->entry = entry; + image->entry++; + *image->entry = 0; + return 0; +} + +static int kimage_set_destination( + struct kimage *image, unsigned long destination) +{ + int result; + + destination &= PAGE_MASK; + result = kimage_add_entry(image, destination | IND_DESTINATION); + if (result == 0) { + image->destination = destination; + } + return result; +} + + +static int kimage_add_page(struct kimage *image, unsigned long page) +{ + int result; + + page &= PAGE_MASK; + result = kimage_add_entry(image, page | IND_SOURCE); + if (result == 0) { + image->destination += PAGE_SIZE; + } + return result; +} + + +static void kimage_free_extra_pages(struct kimage *image) +{ + /* Walk through and free any extra destination pages I may have */ + kimage_free_page_list(&image->dest_pages); + + /* Walk through and free any unuseable pages I have cached */ + kimage_free_page_list(&image->unuseable_pages); + +} +static int kimage_terminate(struct kimage *image) +{ + if (*image->entry != 0) { + image->entry++; + } + *image->entry = IND_DONE; + return 0; +} + +#define for_each_kimage_entry(image, ptr, entry) \ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ + ptr = (entry & IND_INDIRECTION)? \ + phys_to_virt((entry & PAGE_MASK)): ptr +1) + +static void kimage_free_entry(kimage_entry_t entry) +{ + struct page *page; + + page = pfn_to_page(entry >> PAGE_SHIFT); + kimage_free_pages(page); +} + +static void kimage_free(struct kimage *image) +{ + kimage_entry_t *ptr, entry; + kimage_entry_t ind = 0; + + if (!image) + return; + kimage_free_extra_pages(image); + for_each_kimage_entry(image, ptr, entry) { + if (entry & IND_INDIRECTION) { + /* Free the previous indirection page */ + if (ind & IND_INDIRECTION) { + kimage_free_entry(ind); + } + /* Save this indirection page until we are + * done with it. + */ + ind = entry; + } + else if (entry & IND_SOURCE) { + kimage_free_entry(entry); + } + } + /* Free the final indirection page */ + if (ind & IND_INDIRECTION) { + kimage_free_entry(ind); + } + + /* Handle any machine specific cleanup */ + machine_kexec_cleanup(image); + + /* Free the kexec control pages... 
*/ + kimage_free_page_list(&image->control_pages); + kfree(image); +} + +static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page) +{ + kimage_entry_t *ptr, entry; + unsigned long destination = 0; + + for_each_kimage_entry(image, ptr, entry) { + if (entry & IND_DESTINATION) { + destination = entry & PAGE_MASK; + } + else if (entry & IND_SOURCE) { + if (page == destination) { + return ptr; + } + destination += PAGE_SIZE; + } + } + return 0; +} + +static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination) +{ + /* + * Here we implement safeguards to ensure that a source page + * is not copied to its destination page before the data on + * the destination page is no longer useful. + * + * To do this we maintain the invariant that a source page is + * either its own destination page, or it is not a + * destination page at all. + * + * That is slightly stronger than required, but the proof + * that no problems will not occur is trivial, and the + * implementation is simply to verify. + * + * When allocating all pages normally this algorithm will run + * in O(N) time, but in the worst case it will run in O(N^2) + * time. If the runtime is a problem the data structures can + * be fixed. + */ + struct page *page; + unsigned long addr; + + /* + * Walk through the list of destination pages, and see if I + * have a match. + */ + list_for_each_entry(page, &image->dest_pages, lru) { + addr = page_to_pfn(page) << PAGE_SHIFT; + if (addr == destination) { + list_del(&page->lru); + return page; + } + } + page = NULL; + while (1) { + kimage_entry_t *old; + + /* Allocate a page, if we run out of memory give up */ + page = kimage_alloc_pages(gfp_mask, 0); + if (!page) { + return 0; + } + /* If the page cannot be used file it away */ + if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { + list_add(&page->lru, &image->unuseable_pages); + continue; + } + addr = page_to_pfn(page) << PAGE_SHIFT; + + /* If it is the destination page we want use it */ + if (addr == destination) + break; + + /* If the page is not a destination page use it */ + if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE)) + break; + + /* + * I know that the page is someones destination page. + * See if there is already a source page for this + * destination page. And if so swap the source pages. + */ + old = kimage_dst_used(image, addr); + if (old) { + /* If so move it */ + unsigned long old_addr; + struct page *old_page; + + old_addr = *old & PAGE_MASK; + old_page = pfn_to_page(old_addr >> PAGE_SHIFT); + copy_highpage(page, old_page); + *old = addr | (*old & ~PAGE_MASK); + + /* The old page I have found cannot be a + * destination page, so return it. + */ + addr = old_addr; + page = old_page; + break; + } + else { + /* Place the page on the destination list I + * will use it later. 
+ */ + list_add(&page->lru, &image->dest_pages); + } + } + return page; +} + +static int kimage_load_normal_segment(struct kimage *image, + struct kexec_segment *segment) +{ + unsigned long maddr; + unsigned long ubytes, mbytes; + int result; + unsigned char *buf; + + result = 0; + buf = segment->buf; + ubytes = segment->bufsz; + mbytes = segment->memsz; + maddr = segment->mem; + + result = kimage_set_destination(image, maddr); + if (result < 0) { + goto out; + } + while(mbytes) { + struct page *page; + char *ptr; + size_t uchunk, mchunk; + page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); + if (page == 0) { + result = -ENOMEM; + goto out; + } + result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT); + if (result < 0) { + goto out; + } + ptr = kmap(page); + /* Start with a clear page */ + memset(ptr, 0, PAGE_SIZE); + ptr += maddr & ~PAGE_MASK; + mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); + if (mchunk > mbytes) { + mchunk = mbytes; + } + uchunk = mchunk; + if (uchunk > ubytes) { + uchunk = ubytes; + } + result = copy_from_user(ptr, buf, uchunk); + kunmap(page); + if (result) { + result = (result < 0) ? result : -EIO; + goto out; + } + ubytes -= uchunk; + maddr += mchunk; + buf += mchunk; + mbytes -= mchunk; + } + out: + return result; +} + +static int kimage_load_crash_segment(struct kimage *image, + struct kexec_segment *segment) +{ + /* For crash dumps kernels we simply copy the data from + * user space to it's destination. + * We do things a page at a time for the sake of kmap. + */ + unsigned long maddr; + unsigned long ubytes, mbytes; + int result; + unsigned char *buf; + + result = 0; + buf = segment->buf; + ubytes = segment->bufsz; + mbytes = segment->memsz; + maddr = segment->mem; + while(mbytes) { + struct page *page; + char *ptr; + size_t uchunk, mchunk; + page = pfn_to_page(maddr >> PAGE_SHIFT); + if (page == 0) { + result = -ENOMEM; + goto out; + } + ptr = kmap(page); + ptr += maddr & ~PAGE_MASK; + mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); + if (mchunk > mbytes) { + mchunk = mbytes; + } + uchunk = mchunk; + if (uchunk > ubytes) { + uchunk = ubytes; + /* Zero the trailing part of the page */ + memset(ptr + uchunk, 0, mchunk - uchunk); + } + result = copy_from_user(ptr, buf, uchunk); + kunmap(page); + if (result) { + result = (result < 0) ? result : -EIO; + goto out; + } + ubytes -= uchunk; + maddr += mchunk; + buf += mchunk; + mbytes -= mchunk; + } + out: + return result; +} + +static int kimage_load_segment(struct kimage *image, + struct kexec_segment *segment) +{ + int result = -ENOMEM; + switch(image->type) { + case KEXEC_TYPE_DEFAULT: + result = kimage_load_normal_segment(image, segment); + break; + case KEXEC_TYPE_CRASH: + result = kimage_load_crash_segment(image, segment); + break; + } + return result; +} + +/* + * Exec Kernel system call: for obvious reasons only root may call it. + * + * This call breaks up into three pieces. + * - A generic part which loads the new kernel from the current + * address space, and very carefully places the data in the + * allocated pages. + * + * - A generic part that interacts with the kernel and tells all of + * the devices to shut down. Preventing on-going dmas, and placing + * the devices in a consistent state so a later kernel can + * reinitialize them. + * + * - A machine specific part that includes the syscall number + * and the copies the image to it's final destination. And + * jumps into the image at entry. 
+ * + * kexec does not sync, or unmount filesystems so if you need + * that to happen you need to do that yourself. + */ +struct kimage *kexec_image = NULL; +static struct kimage *kexec_crash_image = NULL; +/* + * A home grown binary mutex. + * Nothing can wait so this mutex is safe to use + * in interrupt context :) + */ +static int kexec_lock = 0; + +asmlinkage long sys_kexec_load(unsigned long entry, + unsigned long nr_segments, struct kexec_segment __user *segments, + unsigned long flags) +{ + struct kimage **dest_image, *image; + int locked; + int result; + + /* We only trust the superuser with rebooting the system. */ + if (!capable(CAP_SYS_BOOT)) + return -EPERM; + + /* + * Verify we have a legal set of flags + * This leaves us room for future extensions. + */ + if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK)) + return -EINVAL; + + /* Verify we are on the appropriate architecture */ + if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && + ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) + { + return -EINVAL; + } + + /* Put an artificial cap on the number + * of segments passed to kexec_load. + */ + if (nr_segments > KEXEC_SEGMENT_MAX) + return -EINVAL; + + image = NULL; + result = 0; + + /* Because we write directly to the reserved memory + * region when loading crash kernels we need a mutex here to + * prevent multiple crash kernels from attempting to load + * simultaneously, and to prevent a crash kernel from loading + * over the top of a in use crash kernel. + * + * KISS: always take the mutex. + */ + locked = xchg(&kexec_lock, 1); + if (locked) { + return -EBUSY; + } + dest_image = &kexec_image; + if (flags & KEXEC_ON_CRASH) { + dest_image = &kexec_crash_image; + } + if (nr_segments > 0) { + unsigned long i; + /* Loading another kernel to reboot into */ + if ((flags & KEXEC_ON_CRASH) == 0) { + result = kimage_normal_alloc(&image, entry, nr_segments, segments); + } + /* Loading another kernel to switch to if this one crashes */ + else if (flags & KEXEC_ON_CRASH) { + /* Free any current crash dump kernel before + * we corrupt it. + */ + kimage_free(xchg(&kexec_crash_image, NULL)); + result = kimage_crash_alloc(&image, entry, nr_segments, segments); + } + if (result) { + goto out; + } + result = machine_kexec_prepare(image); + if (result) { + goto out; + } + for(i = 0; i < nr_segments; i++) { + result = kimage_load_segment(image, &image->segment[i]); + if (result) { + goto out; + } + } + result = kimage_terminate(image); + if (result) { + goto out; + } + } + /* Install the new kernel, and Uninstall the old */ + image = xchg(dest_image, image); + + out: + xchg(&kexec_lock, 0); /* Release the mutex */ + kimage_free(image); + return result; +} + +#ifdef CONFIG_COMPAT +asmlinkage long compat_sys_kexec_load(unsigned long entry, + unsigned long nr_segments, struct compat_kexec_segment __user *segments, + unsigned long flags) +{ + struct compat_kexec_segment in; + struct kexec_segment out, __user *ksegments; + unsigned long i, result; + + /* Don't allow clients that don't understand the native + * architecture to do anything. 
+ */ + if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) { + return -EINVAL; + } + + if (nr_segments > KEXEC_SEGMENT_MAX) { + return -EINVAL; + } + + ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); + for (i=0; i < nr_segments; i++) { + result = copy_from_user(&in, &segments[i], sizeof(in)); + if (result) { + return -EFAULT; + } + + out.buf = compat_ptr(in.buf); + out.bufsz = in.bufsz; + out.mem = in.mem; + out.memsz = in.memsz; + + result = copy_to_user(&ksegments[i], &out, sizeof(out)); + if (result) { + return -EFAULT; + } + } + + return sys_kexec_load(entry, nr_segments, ksegments, flags); +} +#endif + +void crash_kexec(void) +{ + struct kimage *image; + int locked; + + + /* Take the kexec_lock here to prevent sys_kexec_load + * running on one cpu from replacing the crash kernel + * we are using after a panic on a different cpu. + * + * If the crash kernel was not located in a fixed area + * of memory the xchg(&kexec_crash_image) would be + * sufficient. But since I reuse the memory... + */ + locked = xchg(&kexec_lock, 1); + if (!locked) { + image = xchg(&kexec_crash_image, NULL); + if (image) { + machine_crash_shutdown(); + machine_kexec(image); + } + xchg(&kexec_lock, 0); + } +} diff --git a/kernel/panic.c b/kernel/panic.c index 081f7465fc8d..66f43d33cd80 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -18,6 +18,7 @@ #include #include #include +#include int panic_timeout; int panic_on_oops; @@ -63,6 +64,13 @@ NORET_TYPE void panic(const char * fmt, ...) unsigned long caller = (unsigned long) __builtin_return_address(0); #endif + /* + * It's possible to come here directly from a panic-assertion and not + * have preempt disabled. Some functions called from here want + * preempt to be disabled. No point enabling it later though... + */ + preempt_disable(); + bust_spinlocks(1); va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); @@ -70,7 +78,19 @@ NORET_TYPE void panic(const char * fmt, ...) printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); bust_spinlocks(0); + /* + * If we have crashed and we have a crash kernel loaded let it handle + * everything else. + * Do we want to call this before we try to display a message? + */ + crash_kexec(); + #ifdef CONFIG_SMP + /* + * Note smp_send_stop is the usual smp shutdown function, which + * unfortunately means it may not be hardened to work in a panic + * situation. + */ smp_send_stop(); #endif @@ -79,8 +99,7 @@ NORET_TYPE void panic(const char * fmt, ...) if (!panic_blink) panic_blink = no_blink; - if (panic_timeout > 0) - { + if (panic_timeout > 0) { /* * Delay timeout seconds before rebooting the machine. * We can't use the "normal" timers since we just panicked.. 
diff --git a/kernel/sys.c b/kernel/sys.c index dac10161ca23..9a24374c23bc 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include #include @@ -439,6 +441,24 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user machine_restart(buffer); break; +#ifdef CONFIG_KEXEC + case LINUX_REBOOT_CMD_KEXEC: + { + struct kimage *image; + image = xchg(&kexec_image, 0); + if (!image) { + unlock_kernel(); + return -EINVAL; + } + notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); + system_state = SYSTEM_RESTART; + device_shutdown(); + printk(KERN_EMERG "Starting new kernel\n"); + machine_shutdown(); + machine_kexec(image); + break; + } +#endif #ifdef CONFIG_SOFTWARE_SUSPEND case LINUX_REBOOT_CMD_SW_SUSPEND: { diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 6f15bea7d1a8..29196ce9b40f 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -18,6 +18,8 @@ cond_syscall(sys_acct); cond_syscall(sys_lookup_dcookie); cond_syscall(sys_swapon); cond_syscall(sys_swapoff); +cond_syscall(sys_kexec_load); +cond_syscall(compat_sys_kexec_load); cond_syscall(sys_init_module); cond_syscall(sys_delete_module); cond_syscall(sys_socketpair); -- cgit v1.2.3 From 50cccc699ed849d31c9e3f7643db33edade20e4e Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Sat, 25 Jun 2005 14:57:55 -0700 Subject: [PATCH] Kexec on panic vmlinux initrd fix This is a minor bug fix in kexec to resolve the problem of loading panic kernel with initrd. o Problem: Loading a capture kenrel fails if initrd is also being loaded. This has been observed for vmlinux image for kexec on panic case. o This patch fixes the problem. In segment location and size verification logic, minor correction has been done. Segment memory end (mend) should be mstart + memsz - 1. This one byte offset was source of failure for initrd loading which was being loaded at hole boundary. Signed-off-by: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kexec.c b/kernel/kexec.c index def9c73ec9a6..a0411b3bd54a 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -264,7 +264,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, for (i = 0; i < nr_segments; i++) { unsigned long mstart, mend; mstart = image->segment[i].mem; - mend = mstart + image->segment[i].memsz; + mend = mstart + image->segment[i].memsz - 1; /* Ensure we are within the crash kernel limits */ if ((mstart < crashk_res.start) || (mend > crashk_res.end)) goto out; -- cgit v1.2.3 From 625f1c8219d95300ed32e4c67eb62a50ded095ba Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Sat, 25 Jun 2005 14:58:12 -0700 Subject: [PATCH] Kdump: Export crash notes section address through sysfs o Following patch exports kexec global variable "crash_notes" to user space through sysfs as kernel attribute in /sys/kernel. 
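For reference, a minimal userspace sketch (not taken from this patch) of consuming the new attribute, assuming it shows up as /sys/kernel/crash_notes and prints a single %p-formatted value as the show routine in the hunk below does:

#include <stdio.h>

int main(void)
{
	unsigned long long addr;
	FILE *f = fopen("/sys/kernel/crash_notes", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llx", &addr) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("crash_notes at 0x%llx\n", addr);
	return 0;
}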
Signed-off-by: Maneesh Soni Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/ksysfs.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'kernel') diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 1f064a63f8cf..015fb69ad94d 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -30,12 +30,25 @@ static ssize_t hotplug_seqnum_show(struct subsystem *subsys, char *page) KERNEL_ATTR_RO(hotplug_seqnum); #endif +#ifdef CONFIG_KEXEC +#include + +static ssize_t crash_notes_show(struct subsystem *subsys, char *page) +{ + return sprintf(page, "%p\n", (void *)crash_notes); +} +KERNEL_ATTR_RO(crash_notes); +#endif + decl_subsys(kernel, NULL, NULL); EXPORT_SYMBOL_GPL(kernel_subsys); static struct attribute * kernel_attrs[] = { #ifdef CONFIG_HOTPLUG &hotplug_seqnum_attr.attr, +#endif +#ifdef CONFIG_KEXEC + &crash_notes_attr.attr, #endif NULL }; -- cgit v1.2.3 From 60e64d46a58236e3c718074372cab6a5b56a3b15 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Sat, 25 Jun 2005 14:58:19 -0700 Subject: [PATCH] kdump: Routines for copying dump pages This patch provides the interfaces necessary to read the dump contents, treating it as a high memory device. Signed off by Hariprasad Nellitheertha Signed-off-by: Eric Biederman Signed-off-by: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Makefile | 1 + kernel/crash_dump.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 kernel/crash_dump.c (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index cfc8b0dea950..cb05cd05d237 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_AUDITSYSCALL) += auditsc.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_SYSFS) += ksysfs.o obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_SECCOMP) += seccomp.o ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c new file mode 100644 index 000000000000..5a1e6d5d203e --- /dev/null +++ b/kernel/crash_dump.c @@ -0,0 +1,49 @@ +/* + * kernel/crash_dump.c - Memory preserving reboot related code. + * + * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) + * Copyright (C) IBM Corporation, 2004. All rights reserved + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * Copy a page from "oldmem". For this page, there is no pte mapped + * in the current kernel. We stitch up a pte, similar to kmap_atomic. + */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, int userbuf) +{ + void *page, *vaddr; + + if (!csize) + return 0; + + page = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!page) + return -ENOMEM; + + vaddr = kmap_atomic_pfn(pfn, KM_PTE0); + copy_page(page, vaddr); + kunmap_atomic(vaddr, KM_PTE0); + + if (userbuf) { + if (copy_to_user(buf, (page + offset), csize)) { + kfree(page); + return -EFAULT; + } + } else { + memcpy(buf, (page + offset), csize); + } + + kfree(page); + return csize; +} -- cgit v1.2.3 From 2030eae52b416a9a9f0ffda74c982b7f1e19496d Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Sat, 25 Jun 2005 14:58:20 -0700 Subject: [PATCH] Retrieve elfcorehdr address from command line This patch adds support for retrieving the address of elf core header if one is passed in command line. 
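The arch-side parsing is not part of this hunk. Purely as a sketch, a boot option of this shape is typically wired up with __setup(), storing into the elfcorehdr_addr variable introduced below; the option name and handler here are illustrative assumptions, not the actual arch code:

#include <linux/kernel.h>
#include <linux/init.h>

extern unsigned long long elfcorehdr_addr;

/* Sketch: accept "elfcorehdr=<addr>" and remember the physical address */
static int __init setup_elfcorehdr(char *arg)
{
	if (!arg)
		return 0;
	elfcorehdr_addr = memparse(arg, &arg);
	return 1;
}
__setup("elfcorehdr=", setup_elfcorehdr);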
Signed-off-by: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/crash_dump.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c index 5a1e6d5d203e..10b966c3744c 100644 --- a/kernel/crash_dump.c +++ b/kernel/crash_dump.c @@ -15,6 +15,9 @@ #include #include +/* Stores the physical address of elf header of crash image. */ +unsigned long long elfcorehdr_addr; + /* * Copy a page from "oldmem". For this page, there is no pte mapped * in the current kernel. We stitch up a pte, similar to kmap_atomic. -- cgit v1.2.3 From 666bfddbe8b8fd4fd44617d6c55193d5ac7edb29 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Sat, 25 Jun 2005 14:58:21 -0700 Subject: [PATCH] kdump: Access dump file in elf format (/proc/vmcore) From: "Vivek Goyal" o Support for /proc/vmcore interface. This interface exports elf core image either in ELF32 or ELF64 format, depending on the format in which elf headers have been stored by crashed kernel. o Added support for CONFIG_VMCORE config option. o Removed the dependency on /proc/kcore. From: "Eric W. Biederman" This patch has been refactored to more closely match the prevailing style in the affected files. And to clearly indicate the dependency between /proc/kcore and proc/vmcore.c From: Hariprasad Nellitheertha This patch contains the code that provides an ELF format interface to the previous kernel's memory post kexec reboot. Signed off by Hariprasad Nellitheertha Signed-off-by: Eric Biederman Signed-off-by: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/crash_dump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c index 10b966c3744c..459ba49e376a 100644 --- a/kernel/crash_dump.c +++ b/kernel/crash_dump.c @@ -16,7 +16,7 @@ #include /* Stores the physical address of elf header of crash image. */ -unsigned long long elfcorehdr_addr; +unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; /* * Copy a page from "oldmem". For this page, there is no pte mapped -- cgit v1.2.3 From 6e274d144302068a00794ec22e73520c0615cb6f Mon Sep 17 00:00:00 2001 From: Alexander Nyberg Date: Sat, 25 Jun 2005 14:58:26 -0700 Subject: [PATCH] kdump: Use real pt_regs from exception Makes kexec_crashdump() take a pt_regs * as an argument. This allows to get exact register state at the point of the crash. If we come from direct panic assertion NULL will be passed and the current registers saved before crashdump. This hooks into two places: die(): check the conditions under which we will panic when calling do_exit and go there directly with the pt_regs that caused the fatal fault. die_nmi(): If we receive an NMI lockup while in the kernel use the pt_regs and go directly to crash_kexec(). We're probably nested up badly at this point so this might be the only chance to escape with proper information. 
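The die()/die_nmi() changes live in the arch trees and are not shown here. Purely to illustrate the intended call shape (the helper name is made up, and it assumes the declarations are exported via <linux/kexec.h>), a fatal-oops path hands its registers straight over:

#include <linux/kexec.h>
#include <linux/sched.h>

/* Sketch of the hook the description refers to; kexec_should_crash()
 * is added by the kernel/kexec.c hunk below.
 */
static void oops_maybe_crash(struct pt_regs *regs)
{
	if (kexec_should_crash(current))
		crash_kexec(regs);
}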
Signed-off-by: Alexander Nyberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec.c | 13 +++++++++++-- kernel/panic.c | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/kexec.c b/kernel/kexec.c index a0411b3bd54a..277f22afe74b 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -18,6 +18,8 @@ #include #include #include +#include + #include #include #include @@ -32,6 +34,13 @@ struct resource crashk_res = { .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; +int kexec_should_crash(struct task_struct *p) +{ + if (in_interrupt() || !p->pid || p->pid == 1 || panic_on_oops) + return 1; + return 0; +} + /* * When kexec transitions to the new kernel there is a one-to-one * mapping between physical and virtual addresses. On processors @@ -1010,7 +1019,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, } #endif -void crash_kexec(void) +void crash_kexec(struct pt_regs *regs) { struct kimage *image; int locked; @@ -1028,7 +1037,7 @@ void crash_kexec(void) if (!locked) { image = xchg(&kexec_crash_image, NULL); if (image) { - machine_crash_shutdown(); + machine_crash_shutdown(regs); machine_kexec(image); } xchg(&kexec_lock, 0); diff --git a/kernel/panic.c b/kernel/panic.c index 66f43d33cd80..74ba5f3e46c7 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -83,7 +83,7 @@ NORET_TYPE void panic(const char * fmt, ...) * everything else. * Do we want to call this before we try to display a message? */ - crash_kexec(); + crash_kexec(NULL); #ifdef CONFIG_SMP /* -- cgit v1.2.3 From 72414d3f1d22fc3e311b162fca95c430048d38ce Mon Sep 17 00:00:00 2001 From: Maneesh Soni Date: Sat, 25 Jun 2005 14:58:28 -0700 Subject: [PATCH] kexec code cleanup o Following patch provides purely cosmetic changes and corrects CodingStyle guide lines related certain issues like below in kexec related files o braces for one line "if" statements, "for" loops, o more than 80 column wide lines, o No space after "while", "for" and "switch" key words o Changes: o take-2: Removed the extra tab before "case" key words. 
o take-3: Put operator at the end of line and space before "*/" Signed-off-by: Maneesh Soni Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec.c | 302 ++++++++++++++++++++++++++++++--------------------------- 1 file changed, 160 insertions(+), 142 deletions(-) (limited to 'kernel') diff --git a/kernel/kexec.c b/kernel/kexec.c index 277f22afe74b..7843548cf2d9 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -87,12 +87,15 @@ int kexec_should_crash(struct task_struct *p) */ #define KIMAGE_NO_DEST (-1UL) -static int kimage_is_destination_range( - struct kimage *image, unsigned long start, unsigned long end); -static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest); +static int kimage_is_destination_range(struct kimage *image, + unsigned long start, unsigned long end); +static struct page *kimage_alloc_page(struct kimage *image, + unsigned int gfp_mask, + unsigned long dest); static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, - unsigned long nr_segments, struct kexec_segment __user *segments) + unsigned long nr_segments, + struct kexec_segment __user *segments) { size_t segment_bytes; struct kimage *image; @@ -102,9 +105,9 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, /* Allocate a controlling structure */ result = -ENOMEM; image = kmalloc(sizeof(*image), GFP_KERNEL); - if (!image) { + if (!image) goto out; - } + memset(image, 0, sizeof(*image)); image->head = 0; image->entry = &image->head; @@ -145,6 +148,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, result = -EADDRNOTAVAIL; for (i = 0; i < nr_segments; i++) { unsigned long mstart, mend; + mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz; if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) @@ -159,12 +163,13 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, * easy explanation as one segment stops on another. */ result = -EINVAL; - for(i = 0; i < nr_segments; i++) { + for (i = 0; i < nr_segments; i++) { unsigned long mstart, mend; unsigned long j; + mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz; - for(j = 0; j < i; j++) { + for (j = 0; j < i; j++) { unsigned long pstart, pend; pstart = image->segment[j].mem; pend = pstart + image->segment[j].memsz; @@ -180,25 +185,25 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, * later on. 
*/ result = -EINVAL; - for(i = 0; i < nr_segments; i++) { + for (i = 0; i < nr_segments; i++) { if (image->segment[i].bufsz > image->segment[i].memsz) goto out; } - result = 0; - out: - if (result == 0) { +out: + if (result == 0) *rimage = image; - } else { + else kfree(image); - } + return result; } static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, - unsigned long nr_segments, struct kexec_segment __user *segments) + unsigned long nr_segments, + struct kexec_segment __user *segments) { int result; struct kimage *image; @@ -206,9 +211,9 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, /* Allocate and initialize a controlling structure */ image = NULL; result = do_kimage_alloc(&image, entry, nr_segments, segments); - if (result) { + if (result) goto out; - } + *rimage = image; /* @@ -218,7 +223,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, */ result = -ENOMEM; image->control_code_page = kimage_alloc_control_pages(image, - get_order(KEXEC_CONTROL_CODE_SIZE)); + get_order(KEXEC_CONTROL_CODE_SIZE)); if (!image->control_code_page) { printk(KERN_ERR "Could not allocate control_code_buffer\n"); goto out; @@ -226,16 +231,17 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, result = 0; out: - if (result == 0) { + if (result == 0) *rimage = image; - } else { + else kfree(image); - } + return result; } static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, - unsigned long nr_segments, struct kexec_segment *segments) + unsigned long nr_segments, + struct kexec_segment *segments) { int result; struct kimage *image; @@ -250,9 +256,8 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, /* Allocate and initialize a controlling structure */ result = do_kimage_alloc(&image, entry, nr_segments, segments); - if (result) { + if (result) goto out; - } /* Enable the special crash kernel control page * allocation policy. 
@@ -272,6 +277,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, result = -EADDRNOTAVAIL; for (i = 0; i < nr_segments; i++) { unsigned long mstart, mend; + mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz - 1; /* Ensure we are within the crash kernel limits */ @@ -279,7 +285,6 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, goto out; } - /* * Find a location for the control code buffer, and add * the vector of segments so that it's pages will also be @@ -287,80 +292,84 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, */ result = -ENOMEM; image->control_code_page = kimage_alloc_control_pages(image, - get_order(KEXEC_CONTROL_CODE_SIZE)); + get_order(KEXEC_CONTROL_CODE_SIZE)); if (!image->control_code_page) { printk(KERN_ERR "Could not allocate control_code_buffer\n"); goto out; } result = 0; - out: - if (result == 0) { +out: + if (result == 0) *rimage = image; - } else { + else kfree(image); - } + return result; } -static int kimage_is_destination_range( - struct kimage *image, unsigned long start, unsigned long end) +static int kimage_is_destination_range(struct kimage *image, + unsigned long start, + unsigned long end) { unsigned long i; for (i = 0; i < image->nr_segments; i++) { unsigned long mstart, mend; + mstart = image->segment[i].mem; - mend = mstart + image->segment[i].memsz; - if ((end > mstart) && (start < mend)) { + mend = mstart + image->segment[i].memsz; + if ((end > mstart) && (start < mend)) return 1; - } } + return 0; } -static struct page *kimage_alloc_pages(unsigned int gfp_mask, unsigned int order) +static struct page *kimage_alloc_pages(unsigned int gfp_mask, + unsigned int order) { struct page *pages; + pages = alloc_pages(gfp_mask, order); if (pages) { unsigned int count, i; pages->mapping = NULL; pages->private = order; count = 1 << order; - for(i = 0; i < count; i++) { + for (i = 0; i < count; i++) SetPageReserved(pages + i); - } } + return pages; } static void kimage_free_pages(struct page *page) { unsigned int order, count, i; + order = page->private; count = 1 << order; - for(i = 0; i < count; i++) { + for (i = 0; i < count; i++) ClearPageReserved(page + i); - } __free_pages(page, order); } static void kimage_free_page_list(struct list_head *list) { struct list_head *pos, *next; + list_for_each_safe(pos, next, list) { struct page *page; page = list_entry(pos, struct page, lru); list_del(&page->lru); - kimage_free_pages(page); } } -static struct page *kimage_alloc_normal_control_pages( - struct kimage *image, unsigned int order) +static struct page *kimage_alloc_normal_control_pages(struct kimage *image, + unsigned int order) { /* Control pages are special, they are the intermediaries * that are needed while we copy the rest of the pages @@ -387,6 +396,7 @@ static struct page *kimage_alloc_normal_control_pages( */ do { unsigned long pfn, epfn, addr, eaddr; + pages = kimage_alloc_pages(GFP_KERNEL, order); if (!pages) break; @@ -395,12 +405,12 @@ static struct page *kimage_alloc_normal_control_pages( addr = pfn << PAGE_SHIFT; eaddr = epfn << PAGE_SHIFT; if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || - kimage_is_destination_range(image, addr, eaddr)) - { + kimage_is_destination_range(image, addr, eaddr)) { list_add(&pages->lru, &extra_pages); pages = NULL; } - } while(!pages); + } while (!pages); + if (pages) { /* Remember the allocated page... 
*/ list_add(&pages->lru, &image->control_pages); @@ -420,12 +430,12 @@ static struct page *kimage_alloc_normal_control_pages( * For now it is simpler to just free the pages. */ kimage_free_page_list(&extra_pages); - return pages; + return pages; } -static struct page *kimage_alloc_crash_control_pages( - struct kimage *image, unsigned int order) +static struct page *kimage_alloc_crash_control_pages(struct kimage *image, + unsigned int order) { /* Control pages are special, they are the intermediaries * that are needed while we copy the rest of the pages @@ -450,21 +460,22 @@ static struct page *kimage_alloc_crash_control_pages( */ unsigned long hole_start, hole_end, size; struct page *pages; + pages = NULL; size = (1 << order) << PAGE_SHIFT; hole_start = (image->control_page + (size - 1)) & ~(size - 1); hole_end = hole_start + size - 1; - while(hole_end <= crashk_res.end) { + while (hole_end <= crashk_res.end) { unsigned long i; - if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) { + + if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) break; - } - if (hole_end > crashk_res.end) { + if (hole_end > crashk_res.end) break; - } /* See if I overlap any of the segments */ - for(i = 0; i < image->nr_segments; i++) { + for (i = 0; i < image->nr_segments; i++) { unsigned long mstart, mend; + mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz - 1; if ((hole_end >= mstart) && (hole_start <= mend)) { @@ -480,18 +491,19 @@ static struct page *kimage_alloc_crash_control_pages( break; } } - if (pages) { + if (pages) image->control_page = hole_end; - } + return pages; } -struct page *kimage_alloc_control_pages( - struct kimage *image, unsigned int order) +struct page *kimage_alloc_control_pages(struct kimage *image, + unsigned int order) { struct page *pages = NULL; - switch(image->type) { + + switch (image->type) { case KEXEC_TYPE_DEFAULT: pages = kimage_alloc_normal_control_pages(image, order); break; @@ -499,43 +511,46 @@ struct page *kimage_alloc_control_pages( pages = kimage_alloc_crash_control_pages(image, order); break; } + return pages; } static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) { - if (*image->entry != 0) { + if (*image->entry != 0) image->entry++; - } + if (image->entry == image->last_entry) { kimage_entry_t *ind_page; struct page *page; + page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); - if (!page) { + if (!page) return -ENOMEM; - } + ind_page = page_address(page); *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; image->entry = ind_page; - image->last_entry = - ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); + image->last_entry = ind_page + + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); } *image->entry = entry; image->entry++; *image->entry = 0; + return 0; } -static int kimage_set_destination( - struct kimage *image, unsigned long destination) +static int kimage_set_destination(struct kimage *image, + unsigned long destination) { int result; destination &= PAGE_MASK; result = kimage_add_entry(image, destination | IND_DESTINATION); - if (result == 0) { + if (result == 0) image->destination = destination; - } + return result; } @@ -546,9 +561,9 @@ static int kimage_add_page(struct kimage *image, unsigned long page) page &= PAGE_MASK; result = kimage_add_entry(image, page | IND_SOURCE); - if (result == 0) { + if (result == 0) image->destination += PAGE_SIZE; - } + return result; } @@ -564,10 +579,11 @@ static void kimage_free_extra_pages(struct kimage *image) } static int kimage_terminate(struct kimage *image) { - if (*image->entry != 0) 
{ + if (*image->entry != 0) image->entry++; - } + *image->entry = IND_DONE; + return 0; } @@ -591,26 +607,24 @@ static void kimage_free(struct kimage *image) if (!image) return; + kimage_free_extra_pages(image); for_each_kimage_entry(image, ptr, entry) { if (entry & IND_INDIRECTION) { /* Free the previous indirection page */ - if (ind & IND_INDIRECTION) { + if (ind & IND_INDIRECTION) kimage_free_entry(ind); - } /* Save this indirection page until we are * done with it. */ ind = entry; } - else if (entry & IND_SOURCE) { + else if (entry & IND_SOURCE) kimage_free_entry(entry); - } } /* Free the final indirection page */ - if (ind & IND_INDIRECTION) { + if (ind & IND_INDIRECTION) kimage_free_entry(ind); - } /* Handle any machine specific cleanup */ machine_kexec_cleanup(image); @@ -620,26 +634,28 @@ static void kimage_free(struct kimage *image) kfree(image); } -static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page) +static kimage_entry_t *kimage_dst_used(struct kimage *image, + unsigned long page) { kimage_entry_t *ptr, entry; unsigned long destination = 0; for_each_kimage_entry(image, ptr, entry) { - if (entry & IND_DESTINATION) { + if (entry & IND_DESTINATION) destination = entry & PAGE_MASK; - } else if (entry & IND_SOURCE) { - if (page == destination) { + if (page == destination) return ptr; - } destination += PAGE_SIZE; } } + return 0; } -static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination) +static struct page *kimage_alloc_page(struct kimage *image, + unsigned int gfp_mask, + unsigned long destination) { /* * Here we implement safeguards to ensure that a source page @@ -679,11 +695,11 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas /* Allocate a page, if we run out of memory give up */ page = kimage_alloc_pages(gfp_mask, 0); - if (!page) { + if (!page) return 0; - } /* If the page cannot be used file it away */ - if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { + if (page_to_pfn(page) > + (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { list_add(&page->lru, &image->unuseable_pages); continue; } @@ -694,7 +710,8 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas break; /* If the page is not a destination page use it */ - if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE)) + if (!kimage_is_destination_range(image, addr, + addr + PAGE_SIZE)) break; /* @@ -727,11 +744,12 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas list_add(&page->lru, &image->dest_pages); } } + return page; } static int kimage_load_normal_segment(struct kimage *image, - struct kexec_segment *segment) + struct kexec_segment *segment) { unsigned long maddr; unsigned long ubytes, mbytes; @@ -745,34 +763,36 @@ static int kimage_load_normal_segment(struct kimage *image, maddr = segment->mem; result = kimage_set_destination(image, maddr); - if (result < 0) { + if (result < 0) goto out; - } - while(mbytes) { + + while (mbytes) { struct page *page; char *ptr; size_t uchunk, mchunk; + page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); if (page == 0) { result = -ENOMEM; goto out; } - result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT); - if (result < 0) { + result = kimage_add_page(image, page_to_pfn(page) + << PAGE_SHIFT); + if (result < 0) goto out; - } + ptr = kmap(page); /* Start with a clear page */ memset(ptr, 0, PAGE_SIZE); ptr += maddr & ~PAGE_MASK; mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); - 
if (mchunk > mbytes) { + if (mchunk > mbytes) mchunk = mbytes; - } + uchunk = mchunk; - if (uchunk > ubytes) { + if (uchunk > ubytes) uchunk = ubytes; - } + result = copy_from_user(ptr, buf, uchunk); kunmap(page); if (result) { @@ -784,12 +804,12 @@ static int kimage_load_normal_segment(struct kimage *image, buf += mchunk; mbytes -= mchunk; } - out: +out: return result; } static int kimage_load_crash_segment(struct kimage *image, - struct kexec_segment *segment) + struct kexec_segment *segment) { /* For crash dumps kernels we simply copy the data from * user space to it's destination. @@ -805,10 +825,11 @@ static int kimage_load_crash_segment(struct kimage *image, ubytes = segment->bufsz; mbytes = segment->memsz; maddr = segment->mem; - while(mbytes) { + while (mbytes) { struct page *page; char *ptr; size_t uchunk, mchunk; + page = pfn_to_page(maddr >> PAGE_SHIFT); if (page == 0) { result = -ENOMEM; @@ -817,9 +838,9 @@ static int kimage_load_crash_segment(struct kimage *image, ptr = kmap(page); ptr += maddr & ~PAGE_MASK; mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); - if (mchunk > mbytes) { + if (mchunk > mbytes) mchunk = mbytes; - } + uchunk = mchunk; if (uchunk > ubytes) { uchunk = ubytes; @@ -837,15 +858,16 @@ static int kimage_load_crash_segment(struct kimage *image, buf += mchunk; mbytes -= mchunk; } - out: +out: return result; } static int kimage_load_segment(struct kimage *image, - struct kexec_segment *segment) + struct kexec_segment *segment) { int result = -ENOMEM; - switch(image->type) { + + switch (image->type) { case KEXEC_TYPE_DEFAULT: result = kimage_load_normal_segment(image, segment); break; @@ -853,6 +875,7 @@ static int kimage_load_segment(struct kimage *image, result = kimage_load_crash_segment(image, segment); break; } + return result; } @@ -885,9 +908,9 @@ static struct kimage *kexec_crash_image = NULL; */ static int kexec_lock = 0; -asmlinkage long sys_kexec_load(unsigned long entry, - unsigned long nr_segments, struct kexec_segment __user *segments, - unsigned long flags) +asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, + struct kexec_segment __user *segments, + unsigned long flags) { struct kimage **dest_image, *image; int locked; @@ -907,9 +930,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, /* Verify we are on the appropriate architecture */ if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) - { return -EINVAL; - } /* Put an artificial cap on the number * of segments passed to kexec_load. @@ -929,58 +950,59 @@ asmlinkage long sys_kexec_load(unsigned long entry, * KISS: always take the mutex. */ locked = xchg(&kexec_lock, 1); - if (locked) { + if (locked) return -EBUSY; - } + dest_image = &kexec_image; - if (flags & KEXEC_ON_CRASH) { + if (flags & KEXEC_ON_CRASH) dest_image = &kexec_crash_image; - } if (nr_segments > 0) { unsigned long i; + /* Loading another kernel to reboot into */ - if ((flags & KEXEC_ON_CRASH) == 0) { - result = kimage_normal_alloc(&image, entry, nr_segments, segments); - } + if ((flags & KEXEC_ON_CRASH) == 0) + result = kimage_normal_alloc(&image, entry, + nr_segments, segments); /* Loading another kernel to switch to if this one crashes */ else if (flags & KEXEC_ON_CRASH) { /* Free any current crash dump kernel before * we corrupt it. 
*/ kimage_free(xchg(&kexec_crash_image, NULL)); - result = kimage_crash_alloc(&image, entry, nr_segments, segments); + result = kimage_crash_alloc(&image, entry, + nr_segments, segments); } - if (result) { + if (result) goto out; - } + result = machine_kexec_prepare(image); - if (result) { + if (result) goto out; - } - for(i = 0; i < nr_segments; i++) { + + for (i = 0; i < nr_segments; i++) { result = kimage_load_segment(image, &image->segment[i]); - if (result) { + if (result) goto out; - } } result = kimage_terminate(image); - if (result) { + if (result) goto out; - } } /* Install the new kernel, and Uninstall the old */ image = xchg(dest_image, image); - out: +out: xchg(&kexec_lock, 0); /* Release the mutex */ kimage_free(image); + return result; } #ifdef CONFIG_COMPAT asmlinkage long compat_sys_kexec_load(unsigned long entry, - unsigned long nr_segments, struct compat_kexec_segment __user *segments, - unsigned long flags) + unsigned long nr_segments, + struct compat_kexec_segment __user *segments, + unsigned long flags) { struct compat_kexec_segment in; struct kexec_segment out, __user *ksegments; @@ -989,20 +1011,17 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, /* Don't allow clients that don't understand the native * architecture to do anything. */ - if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) { + if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) return -EINVAL; - } - if (nr_segments > KEXEC_SEGMENT_MAX) { + if (nr_segments > KEXEC_SEGMENT_MAX) return -EINVAL; - } ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); for (i=0; i < nr_segments; i++) { result = copy_from_user(&in, &segments[i], sizeof(in)); - if (result) { + if (result) return -EFAULT; - } out.buf = compat_ptr(in.buf); out.bufsz = in.bufsz; @@ -1010,9 +1029,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, out.memsz = in.memsz; result = copy_to_user(&ksegments[i], &out, sizeof(out)); - if (result) { + if (result) return -EFAULT; - } } return sys_kexec_load(entry, nr_segments, ksegments, flags); -- cgit v1.2.3 From 96ec3efdcbaea4f403f2a5f1204edbf903a01961 Mon Sep 17 00:00:00 2001 From: Domen Puncer Date: Sat, 25 Jun 2005 14:58:43 -0700 Subject: [PATCH] kernel/timer: fix msleep_interruptible() comment The comment for msleep_interruptible() is wrong, as it will ignore wait-queue events, but will wake up early for signals. 
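As a small illustration of what the corrected comment means (sketch only, the helper name is made up): the sleep is cut short by signals, never by wait-queue wakeups, and the return value is the time that was left:

#include <linux/delay.h>
#include <linux/errno.h>

static int sleep_half_second(void)
{
	unsigned long left = msleep_interruptible(500);

	if (left)	/* a signal arrived before the 500 ms elapsed */
		return -EINTR;
	return 0;
}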
Signed-off-by: Nishanth Aravamudan Signed-off-by: Domen Puncer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index 51ff917c9590..f2a11887a726 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1597,7 +1597,7 @@ void msleep(unsigned int msecs) EXPORT_SYMBOL(msleep); /** - * msleep_interruptible - sleep waiting for waitqueue interruptions + * msleep_interruptible - sleep waiting for signals * @msecs: Time in milliseconds to sleep for */ unsigned long msleep_interruptible(unsigned int msecs) -- cgit v1.2.3 From 5a6b454f8024bac68495b6cd51615feb0b54baa9 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sat, 25 Jun 2005 14:58:48 -0700 Subject: [PATCH] remove redundant NULL check before before kfree() in kernel/sysctl.c Signed-off-by: Jesper Juhl Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 24a4d12d5aa9..270ee7fadbd8 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1000,8 +1000,7 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol int error = parse_table(name, nlen, oldval, oldlenp, newval, newlen, head->ctl_table, &context); - if (context) - kfree(context); + kfree(context); if (error != -ENOTDIR) return error; tmp = tmp->next; -- cgit v1.2.3 From 8c0e33c133021ee241e9d51255b9fb18eb34ef0e Mon Sep 17 00:00:00 2001 From: Nick Wilson Date: Sat, 25 Jun 2005 14:59:00 -0700 Subject: [PATCH] Use ALIGN to remove duplicate code This patch makes use of ALIGN() to remove duplicate round-up code. Signed-off-by: Nick Wilson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 52f696f11adf..26967e042201 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -263,7 +263,7 @@ static int find_resource(struct resource *root, struct resource *new, new->start = min; if (new->end > max) new->end = max; - new->start = (new->start + align - 1) & ~(align - 1); + new->start = ALIGN(new->start, align); if (alignf) alignf(alignf_data, new, size, align); if (new->start < new->end && new->end - new->start >= size - 1) { -- cgit v1.2.3 From 3e1d1d28d99dabe63c64f7f40f1ca1d646de1f73 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Fri, 24 Jun 2005 23:13:50 -0700 Subject: [PATCH] Cleanup patch for process freezing 1. Establish a simple API for process freezing defined in linux/include/sched.h: frozen(process) Check for frozen process freezing(process) Check if a process is being frozen freeze(process) Tell a process to freeze (go to refrigerator) thaw_process(process) Restart process frozen_process(process) Process is frozen now 2. Remove all references to PF_FREEZE and PF_FROZEN from all kernel sources except sched.h 3. Fix numerous locations where try_to_freeze is manually done by a driver 4. Remove the argument that is no longer necessary from two function calls. 5. Some whitespace cleanup 6. Clear potential race in refrigerator (provides an open window of PF_FREEZE cleared before setting PF_FROZEN, recalc_sigpending does not check PF_FROZEN). This patch does not address the problem of freeze_processes() violating the rule that a task may only modify its own flags by setting PF_FREEZE. 
This is not clean in an SMP environment. freeze(process) is therefore not SMP safe! Signed-off-by: Christoph Lameter Signed-off-by: Linus Torvalds --- kernel/power/process.c | 26 ++++++++++---------------- kernel/sched.c | 3 +-- kernel/signal.c | 5 ++--- 3 files changed, 13 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/power/process.c b/kernel/power/process.c index 78d92dc6a1ed..0a086640bcfc 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -32,7 +32,7 @@ static inline int freezeable(struct task_struct * p) } /* Refrigerator is place where frozen processes are stored :-). */ -void refrigerator(unsigned long flag) +void refrigerator(void) { /* Hmm, should we be allowed to suspend when there are realtime processes around? */ @@ -41,14 +41,13 @@ void refrigerator(unsigned long flag) current->state = TASK_UNINTERRUPTIBLE; pr_debug("%s entered refrigerator\n", current->comm); printk("="); - current->flags &= ~PF_FREEZE; + frozen_process(current); spin_lock_irq(¤t->sighand->siglock); recalc_sigpending(); /* We sent fake signal, clean it up */ spin_unlock_irq(¤t->sighand->siglock); - current->flags |= PF_FROZEN; - while (current->flags & PF_FROZEN) + while (frozen(current)) schedule(); pr_debug("%s left refrigerator\n", current->comm); current->state = save; @@ -57,10 +56,10 @@ void refrigerator(unsigned long flag) /* 0 = success, else # of processes that we failed to stop */ int freeze_processes(void) { - int todo; - unsigned long start_time; + int todo; + unsigned long start_time; struct task_struct *g, *p; - + printk( "Stopping tasks: " ); start_time = jiffies; do { @@ -70,14 +69,12 @@ int freeze_processes(void) unsigned long flags; if (!freezeable(p)) continue; - if ((p->flags & PF_FROZEN) || + if ((frozen(p)) || (p->state == TASK_TRACED) || (p->state == TASK_STOPPED)) continue; - /* FIXME: smp problem here: we may not access other process' flags - without locking */ - p->flags |= PF_FREEZE; + freeze(p); spin_lock_irqsave(&p->sighand->siglock, flags); signal_wake_up(p, 0); spin_unlock_irqrestore(&p->sighand->siglock, flags); @@ -91,7 +88,7 @@ int freeze_processes(void) return todo; } } while(todo); - + printk( "|\n" ); BUG_ON(in_atomic()); return 0; @@ -106,10 +103,7 @@ void thaw_processes(void) do_each_thread(g, p) { if (!freezeable(p)) continue; - if (p->flags & PF_FROZEN) { - p->flags &= ~PF_FROZEN; - wake_up_process(p); - } else + if (!thaw_process(p)) printk(KERN_INFO " Strange, %s not stopped\n", p->comm ); } while_each_thread(g, p); diff --git a/kernel/sched.c b/kernel/sched.c index 76080d142e3d..6fa9ea4ae44c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4174,8 +4174,7 @@ static int migration_thread(void * data) struct list_head *head; migration_req_t *req; - if (current->flags & PF_FREEZE) - refrigerator(PF_FREEZE); + try_to_freeze(); spin_lock_irq(&rq->lock); diff --git a/kernel/signal.c b/kernel/signal.c index d1258729a5f9..ca1186eef938 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -213,7 +213,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) fastcall void recalc_sigpending_tsk(struct task_struct *t) { if (t->signal->group_stop_count > 0 || - (t->flags & PF_FREEZE) || + (freezing(t)) || PENDING(&t->pending, &t->blocked) || PENDING(&t->signal->shared_pending, &t->blocked)) set_tsk_thread_flag(t, TIF_SIGPENDING); @@ -2231,8 +2231,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese, current->state = TASK_INTERRUPTIBLE; timeout = schedule_timeout(timeout); - if (current->flags & PF_FREEZE) - 
refrigerator(PF_FREEZE); + try_to_freeze(); spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(current, &these, &info); current->blocked = current->real_blocked; -- cgit v1.2.3 From 22e2c507c301c3dbbcf91b4948b88f78842ee6c9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 27 Jun 2005 10:55:12 +0200 Subject: [PATCH] Update cfq io scheduler to time sliced design This updates the CFQ io scheduler to the new time sliced design (cfq v3). It provides full process fairness, while giving excellent aggregate system throughput even for many competing processes. It supports io priorities, either inherited from the cpu nice value or set directly with the ioprio_get/set syscalls. The latter closely mimic set/getpriority. This import is based on my latest from -mm. Signed-off-by: Jens Axboe Signed-off-by: Linus Torvalds --- kernel/exit.c | 2 ++ kernel/fork.c | 5 +++++ kernel/sched.c | 8 -------- 3 files changed, 7 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 3ebcd60a19c6..9d1b10ed0135 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -784,6 +784,8 @@ fastcall NORET_TYPE void do_exit(long code) profile_task_exit(tsk); + WARN_ON(atomic_read(&tsk->fs_excl)); + if (unlikely(in_interrupt())) panic("Aiee, killing interrupt handler!"); if (unlikely(!tsk->pid)) diff --git a/kernel/fork.c b/kernel/fork.c index 2c7806873bfd..cdef6cea8900 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1090,6 +1090,11 @@ static task_t *copy_process(unsigned long clone_flags, spin_unlock(¤t->sighand->siglock); } + /* + * inherit ioprio + */ + p->ioprio = current->ioprio; + SET_LINKS(p); if (unlikely(p->ptrace & PT_PTRACED)) __ptrace_link(p, current->parent); diff --git a/kernel/sched.c b/kernel/sched.c index a07cff90d849..e2b0d3e4dd06 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3448,15 +3448,7 @@ int task_nice(const task_t *p) { return TASK_NICE(p); } - -/* - * The only users of task_nice are binfmt_elf and binfmt_elf32. - * binfmt_elf is no longer modular, but binfmt_elf32 still is. - * Therefore, task_nice is needed if there is a compat_mode. - */ -#ifdef CONFIG_COMPAT EXPORT_SYMBOL_GPL(task_nice); -#endif /** * idle_cpu - is a given cpu idle currently? -- cgit v1.2.3 From 9ec4b1f356b3bad928ae8e2aa9caebfa737d52df Mon Sep 17 00:00:00 2001 From: Ananth N Mavinakayanahalli Date: Mon, 27 Jun 2005 15:17:01 -0700 Subject: [PATCH] kprobes: fix single-step out of line - take2 Now that PPC64 has no-execute support, here is a second try to fix the single step out of line during kprobe execution. Kprobes on x86_64 already solved this problem by allocating an executable page and using it as the scratch area for stepping out of line. Reuse that. Signed-off-by: Ananth N Mavinakayanahalli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 334f37472c56..65242529a75f 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -50,6 +51,106 @@ unsigned int kprobe_cpu = NR_CPUS; static DEFINE_SPINLOCK(kprobe_lock); static struct kprobe *curr_kprobe; +/* + * kprobe->ainsn.insn points to the copy of the instruction to be + * single-stepped. 
x86_64, POWER4 and above have no-exec support and + * stepping on the instruction on a vmalloced/kmalloced/data page + * is a recipe for disaster + */ +#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) + +struct kprobe_insn_page { + struct hlist_node hlist; + kprobe_opcode_t *insns; /* Page of instruction slots */ + char slot_used[INSNS_PER_PAGE]; + int nused; +}; + +static struct hlist_head kprobe_insn_pages; + +/** + * get_insn_slot() - Find a slot on an executable page for an instruction. + * We allocate an executable page if there's no room on existing ones. + */ +kprobe_opcode_t *get_insn_slot(void) +{ + struct kprobe_insn_page *kip; + struct hlist_node *pos; + + hlist_for_each(pos, &kprobe_insn_pages) { + kip = hlist_entry(pos, struct kprobe_insn_page, hlist); + if (kip->nused < INSNS_PER_PAGE) { + int i; + for (i = 0; i < INSNS_PER_PAGE; i++) { + if (!kip->slot_used[i]) { + kip->slot_used[i] = 1; + kip->nused++; + return kip->insns + (i * MAX_INSN_SIZE); + } + } + /* Surprise! No unused slots. Fix kip->nused. */ + kip->nused = INSNS_PER_PAGE; + } + } + + /* All out of space. Need to allocate a new page. Use slot 0.*/ + kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL); + if (!kip) { + return NULL; + } + + /* + * Use module_alloc so this page is within +/- 2GB of where the + * kernel image and loaded module images reside. This is required + * so x86_64 can correctly handle the %rip-relative fixups. + */ + kip->insns = module_alloc(PAGE_SIZE); + if (!kip->insns) { + kfree(kip); + return NULL; + } + INIT_HLIST_NODE(&kip->hlist); + hlist_add_head(&kip->hlist, &kprobe_insn_pages); + memset(kip->slot_used, 0, INSNS_PER_PAGE); + kip->slot_used[0] = 1; + kip->nused = 1; + return kip->insns; +} + +void free_insn_slot(kprobe_opcode_t *slot) +{ + struct kprobe_insn_page *kip; + struct hlist_node *pos; + + hlist_for_each(pos, &kprobe_insn_pages) { + kip = hlist_entry(pos, struct kprobe_insn_page, hlist); + if (kip->insns <= slot && + slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { + int i = (slot - kip->insns) / MAX_INSN_SIZE; + kip->slot_used[i] = 0; + kip->nused--; + if (kip->nused == 0) { + /* + * Page is no longer in use. Free it unless + * it's the last one. We keep the last one + * so as not to have to set it up again the + * next time somebody inserts a probe. + */ + hlist_del(&kip->hlist); + if (hlist_empty(&kprobe_insn_pages)) { + INIT_HLIST_NODE(&kip->hlist); + hlist_add_head(&kip->hlist, + &kprobe_insn_pages); + } else { + module_free(NULL, kip->insns); + kfree(kip); + } + } + return; + } + } +} + /* Locks kprobe: irqs must be disabled */ void lock_kprobes(void) { -- cgit v1.2.3 From 802eae7c800fb7f583e6c06afa363585af2bef00 Mon Sep 17 00:00:00 2001 From: Rusty Lynch Date: Mon, 27 Jun 2005 15:17:08 -0700 Subject: [PATCH] Return probe redesign: architecture independent changes The following is the second version of the function return probe patches I sent out earlier this week. Changes since my last submission include: * Fix in ppc64 code removing an unneeded call to re-enable preemption * Fix a build problem in ia64 when kprobes was turned off * Added another BUG_ON check to each of the architecture trampoline handlers My initial patch description ==> From my experiences with adding return probes to x86_64 and ia64, and the feedback on LKML to those patches, I think we can simplify the design for return probes. 
The following patch tweaks the original design such that: * Instead of storing the stack address in the return probe instance, the task pointer is stored. This gives us all we need in order to: - find the correct return probe instance when we enter the trampoline (even if we are recursing) - find all left-over return probe instances when the task is going away This has the side effect of simplifying the implementation since more work can be done in kernel/kprobes.c since architecture specific knowledge of the stack layout is no longer required. Specifically, we no longer have: - arch_get_kprobe_task() - arch_kprobe_flush_task() - get_rp_inst_tsk() - get_rp_inst() - trampoline_post_handler() * Instead of splitting the return probe handling and cleanup logic across the pre and post trampoline handlers, all the work is pushed into the pre function (trampoline_probe_handler), and then we skip single stepping the original function. In this case the original instruction to be single stepped was just a NOP, and we can do without the extra interruption. The new flow of events for having a return probe handler execute when a target function exits is: * At system initialization time, a kprobe is inserted at the beginning of kretprobe_trampoline. kernel/kprobes.c used to handle this on its own, but ia64 needed to do this a little differently (i.e. a function pointer is really a pointer to a structure containing the instruction pointer and a global pointer), so I added the notion of arch_init(), so that kernel/kprobes.c:init_kprobes() now allows architecture specific initialization by calling arch_init() before exiting. Each architecture now registers a kprobe on its own trampoline function. * register_kretprobe() will insert a kprobe at the beginning of the targeted function with the kprobe pre_handler set to arch_prepare_kretprobe (still no change) * When the target function is entered, the kprobe is fired, calling arch_prepare_kretprobe (still no change) * In arch_prepare_kretprobe() we try to get a free instance and if one is available then we fill out the instance with a pointer to the return probe, the original return address, and a pointer to the task structure (instead of the stack address.) Just like before we change the return address to the trampoline function and mark the instance as used. If multiple return probes are registered for a given target function, then arch_prepare_kretprobe() will get called multiple times for the same task (since our kprobe implementation is able to handle multiple kprobes at the same address.) Past the first call to arch_prepare_kretprobe, we end up with the original address stored in the return probe instance pointing to our trampoline function. (This is a significant difference from the original arch_prepare_kretprobe design.) * Target function executes like normal and then returns to kretprobe_trampoline. * kprobe inserted on the first instruction of kretprobe_trampoline is fired and calls trampoline_probe_handler() (no change here) * trampoline_probe_handler() consumes each of the instances associated with the current task by calling the registered handler function and marking the instance as unused until an instance is found that has a return address different than the trampoline function. (change similar to my previous ia64 RFC) * If the task is killed with some left-over return probe instances (meaning that a target function was entered, but never returned), then we just free any instances associated with the task. 
(Not much different other then we can handle this without calling architecture specific functions.) There is a known problem that this patch does not yet solve where registering a return probe flush_old_exec or flush_thread will put us in a bad state. Most likely the best way to handle this is to not allow registering return probes on these two functions. (Significant change) This patch series applies to the 2.6.12-rc6-mm1 kernel, and provides: * kernel/kprobes.c changes * i386 patch of existing return probes implementation * x86_64 patch of existing return probe implementation * ia64 implementation * ppc64 implementation (provided by Ananth) This patch implements the architecture independant changes for a reworking of the kprobes based function return probes design. Changes include: * Removing functions for querying a return probe instance off a stack address * Removing the stack_addr field from the kretprobe_instance definition, and adding a task pointer * Adding architecture specific initialization via arch_init() * Removing extern definitions for the architecture trampoline functions (this isn't needed anymore since the architecture handles the initialization of the kprobe in the return probe trampoline function.) Signed-off-by: Rusty Lynch Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 69 ++++++++++++++++---------------------------------------- 1 file changed, 19 insertions(+), 50 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 65242529a75f..90c0e82b650c 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -240,12 +240,6 @@ static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } -struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, - .pre_handler = trampoline_probe_handler, - .post_handler = trampoline_post_handler -}; - struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp) { struct hlist_node *node; @@ -264,35 +258,18 @@ static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp) return NULL; } -struct kretprobe_instance *get_rp_inst(void *sara) -{ - struct hlist_head *head; - struct hlist_node *node; - struct task_struct *tsk; - struct kretprobe_instance *ri; - - tsk = arch_get_kprobe_task(sara); - head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; - hlist_for_each_entry(ri, node, head, hlist) { - if (ri->stack_addr == sara) - return ri; - } - return NULL; -} - void add_rp_inst(struct kretprobe_instance *ri) { - struct task_struct *tsk; /* * Remove rp inst off the free list - * Add it back when probed function returns */ hlist_del(&ri->uflist); - tsk = arch_get_kprobe_task(ri->stack_addr); + /* Add rp inst onto table */ INIT_HLIST_NODE(&ri->hlist); hlist_add_head(&ri->hlist, - &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]); + &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]); /* Also add this rp inst to the used list. 
*/ INIT_HLIST_NODE(&ri->uflist); @@ -319,34 +296,25 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk) return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; } -struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk) -{ - struct task_struct *tsk; - struct hlist_head *head; - struct hlist_node *node; - struct kretprobe_instance *ri; - - head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)]; - - hlist_for_each_entry(ri, node, head, hlist) { - tsk = arch_get_kprobe_task(ri->stack_addr); - if (tsk == tk) - return ri; - } - return NULL; -} - /* - * This function is called from do_exit or do_execv when task tk's stack is - * about to be recycled. Recycle any function-return probe instances - * associated with this task. These represent probed functions that have - * been called but may never return. + * This function is called from exit_thread or flush_thread when task tk's + * stack is being recycled so that we can recycle any function-return probe + * instances associated with this task. These left over instances represent + * probed functions that have been called but will never return. */ void kprobe_flush_task(struct task_struct *tk) { + struct kretprobe_instance *ri; + struct hlist_head *head; + struct hlist_node *node, *tmp; unsigned long flags = 0; + spin_lock_irqsave(&kprobe_lock, flags); - arch_kprobe_flush_task(tk); + head = kretprobe_inst_table_head(current); + hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + if (ri->task == tk) + recycle_rp_inst(ri); + } spin_unlock_irqrestore(&kprobe_lock, flags); } @@ -606,9 +574,10 @@ static int __init init_kprobes(void) INIT_HLIST_HEAD(&kretprobe_inst_table[i]); } - err = register_die_notifier(&kprobe_exceptions_nb); - /* Register the trampoline probe for return probe */ - register_kprobe(&trampoline_p); + err = arch_init(); + if (!err) + err = register_die_notifier(&kprobe_exceptions_nb); + return err; } -- cgit v1.2.3 From 314b6a4d80a7a5217c86ffdca926b6f406da0e0e Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 27 Jun 2005 22:29:33 -0700 Subject: [PATCH] kexec: fix sparse warnings Signed-off-by: Alexey Dobriyan Cc: Eric Biederman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/kexec.c b/kernel/kexec.c index 7843548cf2d9..cdd4dcd8fb63 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -241,7 +241,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, unsigned long nr_segments, - struct kexec_segment *segments) + struct kexec_segment __user *segments) { int result; struct kimage *image; @@ -650,7 +650,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image, } } - return 0; + return NULL; } static struct page *kimage_alloc_page(struct kimage *image, @@ -696,7 +696,7 @@ static struct page *kimage_alloc_page(struct kimage *image, /* Allocate a page, if we run out of memory give up */ page = kimage_alloc_pages(gfp_mask, 0); if (!page) - return 0; + return NULL; /* If the page cannot be used file it away */ if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { @@ -754,7 +754,7 @@ static int kimage_load_normal_segment(struct kimage *image, unsigned long maddr; unsigned long ubytes, mbytes; int result; - unsigned char *buf; + unsigned char __user *buf; result = 0; buf = segment->buf; @@ -818,7 +818,7 @@ static int 
kimage_load_crash_segment(struct kimage *image, unsigned long maddr; unsigned long ubytes, mbytes; int result; - unsigned char *buf; + unsigned char __user *buf; result = 0; buf = segment->buf; -- cgit v1.2.3 From f340c0d1a3f40fdcba69cd291530a4debc58748f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 28 Jun 2005 16:40:42 +0200 Subject: [PATCH] Tweak idle thread setup semantics This patch tweaks idle thread setup semantics a bit: instead of setting NEED_RESCHED in init_idle(), we do an explicit schedule() before calling into cpu_idle(). This patch, while having no negative side-effects, enables wider use of cond_resched()s. (which might happen in the stock kernel too, but it's particulary important for voluntary-preempt) Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index e2b0d3e4dd06..5f2182d42241 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4166,6 +4166,14 @@ void show_state(void) read_unlock(&tasklist_lock); } +/** + * init_idle - set up an idle thread for a given CPU + * @idle: task in question + * @cpu: cpu the idle task belongs to + * + * NOTE: this function does not set the idle thread's NEED_RESCHED + * flag, to make booting more robust. + */ void __devinit init_idle(task_t *idle, int cpu) { runqueue_t *rq = cpu_rq(cpu); @@ -4183,7 +4191,6 @@ void __devinit init_idle(task_t *idle, int cpu) #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) idle->oncpu = 1; #endif - set_tsk_need_resched(idle); spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ -- cgit v1.2.3 From 47f176fdaf8924bc83fddcf9658f2fd3ef60d573 Mon Sep 17 00:00:00 2001 From: Luca Falavigna Date: Tue, 28 Jun 2005 20:44:42 -0700 Subject: [PATCH] Using msleep() instead of HZ Use msleep() in a few places. Signed-off-by: Luca Falavigna Acked-by: Ingo Molnar Acked-by: Jeff Garzik Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/autoprobe.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 98d62d8efeaf..3467097ca61a 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -9,6 +9,7 @@ #include #include #include +#include /* * Autodetection depends on the fact that any interrupt that @@ -26,7 +27,7 @@ static DECLARE_MUTEX(probe_sem); */ unsigned long probe_irq_on(void) { - unsigned long val, delay; + unsigned long val; irq_desc_t *desc; unsigned int i; @@ -45,8 +46,7 @@ unsigned long probe_irq_on(void) } /* Wait for longstanding interrupts to trigger. */ - for (delay = jiffies + HZ/50; time_after(delay, jiffies); ) - /* about 20ms delay */ barrier(); + msleep(20); /* * enable any unassigned irqs @@ -68,8 +68,7 @@ unsigned long probe_irq_on(void) /* * Wait for spurious interrupts to trigger */ - for (delay = jiffies + HZ/10; time_after(delay, jiffies); ) - /* about 100ms delay */ barrier(); + msleep(100); /* * Now filter out any obviously spurious interrupts -- cgit v1.2.3 From f01b1b0baa454825ed95c28d2a6a71bbf4510836 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 28 Jun 2005 20:44:47 -0700 Subject: [PATCH] ITIMER_REAL: fix possible deadlock and race As Steven Rostedt pointed out, there are 2 problems with ITIMER_REAL timers. 1. 
do_setitimer() does not call del_timer_sync() in case when the timer is not pending (it_real_value() returns 0). This is wrong, the timer may still be running, and it can rearm itself. 2. It calls del_timer_sync() with tsk->sighand->siglock held. This is deadlockable, because timer's handler needs this lock too. Signed-off-by: Oleg Nesterov Acked-by: Steven Rostedt Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/itimer.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/itimer.c b/kernel/itimer.c index 1dc988e0d2c7..a72cb0e5aa4b 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -153,11 +153,15 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) switch (which) { case ITIMER_REAL: +again: spin_lock_irq(&tsk->sighand->siglock); interval = tsk->signal->it_real_incr; val = it_real_value(tsk->signal); - if (val) - del_timer_sync(&tsk->signal->real_timer); + /* We are sharing ->siglock with it_real_fn() */ + if (try_to_del_timer_sync(&tsk->signal->real_timer) < 0) { + spin_unlock_irq(&tsk->sighand->siglock); + goto again; + } tsk->signal->it_real_incr = timeval_to_jiffies(&value->it_interval); it_real_arm(tsk, timeval_to_jiffies(&value->it_value)); -- cgit v1.2.3 From 200803dfe4ff772740d63db725ab2f1b185ccf92 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 28 Jun 2005 20:45:18 -0700 Subject: [PATCH] irqpoll Anyone reporting a stuck IRQ should try these options. Its effectiveness varies we've found in the Fedora case. Quite a few systems with misdescribed IRQ routing just work when you use irqpoll. It also fixes up the VIA systems although thats now fixed with the VIA quirk (which we could just make default as its what Redmond OS does but Linus didn't like it historically). A small number of systems have jammed IRQ sources or misdescribes that cause an IRQ that we have no handler registered anywhere for. In those cases it doesn't help. Signed-off-by: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/handle.c | 2 +- kernel/irq/spurious.c | 113 +++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 112 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 436c7d93c00a..c29f83c16497 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -172,7 +172,7 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs) spin_lock(&desc->lock); if (!noirqdebug) - note_interrupt(irq, desc, action_ret); + note_interrupt(irq, desc, action_ret, regs); if (likely(!(desc->status & IRQ_PENDING))) break; desc->status &= ~IRQ_PENDING; diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index ba039e827d58..7df9abd5ec86 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -11,6 +11,83 @@ #include #include +static int irqfixup; + +/* + * Recovery handler for misrouted interrupts. 
+ */ + +static int misrouted_irq(int irq, struct pt_regs *regs) +{ + int i; + irq_desc_t *desc; + int ok = 0; + int work = 0; /* Did we do work for a real IRQ */ + + for(i = 1; i < NR_IRQS; i++) { + struct irqaction *action; + + if (i == irq) /* Already tried */ + continue; + desc = &irq_desc[i]; + spin_lock(&desc->lock); + action = desc->action; + /* Already running on another processor */ + if (desc->status & IRQ_INPROGRESS) { + /* + * Already running: If it is shared get the other + * CPU to go looking for our mystery interrupt too + */ + if (desc->action && (desc->action->flags & SA_SHIRQ)) + desc->status |= IRQ_PENDING; + spin_unlock(&desc->lock); + continue; + } + /* Honour the normal IRQ locking */ + desc->status |= IRQ_INPROGRESS; + spin_unlock(&desc->lock); + while (action) { + /* Only shared IRQ handlers are safe to call */ + if (action->flags & SA_SHIRQ) { + if (action->handler(i, action->dev_id, regs) == + IRQ_HANDLED) + ok = 1; + } + action = action->next; + } + local_irq_disable(); + /* Now clean up the flags */ + spin_lock(&desc->lock); + action = desc->action; + + /* + * While we were looking for a fixup someone queued a real + * IRQ clashing with our walk + */ + + while ((desc->status & IRQ_PENDING) && action) { + /* + * Perform real IRQ processing for the IRQ we deferred + */ + work = 1; + spin_unlock(&desc->lock); + handle_IRQ_event(i, regs, action); + spin_lock(&desc->lock); + desc->status &= ~IRQ_PENDING; + } + desc->status &= ~IRQ_INPROGRESS; + /* + * If we did actual work for the real IRQ line we must let the + * IRQ controller clean up too + */ + if(work) + desc->handler->end(i); + spin_unlock(&desc->lock); + } + /* So the caller can adjust the irq error counts */ + return ok; +} + /* * If 99,900 of the previous 100,000 interrupts have not been handled * then assume that the IRQ is stuck in some manner. 
Drop a diagnostic @@ -31,7 +108,8 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret) printk(KERN_ERR "irq event %d: bogus return value %x\n", irq, action_ret); } else { - printk(KERN_ERR "irq %d: nobody cared!\n", irq); + printk(KERN_ERR "irq %d: nobody cared (try booting with " + "the \"irqpoll\" option)\n", irq); } dump_stack(); printk(KERN_ERR "handlers:\n"); @@ -55,7 +133,8 @@ static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t actio } } -void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret) +void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret, + struct pt_regs *regs) { if (action_ret != IRQ_HANDLED) { desc->irqs_unhandled++; @@ -63,6 +142,15 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret) report_bad_irq(irq, desc, action_ret); } + if (unlikely(irqfixup)) { + /* Don't punish working computers */ + if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) { + int ok = misrouted_irq(irq, regs); + if (action_ret == IRQ_NONE) + desc->irqs_unhandled -= ok; + } + } + desc->irq_count++; if (desc->irq_count < 100000) return; @@ -94,3 +182,24 @@ int __init noirqdebug_setup(char *str) __setup("noirqdebug", noirqdebug_setup); +static int __init irqfixup_setup(char *str) +{ + irqfixup = 1; + printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); + printk(KERN_WARNING "This may impact system performance.\n"); + return 1; +} + +__setup("irqfixup", irqfixup_setup); + +static int __init irqpoll_setup(char *str) +{ + irqfixup = 2; + printk(KERN_WARNING "Misrouted IRQ fixup and polling support " + "enabled\n"); + printk(KERN_WARNING "This may significantly impact system " + "performance\n"); + return 1; +} + +__setup("irqpoll", irqpoll_setup); -- cgit v1.2.3
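A note on how the polling fixup above interacts with drivers: misrouted_irq() only ever calls handlers that were registered as shared (SA_SHIRQ), and the unhandled-interrupt accounting in note_interrupt() relies on each handler returning IRQ_NONE when its device did not actually raise the interrupt. The sketch below shows the general shape of such a handler against the 2.6.12-era request_irq() API; it is only an illustration, not part of the patches above, and the device name, status port and IRQ number (mydev, MYDEV_STATUS, MYDEV_IRQ) are hypothetical.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/io.h>

#define MYDEV_IRQ	11	/* hypothetical, shared IRQ line */
#define MYDEV_STATUS	0x300	/* hypothetical status port */
#define MYDEV_PENDING	0x01

static int mydev_id;		/* unique cookie, required when sharing an IRQ */

static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Not our device: report IRQ_NONE so spurious/misrouted accounting works. */
	if (!(inb(MYDEV_STATUS) & MYDEV_PENDING))
		return IRQ_NONE;

	/* ... acknowledge and service the device here ... */
	return IRQ_HANDLED;
}

static int __init mydev_init(void)
{
	/* SA_SHIRQ marks the handler as shared, so misrouted_irq() may try it. */
	return request_irq(MYDEV_IRQ, mydev_interrupt, SA_SHIRQ,
			   "mydev", &mydev_id);
}

static void __exit mydev_exit(void)
{
	free_irq(MYDEV_IRQ, &mydev_id);
}

module_init(mydev_init);
module_exit(mydev_exit);
MODULE_LICENSE("GPL");

With handlers written this way, booting with irqfixup offers an unhandled interrupt to the other shared handlers, and irqpoll additionally runs the recovery pass from the timer interrupt, at the performance cost the boot-time warnings above describe.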