author     Catalin Marinas <catalin.marinas@arm.com>   2009-07-07 10:32:56 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>   2009-07-07 10:32:56 +0100
commit     4b8a96744c0c27ab94fb4e8155d4384c3b399e27 (patch)
tree       e769e01395a3a13e28bf146fce20a3f3f2a95750
parent     bf2a76b317c6ccc6f7b6b1dc09664c5b6a155c61 (diff)
kmemleak: Add more cond_resched() calls in the scanning thread
Following the recent fix to no longer reschedule in the scan_block() function, the system may become unresponsive with !PREEMPT. This patch re-adds the cond_resched() call to scan_block(), but conditioned by the allow_resched parameter.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   mm/kmemleak.c   19
1 file changed, 11 insertions(+), 8 deletions(-)
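The core of the change is a conditional voluntary reschedule inside the scanning loop. As a rough sketch of that pattern (not the actual kmemleak code; example_scan() and its loop body are made up for illustration, only cond_resched() is the real kernel API, declared in <linux/sched.h>):

/*
 * Sketch of the cond_resched()-under-flag pattern applied by this patch.
 * example_scan() is a hypothetical function used only to illustrate the
 * idea; cond_resched() is the real kernel API.
 */
#include <linux/sched.h>

static void example_scan(unsigned long *start, unsigned long *end,
			 int allow_resched)
{
	unsigned long *ptr;

	for (ptr = start; ptr < end; ptr++) {
		/*
		 * Yield the CPU only when the caller says it is safe to
		 * sleep (no spinlocks held, not in atomic context), so
		 * long scans do not hog the CPU on !PREEMPT kernels.
		 */
		if (allow_resched)
			cond_resched();
		/* ... inspect *ptr for pointers to tracked objects ... */
	}
}
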
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 60065531f60c..93f14818e901 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -807,7 +807,7 @@ static int scan_should_stop(void)
  * found to the gray list.
  */
 static void scan_block(void *_start, void *_end,
-		       struct kmemleak_object *scanned)
+		       struct kmemleak_object *scanned, int allow_resched)
 {
 	unsigned long *ptr;
 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
@@ -818,6 +818,8 @@ static void scan_block(void *_start, void *_end,
 		unsigned long pointer = *ptr;
 		struct kmemleak_object *object;
 
+		if (allow_resched)
+			cond_resched();
 		if (scan_should_stop())
 			break;
@@ -881,12 +883,12 @@ static void scan_object(struct kmemleak_object *object)
 		goto out;
 	if (hlist_empty(&object->area_list))
 		scan_block((void *)object->pointer,
-			   (void *)(object->pointer + object->size), object);
+			   (void *)(object->pointer + object->size), object, 0);
 	else
 		hlist_for_each_entry(area, elem, &object->area_list, node)
 			scan_block((void *)(object->pointer + area->offset),
 				   (void *)(object->pointer + area->offset
-					    + area->length), object);
+					    + area->length), object, 0);
 out:
 	spin_unlock_irqrestore(&object->lock, flags);
 }
@@ -931,14 +933,14 @@ static void kmemleak_scan(void)
 	rcu_read_unlock();
 
 	/* data/bss scanning */
-	scan_block(_sdata, _edata, NULL);
-	scan_block(__bss_start, __bss_stop, NULL);
+	scan_block(_sdata, _edata, NULL, 1);
+	scan_block(__bss_start, __bss_stop, NULL, 1);
 
 #ifdef CONFIG_SMP
 	/* per-cpu sections scanning */
 	for_each_possible_cpu(i)
 		scan_block(__per_cpu_start + per_cpu_offset(i),
-			   __per_cpu_end + per_cpu_offset(i), NULL);
+			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
 #endif
 
 	/*
@@ -960,7 +962,7 @@ static void kmemleak_scan(void)
 			/* only scan if page is in use */
 			if (page_count(page) == 0)
 				continue;
-			scan_block(page, page + 1, NULL);
+			scan_block(page, page + 1, NULL, 1);
 		}
 	}
@@ -972,7 +974,8 @@ static void kmemleak_scan(void)
 		read_lock(&tasklist_lock);
 		for_each_process(task)
 			scan_block(task_stack_page(task),
-				   task_stack_page(task) + THREAD_SIZE, NULL);
+				   task_stack_page(task) + THREAD_SIZE,
+				   NULL, 0);
 		read_unlock(&tasklist_lock);
 	}
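
Note on the chosen allow_resched values: the data/bss, per-CPU and page scans above run in plain process context, so they pass 1 and may voluntarily reschedule. By contrast, scan_object() calls scan_block() while object->lock is held (released with spin_unlock_irqrestore() in the hunk above), and the task-stack scan runs under read_lock(&tasklist_lock); sleeping is not allowed in those contexts, so both call sites pass 0.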