author    Andrew Morton <akpm@osdl.org>    2006-05-15 09:43:59 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-05-15 11:20:55 -0700
commit    ac924c6034d9095f95ee889f7e31bbb9145da0c2 (patch)
tree      2db4fc64a008ff2b81a0faf381d8c21ccc5006b2 /mm
parent    5afdbd6e84c7fbdaa7cfde4cbee0d3a5f4f56da2 (diff)
[PATCH] setup_per_zone_pages_min() overflow fix
As pointed out in http://bugzilla.kernel.org/show_bug.cgi?id=6490, this
function can experience overflows on 32-bit machines, causing our response
to changed values of min_free_kbytes to go whacky.  Fixing it efficiently
is all too hard, so fix it with 64-bit math instead.

Cc: Ake Sandgren <ake.sandgren@hpc2n.umu.se>
Cc: Martin Bligh <mbligh@google.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
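For readers of the patch below, a minimal userspace sketch of the failure mode, with made-up values (pages_min, present_pages and lowmem_pages here are hypothetical, not taken from any real system): on a 32-bit machine the product pages_min * zone->present_pages is evaluated in 32-bit unsigned arithmetic and can wrap before the division by lowmem_pages, whereas widening to 64 bits first, as the patch does via do_div(), keeps the intermediate product exact.

/* Illustration only -- not kernel code; all values are hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pages_min     = 16384;       /* e.g. min_free_kbytes = 65536 on 4 KB pages */
	uint32_t present_pages = 1048576;     /* a ~4 GB zone in 4 KB pages */
	uint32_t lowmem_pages  = 262144;      /* ~1 GB of lowmem */

	/* Old code: the 32-bit product wraps (2^34 mod 2^32 == 0) before the division. */
	uint32_t wrapped = (pages_min * present_pages) / lowmem_pages;

	/* Patched approach: widen to 64 bits first, then divide. */
	uint64_t tmp = (uint64_t)pages_min * present_pages;
	tmp /= lowmem_pages;

	printf("32-bit math: %u\n", wrapped);                     /* prints 0 */
	printf("64-bit math: %llu\n", (unsigned long long)tmp);   /* prints 65536 */
	return 0;
}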
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  11
1 files changed, 7 insertions, 4 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c999047e..813b4ec1298a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"
 
 /*
@@ -2566,9 +2567,11 @@ void setup_per_zone_pages_min(void)
 	}
 
 	for_each_zone(zone) {
-		unsigned long tmp;
+		u64 tmp;
+
 		spin_lock_irqsave(&zone->lru_lock, flags);
-		tmp = (pages_min * zone->present_pages) / lowmem_pages;
+		tmp = (u64)pages_min * zone->present_pages;
+		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2598,8 @@ void setup_per_zone_pages_min(void)
 			zone->pages_min = tmp;
 		}
 
-		zone->pages_low = zone->pages_min + tmp / 4;
-		zone->pages_high = zone->pages_min + tmp / 2;
+		zone->pages_low = zone->pages_min + (tmp >> 2);
+		zone->pages_high = zone->pages_min + (tmp >> 1);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
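One detail worth noting about the patched code: in the kernel, do_div(n, base) divides the 64-bit n by a 32-bit base in place, leaving the quotient in n and returning the remainder, which is why tmp can be used directly in the watermark arithmetic that follows it. The sketch below mirrors that flow in plain C with a stand-in helper (demo_do_div) and the same hypothetical numbers as above; it illustrates the arithmetic, not the kernel implementation.

/* Illustration only -- demo_do_div is a userspace stand-in, not the kernel macro. */
#include <stdio.h>
#include <stdint.h>

/* Divide *n by base in place and return the remainder, like the kernel's do_div(). */
static uint32_t demo_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);
	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t tmp = (uint64_t)16384 * 1048576;   /* hypothetical pages_min * present_pages */
	uint64_t pages_min, pages_low, pages_high;

	demo_do_div(&tmp, 262144);                  /* hypothetical lowmem_pages */

	pages_min  = tmp;                           /* mirrors the non-highmem branch above */
	pages_low  = pages_min + (tmp >> 2);        /* tmp >> 2 equals tmp / 4 for unsigned tmp */
	pages_high = pages_min + (tmp >> 1);        /* tmp >> 1 equals tmp / 2 */

	printf("min=%llu low=%llu high=%llu\n",
	       (unsigned long long)pages_min,
	       (unsigned long long)pages_low,
	       (unsigned long long)pages_high);
	return 0;
}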