author    Steven Whitehouse <swhiteho@redhat.com>  2006-06-06 10:31:26 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-06-06 10:31:26 -0400
commit    4b01abc977574253f466f65463019de2571c6401 (patch)
tree      c330615fe9f277ad51a91da3db2d690a3336fb02 /mm
parent    47c96298cd0b04b4478206fde55fd6a6431de980 (diff)
parent    1def630a6a49dda5bc89dfbd86656293640456f0 (diff)
Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory_hotplug.c   8
-rw-r--r--  mm/slab.c            27
2 files changed, 17 insertions, 18 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1ae2b2cc3a54..70df5c0d957e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -91,8 +91,8 @@ static void grow_zone_span(struct zone *zone,
 	if (start_pfn < zone->zone_start_pfn)
 		zone->zone_start_pfn = start_pfn;
 
-	if (end_pfn > old_zone_end_pfn)
-		zone->spanned_pages = end_pfn - zone->zone_start_pfn;
+	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+				zone->zone_start_pfn;
 
 	zone_span_writeunlock(zone);
 }
@@ -106,8 +106,8 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
 	if (start_pfn < pgdat->node_start_pfn)
 		pgdat->node_start_pfn = start_pfn;
 
-	if (end_pfn > old_pgdat_end_pfn)
-		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
+	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+					pgdat->node_start_pfn;
 }
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
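
The mm/memory_hotplug.c hunks above replace a conditional update of the spanned-pages count with an unconditional recomputation via max(). The difference matters when a hot-added range extends the start of a zone or node downward without extending its end: the old code never recomputed the count in that case and left it stale. A minimal standalone C sketch of the two forms; the struct and function names here are hypothetical stand-ins for illustration, not the kernel's types:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for a zone/node span: start pfn plus page count. */
struct span {
        unsigned long start_pfn;
        unsigned long spanned_pages;
};

/* Old logic: the page count is only recomputed when the end grows. */
static void grow_span_old(struct span *s, unsigned long start_pfn,
                          unsigned long end_pfn)
{
        unsigned long old_end_pfn = s->start_pfn + s->spanned_pages;

        if (start_pfn < s->start_pfn)
                s->start_pfn = start_pfn;
        if (end_pfn > old_end_pfn)
                s->spanned_pages = end_pfn - s->start_pfn;
        /* If only start_pfn moved down, spanned_pages is now stale. */
}

/* New logic: always recompute from whichever end pfn is larger. */
static void grow_span_new(struct span *s, unsigned long start_pfn,
                          unsigned long end_pfn)
{
        unsigned long old_end_pfn = s->start_pfn + s->spanned_pages;

        if (start_pfn < s->start_pfn)
                s->start_pfn = start_pfn;
        s->spanned_pages = max(old_end_pfn, end_pfn) - s->start_pfn;
}

int main(void)
{
        struct span a = { 100, 50 };  /* covers pfns 100..149 */
        struct span b = { 100, 50 };

        /* Add a range that lowers the start but not the end. */
        grow_span_old(&a, 90, 120);
        grow_span_new(&b, 90, 120);
        printf("old: %lu pages, new: %lu pages\n",
               a.spanned_pages, b.spanned_pages);  /* old: 50, new: 60 */
        return 0;
}

Both forms agree whenever the end pfn grows; they diverge only in the start-only case shown in main(), which is what the max() rewrite covers.
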
diff --git a/mm/slab.c b/mm/slab.c
index d31a06bfbea5..f1b644eb39d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
 /*
  * struct slab
  *
@@ -1356,12 +1351,6 @@ void __init kmem_cache_init(void)
 					NULL, NULL);
 	}
 
-	/* Inc off-slab bufctl limit until the ceiling is hit. */
-	if (!(OFF_SLAB(sizes->cs_cachep))) {
-		offslab_limit = sizes->cs_size - sizeof(struct slab);
-		offslab_limit /= sizeof(kmem_bufctl_t);
-	}
-
 	sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -1780,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
+	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;
 
@@ -1791,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
-		/* More than offslab_limit objects will cause problems */
-		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-			break;
+		if (flags & CFLGS_OFF_SLAB) {
+			/*
+			 * Max number of objs-per-slab for caches which
+			 * use off-slab slabs. Needed to avoid a possible
+			 * looping condition in cache_grow().
+			 */
+			offslab_limit = size - sizeof(struct slab);
+			offslab_limit /= sizeof(kmem_bufctl_t);
+
+			if (num > offslab_limit)
+				break;
+		}
 
 		/* Found something acceptable - save it away */
 		cachep->num = num;
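
The mm/slab.c hunks drop the global offslab_limit, which was captured once during kmem_cache_init() from the general cache sizes, and instead derive the ceiling inside calculate_slab_order() from the object size under consideration. The arithmetic in the diff bounds the object count so that the off-slab management structure, a struct slab header plus one kmem_bufctl_t per object, fits in 'size' bytes; the in-tree comment ties this to avoiding a looping condition in cache_grow(). A hedged standalone sketch of that arithmetic; the struct layout and the offslab_limit() helper are simplified stand-ins, since in the patch the two statements are open-coded:

#include <stddef.h>
#include <stdio.h>

typedef unsigned int kmem_bufctl_t;

/* Simplified stand-in for the kernel's struct slab management header. */
struct slab {
        void *prev, *next;      /* list linkage */
        unsigned long colouroff;
        void *s_mem;            /* address of the first object */
        unsigned int inuse;     /* objects in use */
        kmem_bufctl_t free;     /* index of the first free object */
};

/*
 * Per-size ceiling on objects per slab with off-slab management:
 * header + num * sizeof(kmem_bufctl_t) must not exceed 'size',
 * mirroring the two statements added to calculate_slab_order().
 */
static unsigned long offslab_limit(size_t size)
{
        return (size - sizeof(struct slab)) / sizeof(kmem_bufctl_t);
}

int main(void)
{
        size_t sizes[] = { 512, 1024, 4096 };

        for (int i = 0; i < 3; i++)
                printf("size %4zu -> at most %lu objects per off-slab slab\n",
                       sizes[i], offslab_limit(sizes[i]));
        return 0;
}

Because the limit is now recomputed for each cache from its own object size, it tracks the cache being created rather than a value frozen at boot, which is why the global variable and its init-time setup could be deleted.
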