diff options
author | Pekka Enberg <penberg@kernel.org> | 2010-09-18 20:45:06 +0300 |
---|---|---|
committer | Pekka Enberg <penberg@kernel.org> | 2010-10-02 10:28:55 +0300 |
commit | ed59ecbf8904a40cf0a1ee5d6f100d76d2f44e5f (patch) | |
tree | 6f7eb1efc1cec284ce3762702ef2545bbeaafa10 /mm/slub.c | |
parent | ed6c1115c835d822ec5d6356ae3043de54088f43 (diff) | |
download | linux-ed59ecbf8904a40cf0a1ee5d6f100d76d2f44e5f.tar.gz linux-ed59ecbf8904a40cf0a1ee5d6f100d76d2f44e5f.tar.bz2 linux-ed59ecbf8904a40cf0a1ee5d6f100d76d2f44e5f.zip |
Revert "Slub: UP bandaid"
This reverts commit 5249d039500f05a5ab379286b1d23ab9b04d3f2c. It's not needed
after commit bbddff0545878a8649c091a9dd7c43ce91516734 ("percpu: use percpu
allocator on UP too").
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 16 |
1 files changed, 0 insertions, 16 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 42ce17304275..7e1fe663795a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2104,24 +2104,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_SMP
-	/*
-	 * Will use reserve that does not require slab operation during
-	 * early boot.
-	 */
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
-#else
-	/*
-	 * Special hack for UP mode. allocpercpu() falls back to kmalloc
-	 * operations. So we cannot use that before the slab allocator is up
-	 * Simply get the smallest possible compound page. The page will be
-	 * released via kfree() when the cpu caches are resized later.
-	 */
-	if (slab_state < UP)
-		s->cpu_slab = (__percpu void *)kmalloc_large(PAGE_SIZE << 1, GFP_NOWAIT);
-	else
-#endif
 	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);