author    Christoph Lameter <clameter@sgi.com>                    2007-07-17 04:03:29 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-17 10:23:02 -0700
commit    94f6030ca792c57422f04a73e7a872d8325946d3
tree      0197f24d82b1706f1b0521f2cf68feeff64123df /mm
parent    81cda6626178cd55297831296ba8ecedbfd8b52d
Slab allocators: Replace explicit zeroing with __GFP_ZERO
kmalloc_node() and kmem_cache_alloc_node() were not available in a zeroing
variant in the past. But with __GFP_ZERO it is now possible to zero while
allocating.

Use __GFP_ZERO to remove the explicit clearing of memory via memset wherever
we can.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
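
To make the pattern concrete, here is a minimal before/after sketch of the
transformation applied at each call site. This is kernel C rather than
standalone userspace code; struct foo and the two helper functions are
hypothetical names invented for illustration, not taken from the patch:

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical example structure; not part of the patch. */
struct foo {
	int counter;
	void *data;
};

/* Before: allocate, then clear by hand, guarding against failure. */
static struct foo *foo_alloc_old(int node)
{
	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);

	if (f)
		memset(f, 0, sizeof(*f));
	return f;
}

/* After: pass __GFP_ZERO and let the allocator do the clearing.
 * No guard is needed: the allocator zeroes only memory it returns. */
static struct foo *foo_alloc_new(int node)
{
	return kmalloc_node(sizeof(struct foo), GFP_KERNEL | __GFP_ZERO, node);
}

The same trade applies to kmem_cache_alloc_node() callers, since gfp flags
are passed through to the allocator either way.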
Diffstat (limited to 'mm')
-rw-r--r--  mm/allocpercpu.c  9  +++------
-rw-r--r--  mm/mempool.c      3  +--
-rw-r--r--  mm/vmalloc.c      6  +++---
3 files changed, 7 insertions, 11 deletions
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index b2486cf887a0..00b02623f008 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -53,12 +53,9 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 	int node = cpu_to_node(cpu);
 
 	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node)) {
-		/* FIXME: kzalloc_node(size, gfp, node) */
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-		if (pdata->ptrs[cpu])
-			memset(pdata->ptrs[cpu], 0, size);
-	} else
+	if (node_online(node))
+		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+	else
 		pdata->ptrs[cpu] = kzalloc(size, gfp);
 	return pdata->ptrs[cpu];
 }
diff --git a/mm/mempool.c b/mm/mempool.c
index 3e8f1fed0e1f..02d5ec3feabc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -62,10 +62,9 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 			mempool_free_t *free_fn, void *pool_data, int node_id)
 {
 	mempool_t *pool;
-	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
-	memset(pool, 0, sizeof(*pool));
 	pool->elements = kmalloc_node(min_nr * sizeof(void *),
 				GFP_KERNEL, node_id);
 	if (!pool->elements) {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ddf87145cc49..8e05a11155c9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -432,11 +432,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK),
+				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
@@ -445,7 +446,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)
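
One detail worth spelling out in the vmalloc.c hunk: __GFP_ZERO is OR-ed in
after the caller's flags are filtered through GFP_LEVEL_MASK, not before. A
small sketch of that flag composition (the helper name is hypothetical;
GFP_LEVEL_MASK is the gfp filter mask of this kernel era and was removed
from mainline later):

#include <linux/gfp.h>

/* Hypothetical helper mirroring the kmalloc_node() path in
 * __vmalloc_area_node(): filter the caller-supplied flags first,
 * then add __GFP_ZERO so the zeroing request cannot be stripped
 * by the mask (at the time, __GFP_ZERO was not part of
 * GFP_LEVEL_MASK). */
static inline gfp_t vmalloc_array_gfp(gfp_t caller_flags)
{
	return (caller_flags & GFP_LEVEL_MASK) | __GFP_ZERO;
}

This ordering is also why the single memset(area->pages, 0, array_size)
after the branch could be deleted: both the __vmalloc_node() path and the
kmalloc_node() path now hand back already-zeroed memory.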