| field | value |
|---|---|
| author | Steven Whitehouse <swhiteho@redhat.com>, 2006-04-03 09:08:57 -0400 |
| committer | Steven Whitehouse <swhiteho@redhat.com>, 2006-04-03 09:08:57 -0400 |
| commit | 76467874b83835129dc454e3a7a8e5d1186101b0 |
| tree | 162129f0c36c35be4aa323cf00626db0e804c3fc |
| parent | 8628de0583504138551a05ad44ca388467f0f552 |
| parent | 6246b6128bbe34d0752f119cf7c5111c85fe481d |
Merge branch 'master'
Diffstat (limited to 'mm')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/fadvise.c | 20 |
| -rw-r--r-- | mm/highmem.c | 15 |
| -rw-r--r-- | mm/hugetlb.c | 6 |
| -rw-r--r-- | mm/memory.c | 2 |
| -rw-r--r-- | mm/mmap.c | 9 |
| -rw-r--r-- | mm/page-writeback.c | 2 |
| -rw-r--r-- | mm/slab.c | 18 |
| -rw-r--r-- | mm/swap_state.c | 3 |
| -rw-r--r-- | mm/swapfile.c | 14 |
| -rw-r--r-- | mm/vmalloc.c | 3 |

10 files changed, 32 insertions(+), 60 deletions(-)
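The first hunks below, in mm/fadvise.c, back out the experimental LINUX_FADV_ASYNC_WRITE and LINUX_FADV_WRITE_WAIT advice values from sys_fadvise64_64(), leaving the standard advice set. For orientation only, here is a minimal userspace sketch (not part of this patch) of the stable posix_fadvise() interface that applications keep using regardless:

```c
/* Userspace sketch: hint sequential access, then drop cached pages.
 * Note that posix_fadvise() returns an error number directly rather
 * than setting errno. */
#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd, err;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return EXIT_FAILURE;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Hint sequential access; the kernel may enlarge readahead. */
	err = posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
	if (err)
		fprintf(stderr, "fadvise: %s\n", strerror(err));

	/* ... read the file here ... */

	/* Hint that cached pages for this file are no longer needed. */
	err = posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
	if (err)
		fprintf(stderr, "fadvise: %s\n", strerror(err));

	close(fd);
	return EXIT_SUCCESS;
}
```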
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 907c39257ca0..0a03357a1f8e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -35,17 +35,6 @@
  *
  * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
  *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE: push all of the currently
- * dirty pages at the disk.
- *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE, LINUX_FADV_WRITE_WAIT: push
- * all of the currently dirty pages at the disk, wait until they have been
- * written.
- *
- * It should be noted that none of these operations write out the file's
- * metadata. So unless the application is strictly performing overwrites of
- * already-instantiated disk blocks, there are no guarantees here that the data
- * will be available after a crash.
  */
 asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 {
@@ -129,15 +118,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 		invalidate_mapping_pages(mapping, start_index,
 					end_index);
 		break;
-	case LINUX_FADV_ASYNC_WRITE:
-		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
-				WB_SYNC_NONE);
-		break;
-	case LINUX_FADV_WRITE_WAIT:
-		ret = wait_on_page_writeback_range(mapping,
-				offset >> PAGE_CACHE_SHIFT,
-				endbyte >> PAGE_CACHE_SHIFT);
-		break;
 	default:
 		ret = -EINVAL;
 	}
diff --git a/mm/highmem.c b/mm/highmem.c
index 55885f64af40..9b274fdf9d08 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
 		pkmap_count[i] = 0;
 
 		/* sanity check */
-		if (pte_none(pkmap_page_table[i]))
-			BUG();
+		BUG_ON(pte_none(pkmap_page_table[i]));
 
 		/*
 		 * Don't need an atomic fetch-and-clear op here;
@@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
-	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-		BUG();
+	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
 }
@@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
-	if (!vaddr)
-		BUG();
+	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
 	/*
@@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
 		return 0;
 
 	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
-	if (!page_pool)
-		BUG();
+	BUG_ON(!page_pool);
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 
 	return 0;
@@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
 	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
 				       mempool_free_pages, (void *) 0);
-	if (!isa_page_pool)
-		BUG();
+	BUG_ON(!isa_page_pool);
 
 	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
 	return 0;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ebad6bbb3501..832f676ca038 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -334,6 +334,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
 		return nr_huge_pages;
 
 	spin_lock(&hugetlb_lock);
+	count = max(count, reserved_huge_pages);
 	try_to_free_low(count);
 	while (count < nr_huge_pages) {
 		struct page *page = dequeue_huge_page(NULL, 0);
@@ -697,9 +698,10 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
 		page = pte_page(*pte);
 same_page:
-		get_page(page);
-		if (pages)
+		if (pages) {
+			get_page(page);
 			pages[i] = page + pfn_offset;
+		}
 
 		if (vmas)
 			vmas[i] = vma;
diff --git a/mm/memory.c b/mm/memory.c
index 8d8f52569f32..0ec7bc644271 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -87,7 +87,7 @@
 int randomize_va_space __read_mostly = 1;
 static int __init disable_randmaps(char *s)
 {
 	randomize_va_space = 0;
-	return 0;
+	return 1;
 }
 __setup("norandmaps", disable_randmaps);
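Most of the conversions in this merge (mm/highmem.c above, plus mm/mmap.c and mm/slab.c below) replace the open-coded sequence `if (cond) BUG();` with the single statement `BUG_ON(cond)`. A minimal userspace analogue of the idiom, assuming a simplified abort()-based definition rather than the kernel's architecture-specific trap machinery:

```c
/* Userspace sketch of the BUG_ON() idiom used throughout this merge.
 * These macro definitions are illustrative stand-ins, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define BUG() do { \
		fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); \
		abort(); \
	} while (0)

/* The conversion in these hunks: a two-line "if (cond) BUG();" becomes
 * one self-describing expression. */
#define BUG_ON(cond) do { if (cond) BUG(); } while (0)

int main(void)
{
	int *p = malloc(sizeof(*p));

	BUG_ON(!p);	/* was: if (!p) BUG(); */
	*p = 42;
	printf("%d\n", *p);
	free(p);
	return 0;
}
```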
diff --git a/mm/mmap.c b/mm/mmap.c
index 4f5b5709136a..e780d19aa214 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count)
 		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-	if (bug)
-		BUG();
+	BUG_ON(bug);
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	struct rb_node ** rb_link, * rb_parent;
 
 	__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-	if (__vma && __vma->vm_start < vma->vm_end)
-		BUG();
+	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	mm->map_count++;
 }
@@ -813,8 +811,7 @@ try_prev:
 	 * (e.g. stash info in next's anon_vma_node when assigning
 	 * an anon_vma, or when trying vma_merge).  Another time.
 	 */
-	if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
-		BUG();
+	BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
 	if (!near)
 		goto none;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 893d7677579e..6dcce3a4bbdc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
- * @nr_pages: number of pages which the caller has just dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's
diff --git a/mm/slab.c b/mm/slab.c
index 4cbf8bb13557..f055c1420216 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
 		if (cache_cache.num)
 			break;
 	}
-	if (!cache_cache.num)
-		BUG();
+	BUG_ON(!cache_cache.num);
 	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
	 */
-	if (flags & ~CREATE_MASK)
-		BUG();
+	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words.  This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-		if (slabp->inuse)
-			BUG();
+		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	return __cache_shrink(cachep);
 }
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 	int i;
 	struct kmem_list3 *l3;
 
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	/* Don't let CPUs to come and go */
 	lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-		BUG();
+	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
 	if (flags & SLAB_NO_GROW)
 		return 0;
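The mm/slab.c hunks above also preserve the idiom of validating a caller's flags word against a mask of known bits once, up front, so the allocation fast path never re-checks it. A self-contained userspace sketch of that idiom, with hypothetical flag names:

```c
/* Userspace sketch of the flags-validation idiom from the mm/slab.c
 * hunks: any bit outside the allowed mask is a caller bug, caught once
 * at entry. Flag names here are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

#define FLAG_A      (1u << 0)
#define FLAG_B      (1u << 1)
#define FLAG_C      (1u << 2)
#define VALID_MASK  (FLAG_A | FLAG_B | FLAG_C)

static void check_flags(unsigned int flags)
{
	/* Counterpart of BUG_ON(flags & ~CREATE_MASK) in kmem_cache_create(). */
	if (flags & ~VALID_MASK) {
		fprintf(stderr, "unknown flag bits: %#x\n", flags & ~VALID_MASK);
		abort();
	}
}

int main(void)
{
	check_flags(FLAG_A | FLAG_C);	/* accepted */
	check_flags(1u << 5);		/* rejected: aborts */
	return 0;
}
```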
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d7af296833fc..e0e1583f32c2 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
 	swp_entry_t entry;
 	int err;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	for (;;) {
 		entry = get_swap_page();
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 39aa9d129612..e5fd5385f0cc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -397,18 +397,24 @@ void free_swap_and_cache(swp_entry_t entry)
 
 	p = swap_info_get(entry);
 	if (p) {
-		if (swap_entry_free(p, swp_offset(entry)) == 1)
-			page = find_trylock_page(&swapper_space, entry.val);
+		if (swap_entry_free(p, swp_offset(entry)) == 1) {
+			page = find_get_page(&swapper_space, entry.val);
+			if (page && unlikely(TestSetPageLocked(page))) {
+				page_cache_release(page);
+				page = NULL;
+			}
+		}
 		spin_unlock(&swap_lock);
 	}
 	if (page) {
 		int one_user;
 
 		BUG_ON(PagePrivate(page));
-		page_cache_get(page);
 		one_user = (page_count(page) == 2);
 		/* Only cache user (+us), or swap space full? Free it! */
-		if (!PageWriteback(page) && (one_user || vm_swap_full())) {
+		/* Also recheck PageSwapCache after page is locked (above) */
+		if (PageSwapCache(page) && !PageWriteback(page) &&
+					(one_user || vm_swap_full())) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 729eb3eec75f..c0504f1e34eb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
 		int i;
 
 		for (i = 0; i < area->nr_pages; i++) {
-			if (unlikely(!area->pages[i]))
-				BUG();
+			BUG_ON(!area->pages[i]);
 			__free_page(area->pages[i]);
 		}
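The mm/swapfile.c hunk is the one behavioural fix in this group: free_swap_and_cache() now takes a reference with find_get_page() and then trylocks the page, backing out (dropping the reference) on contention, instead of relying on find_trylock_page(); once the lock is held it rechecks PageSwapCache(), since the page's state can change between the unlocked lookup and lock acquisition. A userspace sketch of that reference-then-trylock pattern, with all names hypothetical and a pthreads mutex standing in for the page lock:

```c
/* Userspace sketch of the pattern in the mm/swapfile.c hunk: grab a
 * reference first, then trylock, and back out cleanly on contention.
 * Build with -pthread. All names here are illustrative stand-ins. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct object {
	atomic_int refcount;
	pthread_mutex_t lock;
};

static struct object *object_get(struct object *obj)
{
	atomic_fetch_add(&obj->refcount, 1);	/* like find_get_page() */
	return obj;
}

static void object_put(struct object *obj)
{
	atomic_fetch_sub(&obj->refcount, 1);	/* like page_cache_release() */
}

/* Returns the object referenced and locked, or NULL on contention. */
static struct object *object_get_locked(struct object *obj)
{
	object_get(obj);
	if (pthread_mutex_trylock(&obj->lock) != 0) {
		object_put(obj);	/* back out, as the patch does */
		return NULL;
	}
	return obj;
}

int main(void)
{
	struct object obj = { 1, PTHREAD_MUTEX_INITIALIZER };
	struct object *locked = object_get_locked(&obj);

	if (locked) {
		/* Here the caller would re-validate state, as the patch
		 * re-tests PageSwapCache() after acquiring the lock. */
		printf("locked, refcount now %d\n",
		       atomic_load(&locked->refcount));
		pthread_mutex_unlock(&locked->lock);
		object_put(locked);
	}
	return 0;
}
```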