author    | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-12 13:50:21 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-12 13:50:21 -0800
commit    | 6bec0035286119eefc32a5b1102127e6a4032cb2 (patch)
tree      | 440fab001b4c877b0b0c9fd62d8392e956e387e3 /mm
parent    | 5d8e7fb6916556e9b476de33404e8c9e2c9aee61 (diff)
parent    | 15d0f5ea348b9c4e6d41df294dde38a56a39c7bf (diff)
Merge branch 'for-3.20/bdi' of git://git.kernel.dk/linux-block
Pull backing device changes from Jens Axboe:
"This contains a cleanup of how the backing device is handled, in
preparation for a rework of the life time rules. In this part, the
most important change is to split the unrelated nommu mmap flags from
it, but also removing a backing_dev_info pointer from the
address_space (and inode), and a cleanup of other various minor bits.
Christoph did all the work here, I just fixed an oops with pages that
have a swap backing. Arnd fixed a missing export, and Oleg killed the
lustre backing_dev_info from staging. Last patch was from Al,
unexporting parts that are now no longer needed outside"
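The recurring pattern in the mm/ hunks below is mechanical: every
mapping->backing_dev_info dereference becomes a lookup through the host
inode. A minimal sketch of the converted shape (the wrapper function is
hypothetical; inode_to_bdi() and bdi_write_congested() are the kernel
interfaces this series touches):

#include <linux/fs.h>
#include <linux/backing-dev.h>

/* Hypothetical helper: after this series the bdi is no longer cached
 * on the address_space, it is derived from the host inode on each use. */
static int example_mapping_write_congested(struct address_space *mapping)
{
	/* before this series: bdi = mapping->backing_dev_info */
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);

	return bdi_write_congested(bdi);
}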
* 'for-3.20/bdi' of git://git.kernel.dk/linux-block:
Make super_blocks and sb_lock static
mtd: export new mtd_mmap_capabilities
fs: make inode_to_bdi() handle NULL inode
staging/lustre/llite: get rid of backing_dev_info
fs: remove default_backing_dev_info
fs: don't reassign dirty inodes to default_backing_dev_info
nfs: don't call bdi_unregister
ceph: remove call to bdi_unregister
fs: remove mapping->backing_dev_info
fs: export inode_to_bdi and use it in favor of mapping->backing_dev_info
nilfs2: set up s_bdi like the generic mount_bdev code
block_dev: get bdev inode bdi directly from the block device
block_dev: only write bdev inode on close
fs: introduce f_op->mmap_capabilities for nommu mmap support
fs: kill BDI_CAP_SWAP_BACKED
fs: deduplicate noop_backing_dev_info
Diffstat (limited to 'mm')
-rw-r--r-- | mm/backing-dev.c    | 107
-rw-r--r-- | mm/fadvise.c        |   4
-rw-r--r-- | mm/filemap.c        |   4
-rw-r--r-- | mm/filemap_xip.c    |   3
-rw-r--r-- | mm/madvise.c        |  17
-rw-r--r-- | mm/nommu.c          |  69
-rw-r--r-- | mm/page-writeback.c |  29
-rw-r--r-- | mm/readahead.c      |   4
-rw-r--r-- | mm/shmem.c          |  24
-rw-r--r-- | mm/swap.c           |   2
-rw-r--r-- | mm/swap_state.c     |   6
-rw-r--r-- | mm/truncate.c       |   2
-rw-r--r-- | mm/vmscan.c         |   4
13 files changed, 95 insertions(+), 180 deletions(-)
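One note before the per-file hunks: on the nommu side, mapping
capabilities used to be read out of mapping->backing_dev_info and are now
supplied by a per-file hook. A sketch of what a directly mappable
character driver might report under the new scheme (the driver name and
fops are hypothetical; ->mmap_capabilities and the NOMMU_MAP_* flags are
introduced by this series):

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical nommu chardev: advertises that its "memory" can be
 * mapped directly for read and write, replacing the old
 * BDI_CAP_MAP_DIRECT/BDI_CAP_READ_MAP/BDI_CAP_WRITE_MAP bdi flags. */
static unsigned example_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static const struct file_operations example_fops = {
	.owner			= THIS_MODULE,
	.mmap_capabilities	= example_mmap_capabilities,
	/* a real driver would also provide ->mmap and ->get_unmapped_area,
	 * which validate_mmap_request() checks before honouring the flags */
};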
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 0ae0df55000b..7690ec77c722 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -14,19 +14,10 @@
 
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
 
-struct backing_dev_info default_backing_dev_info = {
-	.name		= "default",
-	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
-	.state		= 0,
-	.capabilities	= BDI_CAP_MAP_COPY,
-};
-EXPORT_SYMBOL_GPL(default_backing_dev_info);
-
 struct backing_dev_info noop_backing_dev_info = {
 	.name		= "noop",
 	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
-EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 
 static struct class *bdi_class;
@@ -40,17 +31,6 @@ LIST_HEAD(bdi_list);
 /* bdi_wq serves all asynchronous writeback tasks */
 struct workqueue_struct *bdi_wq;
 
-static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
-{
-	if (wb1 < wb2) {
-		spin_lock(&wb1->list_lock);
-		spin_lock_nested(&wb2->list_lock, 1);
-	} else {
-		spin_lock(&wb2->list_lock);
-		spin_lock_nested(&wb1->list_lock, 1);
-	}
-}
-
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -264,9 +244,6 @@ static int __init default_bdi_init(void)
 	if (!bdi_wq)
 		return -ENOMEM;
 
-	err = bdi_init(&default_backing_dev_info);
-	if (!err)
-		bdi_register(&default_backing_dev_info, NULL, "default");
 	err = bdi_init(&noop_backing_dev_info);
 
 	return err;
@@ -355,19 +332,19 @@ EXPORT_SYMBOL(bdi_register_dev);
  */
 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
-	if (!bdi_cap_writeback_dirty(bdi))
+	/* Make sure nobody queues further work */
+	spin_lock_bh(&bdi->wb_lock);
+	if (!test_and_clear_bit(BDI_registered, &bdi->state)) {
+		spin_unlock_bh(&bdi->wb_lock);
 		return;
+	}
+	spin_unlock_bh(&bdi->wb_lock);
 
 	/*
 	 * Make sure nobody finds us on the bdi_list anymore
 	 */
 	bdi_remove_from_list(bdi);
 
-	/* Make sure nobody queues further work */
-	spin_lock_bh(&bdi->wb_lock);
-	clear_bit(BDI_registered, &bdi->state);
-	spin_unlock_bh(&bdi->wb_lock);
-
 	/*
 	 * Drain work list and shutdown the delayed_work. At this point,
 	 * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
@@ -375,37 +352,22 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	 */
 	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 	flush_delayed_work(&bdi->wb.dwork);
-	WARN_ON(!list_empty(&bdi->work_list));
-	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 }
 
 /*
- * This bdi is going away now, make sure that no super_blocks point to it
+ * Called when the device behind @bdi has been removed or ejected.
+ *
+ * We can't really do much here except for reducing the dirty ratio at
+ * the moment. In the future we should be able to set a flag so that
+ * the filesystem can handle errors at mark_inode_dirty time instead
+ * of only at writeback time.
  */
-static void bdi_prune_sb(struct backing_dev_info *bdi)
-{
-	struct super_block *sb;
-
-	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (sb->s_bdi == bdi)
-			sb->s_bdi = &default_backing_dev_info;
-	}
-	spin_unlock(&sb_lock);
-}
-
 void bdi_unregister(struct backing_dev_info *bdi)
 {
-	if (bdi->dev) {
-		bdi_set_min_ratio(bdi, 0);
-		trace_writeback_bdi_unregister(bdi);
-		bdi_prune_sb(bdi);
+	if (WARN_ON_ONCE(!bdi->dev))
+		return;
 
-		bdi_wb_shutdown(bdi);
-		bdi_debug_unregister(bdi);
-		device_unregister(bdi->dev);
-		bdi->dev = NULL;
-	}
+	bdi_set_min_ratio(bdi, 0);
 }
 EXPORT_SYMBOL(bdi_unregister);
@@ -474,37 +436,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
 {
 	int i;
 
-	/*
-	 * Splice our entries to the default_backing_dev_info. This
-	 * condition shouldn't happen. @wb must be empty at this point and
-	 * dirty inodes on it might cause other issues. This workaround is
-	 * added by ce5f8e779519 ("writeback: splice dirty inode entries to
-	 * default bdi on bdi_destroy()") without root-causing the issue.
-	 *
-	 * http://lkml.kernel.org/g/1253038617-30204-11-git-send-email-jens.axboe@oracle.com
-	 * http://thread.gmane.org/gmane.linux.file-systems/35341/focus=35350
-	 *
-	 * We should probably add WARN_ON() to find out whether it still
-	 * happens and track it down if so.
-	 */
-	if (bdi_has_dirty_io(bdi)) {
-		struct bdi_writeback *dst = &default_backing_dev_info.wb;
-
-		bdi_lock_two(&bdi->wb, dst);
-		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
-		list_splice(&bdi->wb.b_io, &dst->b_io);
-		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-		spin_unlock(&bdi->wb.list_lock);
-		spin_unlock(&dst->list_lock);
-	}
-
-	bdi_unregister(bdi);
+	bdi_wb_shutdown(bdi);
 
+	WARN_ON(!list_empty(&bdi->work_list));
 	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 
+	if (bdi->dev) {
+		bdi_debug_unregister(bdi);
+		device_unregister(bdi->dev);
+		bdi->dev = NULL;
+	}
+
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
 		percpu_counter_destroy(&bdi->bdi_stat[i]);
-
 	fprop_local_destroy_percpu(&bdi->completions);
 }
 EXPORT_SYMBOL(bdi_destroy);
@@ -513,13 +457,12 @@ EXPORT_SYMBOL(bdi_destroy);
  * For use from filesystems to quickly init and register a bdi associated
  * with dirty writeback
  */
-int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
-			   unsigned int cap)
+int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
 {
 	int err;
 
 	bdi->name = name;
-	bdi->capabilities = cap;
+	bdi->capabilities = 0;
 	err = bdi_init(bdi);
 	if (err)
 		return err;
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 2ad7adf4f0a4..fac23ecf8d72 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -73,7 +73,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 	else
 		endbyte--;		/* inclusive */
 
-	bdi = mapping->backing_dev_info;
+	bdi = inode_to_bdi(mapping->host);
 
 	switch (advice) {
 	case POSIX_FADV_NORMAL:
@@ -113,7 +113,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 	case POSIX_FADV_NOREUSE:
 		break;
 	case POSIX_FADV_DONTNEED:
-		if (!bdi_write_congested(mapping->backing_dev_info))
+		if (!bdi_write_congested(bdi))
 			__filemap_fdatawrite_range(mapping, offset, endbyte,
 						   WB_SYNC_NONE);
diff --git a/mm/filemap.c b/mm/filemap.c
index bf7a27142704..d9f5336552d7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -211,7 +211,7 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 	 */
 	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
 		dec_zone_page_state(page, NR_FILE_DIRTY);
-		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+		dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
 	}
 }
@@ -2564,7 +2564,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	size_t		count = iov_iter_count(from);
 
 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
 		goto out;
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 70c09da1a419..c175f9f25210 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/fs.h>
+#include <linux/backing-dev.h>
 #include <linux/pagemap.h>
 #include <linux/export.h>
 #include <linux/uio.h>
@@ -409,7 +410,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	count = len;
 
 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);
 
 	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
 	if (ret)
diff --git a/mm/madvise.c b/mm/madvise.c
index d79fb5e8f80a..1077cbdc8b52 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -222,19 +222,22 @@ static long madvise_willneed(struct vm_area_struct *vma,
 	struct file *file = vma->vm_file;
 
 #ifdef CONFIG_SWAP
-	if (!file || mapping_cap_swap_backed(file->f_mapping)) {
+	if (!file) {
 		*prev = vma;
-		if (!file)
-			force_swapin_readahead(vma, start, end);
-		else
-			force_shm_swapin_readahead(vma, start, end,
-						file->f_mapping);
+		force_swapin_readahead(vma, start, end);
 		return 0;
 	}
-#endif
+
+	if (shmem_mapping(file->f_mapping)) {
+		*prev = vma;
+		force_shm_swapin_readahead(vma, start, end,
+					file->f_mapping);
+		return 0;
+	}
+#else
 	if (!file)
 		return -EBADF;
+#endif
 
 	if (file->f_mapping->a_ops->get_xip_mem) {
 		/* no bad return value, but ignore advice */
diff --git a/mm/nommu.c b/mm/nommu.c
index 1a19fb3b0463..7296360fc057 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -980,9 +980,6 @@ static int validate_mmap_request(struct file *file,
 		return -EOVERFLOW;
 
 	if (file) {
-		/* validate file mapping requests */
-		struct address_space *mapping;
-
 		/* files must support mmap */
 		if (!file->f_op->mmap)
 			return -ENODEV;
@@ -991,28 +988,22 @@ static int validate_mmap_request(struct file *file,
 		 *  - we support chardevs that provide their own "memory"
 		 *  - we support files/blockdevs that are memory backed
 		 */
-		mapping = file->f_mapping;
-		if (!mapping)
-			mapping = file_inode(file)->i_mapping;
-
-		capabilities = 0;
-		if (mapping && mapping->backing_dev_info)
-			capabilities = mapping->backing_dev_info->capabilities;
-
-		if (!capabilities) {
+		if (file->f_op->mmap_capabilities) {
+			capabilities = file->f_op->mmap_capabilities(file);
+		} else {
 			/* no explicit capabilities set, so assume some
 			 * defaults */
 			switch (file_inode(file)->i_mode & S_IFMT) {
 			case S_IFREG:
 			case S_IFBLK:
-				capabilities = BDI_CAP_MAP_COPY;
+				capabilities = NOMMU_MAP_COPY;
 				break;
 
 			case S_IFCHR:
 				capabilities =
-					BDI_CAP_MAP_DIRECT |
-					BDI_CAP_READ_MAP |
-					BDI_CAP_WRITE_MAP;
+					NOMMU_MAP_DIRECT |
+					NOMMU_MAP_READ |
+					NOMMU_MAP_WRITE;
 				break;
 
 			default:
@@ -1023,9 +1014,9 @@ static int validate_mmap_request(struct file *file,
 		/* eliminate any capabilities that we can't support on this
 		 * device */
 		if (!file->f_op->get_unmapped_area)
-			capabilities &= ~BDI_CAP_MAP_DIRECT;
+			capabilities &= ~NOMMU_MAP_DIRECT;
 		if (!file->f_op->read)
-			capabilities &= ~BDI_CAP_MAP_COPY;
+			capabilities &= ~NOMMU_MAP_COPY;
 
 		/* The file shall have been opened with read permission. */
 		if (!(file->f_mode & FMODE_READ))
@@ -1044,29 +1035,29 @@ static int validate_mmap_request(struct file *file,
 			if (locks_verify_locked(file))
 				return -EAGAIN;
 
-			if (!(capabilities & BDI_CAP_MAP_DIRECT))
+			if (!(capabilities & NOMMU_MAP_DIRECT))
 				return -ENODEV;
 
 			/* we mustn't privatise shared mappings */
-			capabilities &= ~BDI_CAP_MAP_COPY;
+			capabilities &= ~NOMMU_MAP_COPY;
 		} else {
 			/* we're going to read the file into private memory we
 			 * allocate */
-			if (!(capabilities & BDI_CAP_MAP_COPY))
+			if (!(capabilities & NOMMU_MAP_COPY))
 				return -ENODEV;
 
 			/* we don't permit a private writable mapping to be
 			 * shared with the backing device */
 			if (prot & PROT_WRITE)
-				capabilities &= ~BDI_CAP_MAP_DIRECT;
+				capabilities &= ~NOMMU_MAP_DIRECT;
 		}
 
-		if (capabilities & BDI_CAP_MAP_DIRECT) {
-			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
-			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
-			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
+		if (capabilities & NOMMU_MAP_DIRECT) {
+			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
+			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
+			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
 			    ) {
-				capabilities &= ~BDI_CAP_MAP_DIRECT;
+				capabilities &= ~NOMMU_MAP_DIRECT;
 				if (flags & MAP_SHARED) {
 					printk(KERN_WARNING
 					       "MAP_SHARED not completely supported on !MMU\n");
@@ -1083,21 +1074,21 @@ static int validate_mmap_request(struct file *file,
 		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 			/* handle implication of PROT_EXEC by PROT_READ */
 			if (current->personality & READ_IMPLIES_EXEC) {
-				if (capabilities & BDI_CAP_EXEC_MAP)
+				if (capabilities & NOMMU_MAP_EXEC)
 					prot |= PROT_EXEC;
 			}
 		} else if ((prot & PROT_READ) &&
 			 (prot & PROT_EXEC) &&
-			 !(capabilities & BDI_CAP_EXEC_MAP)
+			 !(capabilities & NOMMU_MAP_EXEC)
 			 ) {
 			/* backing file is not executable, try to copy */
-			capabilities &= ~BDI_CAP_MAP_DIRECT;
+			capabilities &= ~NOMMU_MAP_DIRECT;
 		}
 	} else {
 		/* anonymous mappings are always memory backed and can be
 		 * privately mapped
 		 */
-		capabilities = BDI_CAP_MAP_COPY;
+		capabilities = NOMMU_MAP_COPY;
 
 		/* handle PROT_EXEC implication by PROT_READ */
 		if ((prot & PROT_READ) &&
@@ -1129,7 +1120,7 @@ static unsigned long determine_vm_flags(struct file *file,
 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
 	/* vm_flags |= mm->def_flags; */
 
-	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
+	if (!(capabilities & NOMMU_MAP_DIRECT)) {
 		/* attempt to share read-only copies of mapped file chunks */
 		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 		if (file && !(prot & PROT_WRITE))
@@ -1138,7 +1129,7 @@ static unsigned long determine_vm_flags(struct file *file,
 		/* overlay a shareable mapping on the backing device or inode
 		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
 		 * romfs/cramfs */
-		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
+		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
 		if (flags & MAP_SHARED)
 			vm_flags |= VM_SHARED;
 	}
@@ -1191,7 +1182,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	 * shared mappings on devices or memory
 	 * - VM_MAYSHARE will be set if it may attempt to share
 	 */
-	if (capabilities & BDI_CAP_MAP_DIRECT) {
+	if (capabilities & NOMMU_MAP_DIRECT) {
 		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
 		if (ret == 0) {
 			/* shouldn't return success if we're not sharing */
@@ -1380,7 +1371,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
 			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
 				/* new mapping is not a subset of the region */
-				if (!(capabilities & BDI_CAP_MAP_DIRECT))
+				if (!(capabilities & NOMMU_MAP_DIRECT))
 					goto sharing_violation;
 				continue;
 			}
@@ -1419,7 +1410,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 		 * - this is the hook for quasi-memory character devices to
 		 *   tell us the location of a shared mapping
 		 */
-		if (capabilities & BDI_CAP_MAP_DIRECT) {
+		if (capabilities & NOMMU_MAP_DIRECT) {
 			addr = file->f_op->get_unmapped_area(file, addr, len,
 							     pgoff, flags);
 			if (IS_ERR_VALUE(addr)) {
@@ -1431,10 +1422,10 @@ unsigned long do_mmap_pgoff(struct file *file,
 				 * the mapping so we'll have to attempt to copy
 				 * it */
 				ret = -ENODEV;
-				if (!(capabilities & BDI_CAP_MAP_COPY))
+				if (!(capabilities & NOMMU_MAP_COPY))
 					goto error_just_free;
 
-				capabilities &= ~BDI_CAP_MAP_DIRECT;
+				capabilities &= ~NOMMU_MAP_DIRECT;
 			} else {
 				vma->vm_start = region->vm_start = addr;
 				vma->vm_end = region->vm_end = addr + len;
@@ -1445,7 +1436,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	vma->vm_region = region;
 
 	/* set up the mapping
-	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
+	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
 	 */
 	if (file && vma->vm_flags & VM_SHARED)
 		ret = do_mmap_shared_file(vma);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6a73e47e81c6..45e187b2d971 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1351,7 +1351,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	unsigned long task_ratelimit;
 	unsigned long dirty_ratelimit;
 	unsigned long pos_ratio;
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
 	unsigned long start_time = jiffies;
@@ -1574,7 +1574,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
  */
 void balance_dirty_pages_ratelimited(struct address_space *mapping)
 {
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	int ratelimit;
 	int *p;
@@ -1929,7 +1929,7 @@ continue_unlock:
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
-			trace_wbc_writepage(wbc, mapping->backing_dev_info);
+			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
 			ret = (*writepage)(page, wbc, data);
 			if (unlikely(ret)) {
 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
@@ -2094,10 +2094,12 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 	trace_writeback_dirty_page(page, mapping);
 
 	if (mapping_cap_account_dirty(mapping)) {
+		struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
+
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_DIRTIED);
-		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
-		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
+		__inc_bdi_stat(bdi, BDI_RECLAIMABLE);
+		__inc_bdi_stat(bdi, BDI_DIRTIED);
 		task_io_account_write(PAGE_CACHE_SIZE);
 		current->nr_dirtied++;
 		this_cpu_inc(bdp_ratelimits);
@@ -2156,7 +2158,7 @@ void account_page_redirty(struct page *page)
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		current->nr_dirtied--;
 		dec_zone_page_state(page, NR_DIRTIED);
-		dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
+		dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED);
 	}
 }
 EXPORT_SYMBOL(account_page_redirty);
@@ -2298,7 +2300,7 @@ int clear_page_dirty_for_io(struct page *page)
 		 */
 		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);
-			dec_bdi_stat(mapping->backing_dev_info,
+			dec_bdi_stat(inode_to_bdi(mapping->host),
 					BDI_RECLAIMABLE);
 			return 1;
 		}
@@ -2316,7 +2318,7 @@ int test_clear_page_writeback(struct page *page)
 
 	memcg = mem_cgroup_begin_page_stat(page);
 	if (mapping) {
-		struct backing_dev_info *bdi = mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 		unsigned long flags;
 
 		spin_lock_irqsave(&mapping->tree_lock, flags);
@@ -2351,7 +2353,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 
 	memcg = mem_cgroup_begin_page_stat(page);
 	if (mapping) {
-		struct backing_dev_info *bdi = mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 		unsigned long flags;
 
 		spin_lock_irqsave(&mapping->tree_lock, flags);
@@ -2405,12 +2407,7 @@ EXPORT_SYMBOL(mapping_tagged);
  */
 void wait_for_stable_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
-
-	if (!bdi_cap_stable_pages_required(bdi))
-		return;
-
-	wait_on_page_writeback(page);
+	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
+		wait_on_page_writeback(page);
 }
 EXPORT_SYMBOL_GPL(wait_for_stable_page);
diff --git a/mm/readahead.c b/mm/readahead.c
index 17b9172ec37f..935675844b2e 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -27,7 +27,7 @@
 void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
-	ra->ra_pages = mapping->backing_dev_info->ra_pages;
+	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
 	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
@@ -541,7 +541,7 @@ page_cache_async_readahead(struct address_space *mapping,
 	/*
 	 * Defer asynchronous read-ahead on IO congestion.
 	 */
-	if (bdi_read_congested(mapping->backing_dev_info))
+	if (bdi_read_congested(inode_to_bdi(mapping->host)))
 		return;
 
 	/* do read-ahead */
diff --git a/mm/shmem.c b/mm/shmem.c
index 864c878401e6..a63031fa3e0c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -191,11 +191,6 @@ static const struct inode_operations shmem_dir_inode_operations;
 static const struct inode_operations shmem_special_inode_operations;
 static const struct vm_operations_struct shmem_vm_ops;
 
-static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
-	.ra_pages	= 0,	/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
-};
-
 static LIST_HEAD(shmem_swaplist);
 static DEFINE_MUTEX(shmem_swaplist_mutex);
@@ -765,11 +760,11 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 		goto redirty;
 
 	/*
-	 * shmem_backing_dev_info's capabilities prevent regular writeback or
-	 * sync from ever calling shmem_writepage; but a stacking filesystem
-	 * might use ->writepage of its underlying filesystem, in which case
-	 * tmpfs should write out to swap only in response to memory pressure,
-	 * and not for the writeback threads or sync.
+	 * Our capabilities prevent regular writeback or sync from ever calling
+	 * shmem_writepage; but a stacking filesystem might use ->writepage of
+	 * its underlying filesystem, in which case tmpfs should write out to
+	 * swap only in response to memory pressure, and not for the writeback
+	 * threads or sync.
 	 */
 	if (!wbc->for_reclaim) {
 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
@@ -1415,7 +1410,6 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 		inode->i_ino = get_next_ino();
 		inode_init_owner(inode, dir, mode);
 		inode->i_blocks = 0;
-		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		inode->i_generation = get_seconds();
 		info = SHMEM_I(inode);
@@ -1461,7 +1455,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
 bool shmem_mapping(struct address_space *mapping)
 {
-	return mapping->backing_dev_info == &shmem_backing_dev_info;
+	return mapping->host->i_sb->s_op == &shmem_ops;
 }
 
 #ifdef CONFIG_TMPFS
@@ -3225,10 +3219,6 @@ int __init shmem_init(void)
 	if (shmem_inode_cachep)
 		return 0;
 
-	error = bdi_init(&shmem_backing_dev_info);
-	if (error)
-		goto out4;
-
 	error = shmem_init_inodecache();
 	if (error)
 		goto out3;
@@ -3252,8 +3242,6 @@ out1:
 out2:
 	shmem_destroy_inodecache();
 out3:
-	bdi_destroy(&shmem_backing_dev_info);
-out4:
 	shm_mnt = ERR_PTR(error);
 	return error;
 }
diff --git a/mm/swap.c b/mm/swap.c
index 5b3087228b99..cd3a5e64cea9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1138,8 +1138,6 @@ void __init swap_setup(void)
 #ifdef CONFIG_SWAP
 	int i;
 
-	if (bdi_init(swapper_spaces[0].backing_dev_info))
-		panic("Failed to init swap bdi");
 	for (i = 0; i < MAX_SWAPFILES; i++)
 		spin_lock_init(&swapper_spaces[i].tree_lock);
 #endif
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 9711342987a0..405923f77334 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -32,17 +32,11 @@ static const struct address_space_operations swap_aops = {
 #endif
 };
 
-static struct backing_dev_info swap_backing_dev_info = {
-	.name		= "swap",
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
-};
-
 struct address_space swapper_spaces[MAX_SWAPFILES] = {
 	[0 ... MAX_SWAPFILES - 1] = {
 		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
 		.i_mmap_writable = ATOMIC_INIT(0),
 		.a_ops		= &swap_aops,
-		.backing_dev_info = &swap_backing_dev_info,
 	}
 };
diff --git a/mm/truncate.c b/mm/truncate.c
index f1e4d6052369..ddec5a5966d7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -112,7 +112,7 @@ void cancel_dirty_page(struct page *page, unsigned int account_size)
 	struct address_space *mapping = page->mapping;
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		dec_zone_page_state(page, NR_FILE_DIRTY);
-		dec_bdi_stat(mapping->backing_dev_info,
+		dec_bdi_stat(inode_to_bdi(mapping->host),
 					BDI_RECLAIMABLE);
 		if (account_size)
 			task_io_account_cancelled_write(account_size);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8e645ee52045..224dd298fdcd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -500,7 +500,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 	}
 	if (mapping->a_ops->writepage == NULL)
 		return PAGE_ACTIVATE;
-	if (!may_write_to_queue(mapping->backing_dev_info, sc))
+	if (!may_write_to_queue(inode_to_bdi(mapping->host), sc))
 		return PAGE_KEEP;
 
 	if (clear_page_dirty_for_io(page)) {
@@ -879,7 +879,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		mapping = page_mapping(page);
 		if (((dirty || writeback) && mapping &&
-		     bdi_write_congested(mapping->backing_dev_info)) ||
+		     bdi_write_congested(inode_to_bdi(mapping->host))) ||
 		    (writeback && PageReclaim(page)))
 			nr_congested++;
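For filesystems that register their own bdi, the visible fallout above is
the narrowed bdi_setup_and_register() signature: the capabilities argument
is gone and the field is now always set to 0. A sketch of the adjusted
call-site shape (the filesystem name and its sb_info are hypothetical):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>

struct example_sb_info {
	struct backing_dev_info bdi;
};

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	struct example_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	int err;

	if (!sbi)
		return -ENOMEM;

	/* was: bdi_setup_and_register(&sbi->bdi, "example", BDI_CAP_MAP_COPY) */
	err = bdi_setup_and_register(&sbi->bdi, "example");
	if (err) {
		kfree(sbi);
		return err;
	}

	sb->s_fs_info = sbi;
	sb->s_bdi = &sbi->bdi;
	return 0;
}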