author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2016-04-01 15:29:48 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>        2016-04-04 10:41:08 -0700
commit     ea1754a084760e68886f5b725c8eaada9cc57155 (patch)
tree       2e14936a959a661ee68d4490cb9b82b94bb27ab9 /fs/ntfs
parent     09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (diff)
mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage
Mostly direct substitution with occasional adjustment or removing outdated comments.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
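The substitution is mechanical: PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and PAGE_CACHE_MASK become PAGE_SIZE, PAGE_SHIFT and PAGE_MASK, and page_cache_get()/page_cache_release() become get_page()/put_page(). As a rough sketch only (the helper below is hypothetical and not part of this diff), code written against the post-patch API looks like this:

#include <linux/types.h>    /* u8 */
#include <linux/string.h>   /* memset() */
#include <linux/highmem.h>  /* kmap(), kunmap() */
#include <linux/mm.h>       /* PAGE_SIZE, put_page() */

/*
 * Hypothetical example, not taken from this diff: zero one page-cache
 * page and drop the reference the caller holds on it.  Before this
 * patch the same code would have used PAGE_CACHE_SIZE and
 * page_cache_release(); likewise PAGE_CACHE_SHIFT/PAGE_CACHE_MASK
 * become PAGE_SHIFT/PAGE_MASK and page_cache_get() becomes get_page().
 */
static void zero_and_release_page(struct page *page)
{
	u8 *kaddr = kmap(page);

	memset(kaddr, 0, PAGE_SIZE);	/* was: PAGE_CACHE_SIZE      */
	kunmap(page);
	put_page(page);			/* was: page_cache_release() */
}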
Diffstat (limited to 'fs/ntfs')
-rw-r--r--  fs/ntfs/aops.c      2
-rw-r--r--  fs/ntfs/aops.h      2
-rw-r--r--  fs/ntfs/compress.c  21
-rw-r--r--  fs/ntfs/dir.c       16
-rw-r--r--  fs/ntfs/file.c      2
-rw-r--r--  fs/ntfs/index.c     2
-rw-r--r--  fs/ntfs/inode.c     4
-rw-r--r--  fs/ntfs/super.c     14
8 files changed, 27 insertions, 36 deletions
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index a474e7ef92ea..97768a1379f2 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
// in the inode.
// Again, for each page do:
// __set_page_dirty_buffers();
- // page_cache_release()
+ // put_page()
// We don't need to wait on the writes.
// Update iblock.
}
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 37cd7e45dcbc..820d6eabf60f 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page)
* @index: index into the page cache for @mapping of the page to map
*
* Read a page from the page cache of the address space @mapping at position
- * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ * @index, where @index is in units of PAGE_SIZE, and not in bytes.
*
* If the page is not in memory it is loaded from disk first using the readpage
* method defined in the address space operations of @mapping and the page is
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index b6074a56661b..f2b5e746f49b 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -105,10 +105,6 @@ static void zero_partial_compressed_page(struct page *page,
ntfs_debug("Zeroing page region outside initialized size.");
if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
- /*
- * FIXME: Using clear_page() will become wrong when we get
- * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
- */
clear_page(kp);
return;
}
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
* @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
* completed during the decompression of the compression block (@cb_start).
*
- * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
* unpredicatbly! You have been warned!
*
* Note to hackers: This function may not sleep until it has finished accessing
@@ -462,7 +458,7 @@ return_overflow:
* have been written to so that we would lose data if we were to just overwrite
* them with the out-of-date uncompressed data.
*
- * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
* the end of the file I think. We need to detect this case and zero the out
* of bounds remainder of the page in question and mark it as handled. At the
* moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
* clusters so is probably not going to be seen by anyone. Still this should
* be fixed. (AIA)
*
- * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
* handling sparse and compressed cbs. (AIA)
*
* FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,12 +493,12 @@ int ntfs_read_compressed_block(struct page *page)
u64 cb_size_mask = cb_size - 1UL;
VCN vcn;
LCN lcn;
- /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
+ /* The first wanted vcn (minimum alignment is PAGE_SIZE). */
VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
vol->cluster_size_bits;
/*
* The first vcn after the last wanted vcn (minimum alignment is again
- * PAGE_CACHE_SIZE.
+ * PAGE_SIZE.
*/
VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
& ~cb_size_mask) >> vol->cluster_size_bits;
@@ -753,11 +749,6 @@ lock_retry_remap:
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page) {
- /*
- * FIXME: Using clear_page() will become wrong
- * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
- * for now there is no problem.
- */
if (likely(!cur_ofs))
clear_page(page_address(page));
else
@@ -807,7 +798,7 @@ lock_retry_remap:
* synchronous io for the majority of pages.
* Or if we choose not to do the read-ahead/-behind stuff, we
* could just return block_read_full_page(pages[xpage]) as long
- * as PAGE_CACHE_SIZE <= cb_size.
+ * as PAGE_SIZE <= cb_size.
*/
if (cb_max_ofs)
cb_max_page--;
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 3cdce162592d..a18613579001 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -315,7 +315,7 @@ found_it:
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
@@ -793,11 +793,11 @@ found_it:
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
- dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+ dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map directory index page, error %ld.",
-PTR_ERR(page));
@@ -809,9 +809,9 @@ descend_into_child_node:
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
- dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+ dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
/* Bounds checks. */
- if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+ if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
"inode 0x%lx or driver bug.", dir_ni->mft_no);
goto unm_err_out;
@@ -844,7 +844,7 @@ fast_descend_into_child_node:
goto unm_err_out;
}
index_end = (u8*)ia + dir_ni->itype.index.block_size;
- if (index_end > kaddr + PAGE_CACHE_SIZE) {
+ if (index_end > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx crosses page boundary. Impossible! "
"Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@ found_it2:
/* If vcn is in the same page cache page as old_vcn we
* recycle the mapped page. */
if (old_vcn << vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT == vcn <<
+ PAGE_SHIFT == vcn <<
vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT)
+ PAGE_SHIFT)
goto fast_descend_into_child_node;
unlock_page(page);
ntfs_unmap_page(page);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2dae60857544..91117ada8528 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
* only partially being written to.
*
* If @nr_pages is greater than one, we are guaranteed that the cluster size is
- * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
+ * greater than PAGE_SIZE, that all pages in @pages are entirely inside
* the same cluster and that they are the entirety of that cluster, and that
* the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
*
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 02a83a46ead2..0d645f357930 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -272,7 +272,7 @@ done:
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 3eda6d4bcc65..f40972d6df90 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -870,7 +870,7 @@ skip_attr_list_load:
}
if (ni->itype.index.block_size > PAGE_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) > "
- "PAGE_CACHE_SIZE (%ld) is not "
+ "PAGE_SIZE (%ld) is not "
"supported. Sorry.",
ni->itype.index.block_size,
PAGE_SIZE);
@@ -1586,7 +1586,7 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
goto unm_err_out;
}
if (ni->itype.index.block_size > PAGE_SIZE) {
- ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
+ ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
"(%ld) is not supported. Sorry.",
ni->itype.index.block_size, PAGE_SIZE);
err = -EOPNOTSUPP;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index ab2b0930054e..ecb49870a680 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -823,12 +823,12 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
vol->mft_record_size_bits, vol->mft_record_size_bits);
/*
- * We cannot support mft record sizes above the PAGE_CACHE_SIZE since
+ * We cannot support mft record sizes above the PAGE_SIZE since
* we store $MFT/$DATA, the table of mft records in the page cache.
*/
if (vol->mft_record_size > PAGE_SIZE) {
ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
- "PAGE_CACHE_SIZE on your system (%lu). "
+ "PAGE_SIZE on your system (%lu). "
"This is not supported. Sorry.",
vol->mft_record_size, PAGE_SIZE);
return false;
@@ -2471,12 +2471,12 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
down_read(&vol->lcnbmp_lock);
/*
* Convert the number of bits into bytes rounded up, then convert into
- * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+ * multiples of PAGE_SIZE, rounding up so that if we have one
* full and one partial page max_index = 2.
*/
max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
PAGE_SHIFT;
- /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+ /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
max_index, PAGE_SIZE / 4);
for (index = 0; index < max_index; index++) {
@@ -2547,7 +2547,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
pgoff_t index;

ntfs_debug("Entering.");
- /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+ /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
"0x%lx.", max_index, PAGE_SIZE / 4);
for (index = 0; index < max_index; index++) {
@@ -2639,7 +2639,7 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
/*
* Convert the maximum number of set bits into bytes rounded up, then
- * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
+ * convert into multiples of PAGE_SIZE, rounding up so that if we
* have one full and one partial page max_index = 2.
*/
max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
@@ -2765,7 +2765,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
if (!parse_options(vol, (char*)opt))
goto err_out_now;

- /* We support sector sizes up to the PAGE_CACHE_SIZE. */
+ /* We support sector sizes up to the PAGE_SIZE. */
if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
if (!silent)
ntfs_error(sb, "Device has unsupported sector size "