summaryrefslogtreecommitdiffstats
path: root/fs/libfs.c
diff options
context:
space:
mode:
authorJoel Becker <joel.becker@oracle.com>2010-08-16 12:10:17 -0700
committerJoel Becker <joel.becker@oracle.com>2010-09-10 08:42:48 -0700
commita33f13efe05192e7a805018a2ce2b2afddd04057 (patch)
treeeb2f8404c82f1464086c4bd3ddef82dcc1db71c8 /fs/libfs.c
parent3bdb8efd94a73bb137e3315cd831cbc874052b4b (diff)
downloadlinux-stable-a33f13efe05192e7a805018a2ce2b2afddd04057.tar.gz
linux-stable-a33f13efe05192e7a805018a2ce2b2afddd04057.tar.bz2
linux-stable-a33f13efe05192e7a805018a2ce2b2afddd04057.zip
libfs: Fix shift bug in generic_check_addressable()
generic_check_addressable() erroneously shifts pages down by a block factor when it should be shifting up. To prevent overflow, we shift blocks down to pages. Signed-off-by: Joel Becker <joel.becker@oracle.com>
Diffstat (limited to 'fs/libfs.c')
-rw-r--r--fs/libfs.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/fs/libfs.c b/fs/libfs.c
index 8debe7b33769..62baa0387d6e 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -925,6 +925,8 @@ EXPORT_SYMBOL(generic_file_fsync);
/**
 * generic_check_addressable - check that a filesystem's blocks are addressable
 * @blocksize_bits: log2 of the filesystem block size (must be 9..PAGE_CACHE_SHIFT)
 * @num_blocks:     total number of blocks in the filesystem
 *
 * Verifies that every block of the filesystem is reachable both as a
 * sector_t (512-byte sector index) and as a pgoff_t (page-cache index).
 * To avoid overflowing a u64, blocks are shifted DOWN to pages rather
 * than pages being shifted up to blocks.
 *
 * Returns 0 if the filesystem is addressable (or empty), -EINVAL for an
 * unsupported block size, and -EFBIG if the last block exceeds what
 * sector_t or pgoff_t can index.
 */
int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
{
	u64 last_fs_block;
	u64 last_fs_page;

	/* An empty filesystem trivially fits. */
	if (unlikely(num_blocks == 0))
		return 0;

	/*
	 * Validate blocksize_bits BEFORE using it as a shift count below:
	 * (PAGE_CACHE_SHIFT - blocksize_bits) must be a non-negative,
	 * in-range shift amount, otherwise the shift is undefined behavior.
	 */
	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
		return -EINVAL;

	last_fs_block = num_blocks - 1;
	/* Shift blocks down to pages so the intermediate cannot overflow. */
	last_fs_page = last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);

	if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		return -EFBIG;
	}
	return 0;
}