author    Chris Mason <chris.mason@oracle.com>  2008-10-30 13:22:14 -0400
committer Chris Mason <chris.mason@oracle.com>  2008-10-30 13:22:14 -0400
commit    cfbc246eaae2a1089911016094b74b3055e8a906
tree      c450f02cd605b38a578778dacd9c8768ce041789
parent    87ef2bb46bfc4be0b40799e68115cbe28d80a1bd
Btrfs: walk compressed pages based on the nr_pages count instead of bytes
The byte walk counting was awkward and error prone. This uses the number of
pages sent by the higher layer to build bios.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
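Not part of the commit itself: a minimal userspace C sketch of the two loop shapes, to show why the page-count walk is simpler. PAGE_SIZE_MODEL, walk_by_bytes and walk_by_pages are made-up names; the kernel code operates on struct page arrays and bios rather than printf.

#include <stdio.h>

#define PAGE_SIZE_MODEL 4096L	/* stand-in for PAGE_CACHE_SIZE */

/* Before: count down a byte total and bump the page index by hand.
 * A signed counter is used here so the demo terminates; with the
 * kernel's unsigned arithmetic, a partially filled last page makes
 * this bookkeeping easy to get wrong. */
static void walk_by_bytes(long compressed_len)
{
	long bytes_left = compressed_len;
	unsigned long page_index = 0;

	while (bytes_left > 0) {
		printf("byte walk: page %lu\n", page_index);
		page_index++;
		bytes_left -= PAGE_SIZE_MODEL;
	}
}

/* After: iterate exactly nr_pages times, one iteration per page. */
static void walk_by_pages(unsigned long nr_pages)
{
	unsigned long page_index;

	for (page_index = 0; page_index < nr_pages; page_index++)
		printf("page walk: page %lu\n", page_index);
}

int main(void)
{
	long compressed_len = 2 * PAGE_SIZE_MODEL + 100;	/* last page partial */
	unsigned long nr_pages =
		(compressed_len + PAGE_SIZE_MODEL - 1) / PAGE_SIZE_MODEL;

	walk_by_bytes(compressed_len);
	walk_by_pages(nr_pages);
	return 0;
}

Both walks visit three pages here, but the page-count loop gets its bound directly from the caller, which already knows how many pages it handed down.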
 fs/btrfs/compression.c | 7 +++++--
 fs/btrfs/inode.c       | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c5470367ca5c..9adaa79adad9 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -296,7 +296,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
 	/* create and submit bios for the compressed pages */
 	bytes_left = compressed_len;
-	while(bytes_left > 0) {
+	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
 		page = compressed_pages[page_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_size)
@@ -324,7 +324,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			bio->bi_end_io = end_compressed_bio_write;
 			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
 		}
-		page_index++;
+		if (bytes_left < PAGE_CACHE_SIZE) {
+			printk("bytes left %lu compress len %lu nr %lu\n",
+			       bytes_left, cb->compressed_len, cb->nr_pages);
+		}
 		bytes_left -= PAGE_CACHE_SIZE;
 		first_byte += PAGE_CACHE_SIZE;
 	}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9797592dc86b..6739424c0fe6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -338,7 +338,7 @@ again:
 	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
 	    btrfs_test_opt(root, COMPRESS)) {
 		WARN_ON(pages);
-		pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
 
 		/* we want to make sure the amount of IO required to satisfy
 		 * a random read is reasonably small, so we limit the size
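The inode.c hunk switches the page-pointer array from kmalloc() to kzalloc(). The commit message doesn't explain this, but zero-filling is the usual defensive choice for an array of pointers: cleanup paths can then walk every slot and free only what was actually allocated, since unused slots stay NULL. A userspace model of that pattern, with calloc() standing in for kzalloc():

#include <stdlib.h>

int main(void)
{
	const size_t nr = 8;
	void **pages = calloc(nr, sizeof(*pages));	/* kzalloc analogue */
	size_t i;

	if (!pages)
		return 1;

	/* pretend the allocation loop bailed out partway through */
	for (i = 0; i < nr / 2; i++)
		pages[i] = malloc(64);

	/* cleanup path: free(NULL) is a no-op, so the untouched
	 * second half of the array is skipped harmlessly */
	for (i = 0; i < nr; i++)
		free(pages[i]);
	free(pages);
	return 0;
}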