author    Josef Bacik <josef@redhat.com>    2011-01-25 15:10:08 -0500
committer Josef Bacik <josef@redhat.com>    2011-03-17 14:21:16 -0400
commit    4a64001f0047956e283f7ada9843dfc3f3b5d8c8 (patch)
tree      f45d7b09808ef70cf31e7da4e5e8b1c0c7521f4a /fs/btrfs
parent    d0215f3e5ebb5803cd6ec067b10c5e00a3ad7cfc (diff)
Btrfs: fix how we deal with the pages array in the write path
Really we don't need to memset the pages array at all, since we know how many
pages we're going to use in the array and pass that around. So don't memset,
just trust we're not idiots and we pass num_pages around properly.

Signed-off-by: Josef Bacik <josef@redhat.com>
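For readers skimming the change, a minimal userspace sketch of the contract the commit message describes; fill_pages/drop_pages are hypothetical stand-ins here, not the btrfs functions. The producer fills exactly num_pages slots and every consumer walks only those slots, so the array never needs to be zeroed and no NULL sentinel is required.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for prepare_pages(): fills exactly num_pages slots. */
static int fill_pages(int **pages, size_t num_pages)
{
        for (size_t i = 0; i < num_pages; i++) {
                pages[i] = malloc(sizeof(int));
                if (!pages[i]) {
                        /* no NULL sentinel any more, so unwind our own partial work */
                        while (i--)
                                free(pages[i]);
                        return -1;
                }
                *pages[i] = (int)i;
        }
        return 0;
}

/* Hypothetical stand-in for btrfs_drop_pages(): trusts num_pages, no NULL check. */
static void drop_pages(int **pages, size_t num_pages)
{
        for (size_t i = 0; i < num_pages; i++)
                free(pages[i]);
}

int main(void)
{
        int *pages[8];          /* capacity, like nrptrs; never memset */
        size_t num_pages = 5;   /* how many slots this call actually uses */

        if (fill_pages(pages, num_pages))
                return 1;
        printf("filled %zu slots, slots 5..7 never touched\n", num_pages);
        drop_pages(pages, num_pages);   /* never reads past num_pages */
        return 0;
}

The only cost of dropping the sentinel shows up in the error path above: a fill step that fails partway has to release what it already grabbed, because callers no longer stop at the first NULL entry.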
Diffstat (limited to 'fs/btrfs')
-rw-r--r--   fs/btrfs/file.c   9
1 file changed, 5 insertions, 4 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f2a80e570a6c..24a19c2743ca 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -108,8 +108,6 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
         size_t i;
         for (i = 0; i < num_pages; i++) {
-                if (!pages[i])
-                        break;
                 /* page checked is some magic around finding pages that
                  * have been modified without going through btrfs_set_page_dirty
                  * clear it here
@@ -824,7 +822,6 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         return err;
         }
 
-        memset(pages, 0, num_pages * sizeof(struct page *));
 again:
         for (i = 0; i < num_pages; i++) {
                 pages[i] = grab_cache_page(inode->i_mapping, index + i);
@@ -930,7 +927,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                 size_t copied;
 
                 WARN_ON(num_pages > nrptrs);
-                memset(pages, 0, sizeof(struct page *) * nrptrs);
 
                 /*
                  * Fault pages before locking them in prepare_pages
@@ -946,6 +942,11 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                 if (ret)
                         break;
 
+                /*
+                 * This is going to setup the pages array with the number of
+                 * pages we want, so we don't really need to worry about the
+                 * contents of pages from loop to loop
+                 */
                 ret = prepare_pages(root, file, pages, num_pages,
                                     pos, first_index, last_index,
                                     write_bytes);
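The comment added in that last hunk states the invariant that lets the array be reused across iterations of the write loop. As a hedged illustration (the loop shape and sizes below are invented, not the kernel code): each pass computes its own num_pages, the prepare step repopulates exactly that many slots, and anything a previous pass left in the higher slots is never read, so there is nothing to clear between iterations.

#include <stdio.h>
#include <stdlib.h>

#define NRPTRS 8        /* fixed capacity of the scratch array, like nrptrs */

/* Hypothetical prepare/drop pair, as in the sketch after the commit message. */
static int prepare(int **pages, size_t num_pages)
{
        for (size_t i = 0; i < num_pages; i++) {
                pages[i] = malloc(sizeof(int));
                if (!pages[i]) {
                        while (i--)
                                free(pages[i]);
                        return -1;
                }
        }
        return 0;
}

static void drop(int **pages, size_t num_pages)
{
        for (size_t i = 0; i < num_pages; i++)
                free(pages[i]);
}

int main(void)
{
        int *pages[NRPTRS];     /* reused every pass, never cleared */
        size_t remaining = 19;  /* pretend "pages left to write" */

        while (remaining > 0) {
                /* each pass uses only as many slots as it needs, up to capacity */
                size_t num_pages = remaining < NRPTRS ? remaining : NRPTRS;

                if (prepare(pages, num_pages))
                        return 1;
                printf("this pass used %zu of %d slots\n", num_pages, NRPTRS);
                drop(pages, num_pages); /* stale pointers above num_pages are never read */
                remaining -= num_pages;
        }
        return 0;
}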