author	Josef Bacik <josef@redhat.com>	2012-06-08 15:16:12 -0400
committer	Chris Mason <chris.mason@oracle.com>	2012-06-14 21:30:53 -0400
commit	8180ef8894fa402443205cff1e23417e8d3434df (patch)
tree	f3bb1836d96049cb8da52666805fe5b8b4915eed
parent	9c5085c147989d48dfe74194b48affc23f376650 (diff)
Btrfs: keep inode pinned when compressing writes
A user reported lots of problems using compression on the new code, and it turns out part of the problem was that igrab() was failing when we added a new ordered extent. This happens because when writing out an inode under compression we immediately return without actually doing anything to the pages, and then another thread at some point down the line actually does the ordered dance. The problem is that between the point where we start writeback and the point where we actually add the ordered extent, the inode could be under reclaim, which makes igrab() return NULL.

So we need to do an igrab() when we create the async extent and then drop it when we are done with it. This makes sure the inode stays pinned in memory until the ordered extent can get a reference on it, and we are good to go. With this patch we no longer panic in btrfs_finish_ordered_io(). Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
-rw-r--r--	fs/btrfs/inode.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
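The pattern the patch applies is: any deferred work item queued against an inode takes its own reference with igrab() so the inode cannot be evicted before the work runs, and that reference is dropped exactly once with iput(), either early when the work turns out to produce nothing or in the work item's free path. Below is a minimal sketch of that pattern, not the actual btrfs code: the struct, function names, and the use of a plain work_struct are simplified for illustration (the real patch uses struct async_cow and btrfs_work).

```c
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct async_work_item {
	struct inode *inode;		/* pinned via igrab() while queued */
	struct work_struct work;
};

static void async_work_fn(struct work_struct *work)
{
	struct async_work_item *item =
		container_of(work, struct async_work_item, work);
	int did_work = 0;

	/* ... do the deferred processing, setting did_work ... */

	if (!did_work && item->inode) {
		/* nothing else will take a reference; drop the pin early */
		iput(item->inode);
		item->inode = NULL;
	}

	/* free path: drop whatever reference is still held */
	if (item->inode)
		iput(item->inode);
	kfree(item);
}

static int queue_async_work(struct inode *inode, struct workqueue_struct *wq)
{
	struct async_work_item *item;

	item = kmalloc(sizeof(*item), GFP_NOFS);
	if (!item)
		return -ENOMEM;

	/*
	 * igrab() returns NULL if the inode is already being evicted;
	 * storing its result rather than the raw pointer is what keeps
	 * the inode pinned until the deferred work runs.
	 */
	item->inode = igrab(inode);
	INIT_WORK(&item->work, async_work_fn);
	queue_work(wq, &item->work);
	return 0;
}
```

In the actual patch the same idea is split across the existing callbacks: cow_file_range_async() takes the reference with igrab(), async_cow_start() drops it early when no extents were added, and async_cow_free() drops any remaining reference before freeing the work item.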
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b7f398c36cb7..06075043da5d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -986,8 +986,10 @@ static noinline void async_cow_start(struct btrfs_work *work)
compress_file_range(async_cow->inode, async_cow->locked_page,
async_cow->start, async_cow->end, async_cow,
&num_added);
- if (num_added == 0)
+ if (num_added == 0) {
+ iput(async_cow->inode);
async_cow->inode = NULL;
+ }
}
/*
@@ -1020,6 +1022,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
{
struct async_cow *async_cow;
async_cow = container_of(work, struct async_cow, work);
+ if (async_cow->inode)
+ iput(async_cow->inode);
kfree(async_cow);
}
@@ -1038,7 +1042,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow); /* -ENOMEM */
- async_cow->inode = inode;
+ async_cow->inode = igrab(inode);
async_cow->root = root;
async_cow->locked_page = locked_page;
async_cow->start = start;