author     Andreas Gruenbacher <agruenba@redhat.com>    2017-08-01 11:45:23 -0500
committer  Bob Peterson <rpeterso@redhat.com>           2017-08-10 10:45:21 -0500
commit     71c1b2136835c88c231f7a5e3dc618f7568f84f7 (patch)
tree       73a9839b924775b9e91e47a7f5a1288cc583d31c /fs/gfs2/super.c
parent     eebd2e813f7ef688e22cd0b68aea78fb3d1ef19c (diff)
gfs2: gfs2_evict_inode: Put glocks asynchronously
gfs2_evict_inode is called to free inodes under memory pressure. The
function calls into DLM when an inode's last cluster-wide reference goes
away (remote unlink) and to release the glock and associated DLM lock
before finally destroying the inode. However, if DLM is blocked waiting
for memory to become available, calling into DLM again will deadlock.

Avoid that by decoupling the release of glocks from the destruction of
inodes in that case: with gfs2_glock_queue_put, glocks are dequeued
asynchronously in work queue context, by which time the associated inodes
have likely already been destroyed.

With this change, an inode can be unlinked, remote-unlink can be
triggered, and then the inode can be reallocated before all remote-unlink
callbacks are processed. To detect that, revalidate the link count in
gfs2_evict_inode to make sure we are not deleting an allocated, referenced
inode.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
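[Editor's note] The deferral works because the final reference drop is moved out of the reclaim path and into workqueue context, where blocking is safe. Below is a minimal, illustrative sketch of that pattern using generic kernel primitives; it is not gfs2 code, and the "myobj" names are hypothetical (the real mechanism is gfs2_glock_queue_put() queuing the glock's work item, as shown in the diff that follows).

/*
 * Illustrative sketch only -- not gfs2 code.  "myobj" and its helpers are
 * hypothetical names used to show the deferred-put pattern.
 */
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct myobj {
	struct kref ref;
	struct work_struct put_work;
};

/* Final teardown; in gfs2 this is where the glock/DLM lock would go away. */
static void myobj_release(struct kref *ref)
{
	kfree(container_of(ref, struct myobj, ref));
}

/* Runs later in workqueue context, outside the memory-reclaim path. */
static void myobj_put_worker(struct work_struct *work)
{
	struct myobj *obj = container_of(work, struct myobj, put_work);

	kref_put(&obj->ref, myobj_release);
}

static void myobj_init(struct myobj *obj)
{
	kref_init(&obj->ref);
	INIT_WORK(&obj->put_work, myobj_put_worker);
}

static void myobj_put_eventually(struct myobj *obj)
{
	if (current->flags & PF_MEMALLOC) {
		/*
		 * The caller is in memory reclaim; dropping the last
		 * reference here could call into a subsystem (DLM, in the
		 * glock case) that is itself waiting for memory.  Hand the
		 * put off to the worker instead.
		 */
		schedule_work(&obj->put_work);
	} else {
		kref_put(&obj->ref, myobj_release);
	}
}

Whether the put happens inline or via the worker, the same release function eventually runs; deferring only changes the context it runs in, which is what breaks the reclaim-versus-DLM dependency described above.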
Diffstat (limited to 'fs/gfs2/super.c')
-rw-r--r--  fs/gfs2/super.c   30
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6c39bb1ec100..4089dbe617a6 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1502,6 +1502,22 @@ out_qs:
 }
 
 /**
+ * gfs2_glock_put_eventually
+ * @gl: The glock to put
+ *
+ * When under memory pressure, trigger a deferred glock put to make sure we
+ * won't call into DLM and deadlock. Otherwise, put the glock directly.
+ */
+
+static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
+{
+	if (current->flags & PF_MEMALLOC)
+		gfs2_glock_queue_put(gl);
+	else
+		gfs2_glock_put(gl);
+}
+
+/**
  * gfs2_evict_inode - Remove an inode from cache
  * @inode: The inode to evict
  *
@@ -1564,6 +1580,12 @@ static void gfs2_evict_inode(struct inode *inode)
 			goto out_truncate;
 	}
 
+	/*
+	 * The inode may have been recreated in the meantime.
+	 */
+	if (inode->i_nlink)
+		goto out_truncate;
+
 alloc_failed:
 	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
 	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
@@ -1653,12 +1675,16 @@ out:
 	glock_clear_object(ip->i_gl, ip);
 	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 	gfs2_glock_add_to_lru(ip->i_gl);
-	gfs2_glock_put(ip->i_gl);
+	gfs2_glock_put_eventually(ip->i_gl);
 	ip->i_gl = NULL;
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
+
+		glock_clear_object(gl, ip);
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+		gfs2_glock_hold(gl);
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+		gfs2_glock_put_eventually(gl);
 	}
 }