author    Dave Chinner <dchinner@redhat.com>  2015-03-04 13:40:00 -0500
committer Josef Bacik <jbacik@fb.com>  2015-08-17 18:39:47 -0400
commit    e97fedb9ef9868ff24d588be781906cf7c1b59ae
tree      d686280c3fef0894125e9ba6de9f7797f6518084  /fs/fs-writeback.c
parent    74278da9f70d84d715601fe794567a6d2bfdf078
sync: serialise per-superblock sync operations
When competing sync(2) calls walk the same filesystem, they need to walk the list of inodes on the superblock to find every inode on which we need to wait for IO completion. However, when multiple wait_sb_inodes() calls do this at the same time, they contend on the inode_sb_list_lock, and the contention causes system-wide slowdowns. In effect, concurrent sync(2) calls can take longer and burn more CPU than if they were serialised.

Stop the worst of the contention by adding a per-sb mutex to wrap around wait_sb_inodes(), so that we only execute one sync(2) IO completion walk per superblock at a time, and hence avoid contention being triggered by concurrent sync(2) calls.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Dave Chinner <dchinner@redhat.com>
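The pattern here is general: when many callers would otherwise contend on a fine-grained lock while repeating the same expensive walk, funnelling them through a sleeping mutex turns N simultaneous walks into a queue of one-at-a-time walks. Below is a minimal userspace sketch of that idea. It uses pthreads, and the names serialise_lock, list_lock, walk_list and do_sync are hypothetical stand-ins for s_sync_lock, s_inode_list_lock, the inode list walk and sync(2) callers; this illustrates the locking pattern only and is not kernel code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t serialise_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of s_sync_lock */
static pthread_spinlock_t list_lock;                               /* plays the role of s_inode_list_lock */

static void walk_list(void)
{
	/* Stand-in for the per-inode wait loop in wait_sb_inodes(). */
	pthread_spin_lock(&list_lock);
	/* ... visit each entry, dropping the lock around blocking waits ... */
	pthread_spin_unlock(&list_lock);
}

static void *do_sync(void *arg)
{
	(void)arg;
	/*
	 * Callers queue here instead of contending on list_lock together.
	 * Whoever holds the mutex walks the whole list; later callers
	 * repeat the walk after it, so nothing issued before their entry
	 * escapes being waited on.
	 */
	pthread_mutex_lock(&serialise_lock);
	walk_list();
	pthread_mutex_unlock(&serialise_lock);
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, do_sync, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);
	puts("all sync callers finished");
	return 0;
}

Compile with gcc -pthread. The key property mirrors the comment the patch adds below: a caller that blocks on the mutex still performs its own full walk afterwards, so every piece of IO issued before it entered is waited on either by an earlier holder's walk or by its own.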
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--  fs/fs-writeback.c  11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f45bf876579f..3c974442bdf0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2114,6 +2114,15 @@ out_unlock_inode:
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 
+/*
+ * The @s_sync_lock is used to serialise concurrent sync operations
+ * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
+ * Concurrent callers block on the s_sync_lock rather than performing
+ * contending walks. The queueing preserves the behaviour sync(2) requires:
+ * all the IO issued up to the time this function is entered is guaranteed
+ * to be completed by the time we have gained the lock and waited for all
+ * IO in progress, regardless of the order callers are granted the lock.
+ */
 static void wait_sb_inodes(struct super_block *sb)
 {
 	struct inode *inode, *old_inode = NULL;
@@ -2124,6 +2133,7 @@ static void wait_sb_inodes(struct super_block *sb)
 	 */
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
+	mutex_lock(&sb->s_sync_lock);
 	spin_lock(&sb->s_inode_list_lock);
 
 	/*
@@ -2165,6 +2175,7 @@ static void wait_sb_inodes(struct super_block *sb)
 	}
 	spin_unlock(&sb->s_inode_list_lock);
 	iput(old_inode);
+	mutex_unlock(&sb->s_sync_lock);
 }
 
 static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
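Note that this view is filtered to fs/fs-writeback.c, so the hunks that introduce the lock itself are not shown here. For the mutex_lock() above to be valid, s_sync_lock must be declared in struct super_block and initialised when the superblock is allocated; in the upstream tree those companion hunks should live in include/linux/fs.h and in alloc_super() in fs/super.c. A hedged userspace-style model of that lifecycle, with struct super_block_model and alloc_super_model() as hypothetical stand-ins for struct super_block and alloc_super():

#include <pthread.h>
#include <stdlib.h>

struct super_block_model {
	pthread_mutex_t s_sync_lock;	/* serialises wait_sb_inodes() */
	/* ... the rest of the per-superblock state ... */
};

/*
 * Stand-in for alloc_super(): the lock is initialised exactly once,
 * before the superblock becomes visible to any sync(2) caller.
 */
static struct super_block_model *alloc_super_model(void)
{
	struct super_block_model *s = calloc(1, sizeof(*s));

	if (s)
		pthread_mutex_init(&s->s_sync_lock, NULL);
	return s;
}

int main(void)
{
	struct super_block_model *s = alloc_super_model();

	if (!s)
		return 1;
	/* Safe to take immediately: initialisation preceded visibility. */
	pthread_mutex_lock(&s->s_sync_lock);
	pthread_mutex_unlock(&s->s_sync_lock);
	pthread_mutex_destroy(&s->s_sync_lock);
	free(s);
	return 0;
}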