author | Jeff Layton <jlayton@redhat.com> | 2013-06-21 08:58:16 -0400
committer | Al Viro <viro@zeniv.linux.org.uk> | 2013-06-29 12:57:43 +0400
commit | 4e8c765d384e549f9b542ea0bd42e2aa227e1404 (patch)
tree | 23600131f3c0e8fa0d84707ccb19344f14627804 /fs/locks.c
parent | 1c8c601a8c0dc59fe64907dcd9d512a3d181ddc7 (diff)
locks: avoid taking global lock if possible when waking up blocked waiters
Since we always hold the i_lock when inserting a new waiter onto the
fl_block list, we can avoid taking the global lock entirely if we find
that the list is empty when we go to wake up blocked waiters.
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/locks.c')
-rw-r--r-- | fs/locks.c | 15 |
1 file changed, 14 insertions, 1 deletion
```diff
diff --git a/fs/locks.c b/fs/locks.c
index ce302d43822b..84e269fc4c69 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -548,7 +548,10 @@ static void locks_delete_block(struct file_lock *waiter)
  * the order they blocked. The documentation doesn't require this but
  * it seems like the reasonable thing to do.
  *
- * Must be called with file_lock_lock held!
+ * Must be called with both the i_lock and file_lock_lock held. The fl_block
+ * list itself is protected by the file_lock_lock, but by ensuring that the
+ * i_lock is also held on insertions we can avoid taking the file_lock_lock
+ * in some cases when we see that the fl_block list is empty.
  */
 static void __locks_insert_block(struct file_lock *blocker,
 					struct file_lock *waiter)
@@ -576,6 +579,16 @@ static void locks_insert_block(struct file_lock *blocker,
  */
 static void locks_wake_up_blocks(struct file_lock *blocker)
 {
+	/*
+	 * Avoid taking global lock if list is empty. This is safe since new
+	 * blocked requests are only added to the list under the i_lock, and
+	 * the i_lock is always held here. Note that removal from the fl_block
+	 * list does not require the i_lock, so we must recheck list_empty()
+	 * after acquiring the file_lock_lock.
+	 */
+	if (list_empty(&blocker->fl_block))
+		return;
+
 	spin_lock(&file_lock_lock);
 	while (!list_empty(&blocker->fl_block)) {
 		struct file_lock *waiter;
```
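To make the locking rule easier to see outside of fs/locks.c, here is a minimal userspace sketch of the same pattern using POSIX threads. It is an illustration only, not the kernel code: struct blocker, insert_waiter() and wake_up_waiters() are hypothetical names, with a per-object mutex standing in for the i_lock, a global mutex standing in for file_lock_lock, and a simple singly linked list standing in for fl_block.

```c
/* Illustrative sketch of the "skip the global lock when the waiter list
 * is empty" pattern; all names here are hypothetical, not fs/locks.c. */
#include <pthread.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	int id;
};

struct blocker {
	pthread_mutex_t obj_lock;	/* plays the role of the i_lock */
	struct waiter *waiters;		/* plays the role of fl_block */
};

/* Shared lock protecting all waiter lists; plays the role of file_lock_lock. */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Insert a waiter. Caller holds b->obj_lock; the global lock is taken here,
 * mirroring the rule that __locks_insert_block() now runs with both held. */
static void insert_waiter(struct blocker *b, struct waiter *w)
{
	pthread_mutex_lock(&global_lock);
	w->next = b->waiters;
	b->waiters = w;
	pthread_mutex_unlock(&global_lock);
}

/* Wake all waiters. Caller holds b->obj_lock, just as locks_wake_up_blocks()
 * runs under the i_lock. Because insertions also hold the object lock, an
 * empty list seen here cannot grow behind our back, so the global lock can
 * be skipped entirely. The recheck inside the loop mirrors the kernel code,
 * where removal from fl_block does not require the i_lock. */
static void wake_up_waiters(struct blocker *b)
{
	if (b->waiters == NULL)		/* cheap check without the global lock */
		return;

	pthread_mutex_lock(&global_lock);
	while (b->waiters != NULL) {	/* recheck under the global lock */
		struct waiter *w = b->waiters;
		b->waiters = w->next;
		printf("waking waiter %d\n", w->id);
	}
	pthread_mutex_unlock(&global_lock);
}

int main(void)
{
	struct blocker b = { .waiters = NULL };
	struct waiter w = { .next = NULL, .id = 1 };

	pthread_mutex_init(&b.obj_lock, NULL);

	pthread_mutex_lock(&b.obj_lock);	/* caller holds the per-object lock */
	insert_waiter(&b, &w);
	wake_up_waiters(&b);
	pthread_mutex_unlock(&b.obj_lock);

	pthread_mutex_destroy(&b.obj_lock);
	return 0;
}
```

As in the patch, the benefit comes from the common case: when nothing is blocked on a lock, the wakeup path never touches the shared global lock, so unrelated objects do not contend on it.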