author     Arnd Bergmann <arnd@arndb.de>  2010-09-18 15:09:31 +0200
committer  Arnd Bergmann <arnd@arndb.de>  2010-10-05 11:02:04 +0200
commit     b89f432133851a01c0d28822f11cbdcc15781a75 (patch)
tree       a3e9ba638a9b746985148f4525335d360ec7da56 /fs/nfs/nfs4state.c
parent     2e54eb96e2c801f33d95b5dade15212ac4d6c4a5 (diff)
fs/locks.c: prepare for BKL removal
This prepares the removal of the big kernel lock from the file locking code. We still use the BKL as long as fs/lockd uses it and ceph might sleep, but we can flip the definition to a private spinlock as soon as that's done.

All users outside of fs/lockd get converted to use lock_flocks() instead of lock_kernel() where appropriate.

Based on an earlier patch to use a spinlock from Matthew Wilcox, who has attempted this a few times before; the earliest such patch, from over 10 years ago, turned it into a semaphore, which ended up being slower than the BKL and was subsequently reverted.

Someone should do some serious performance testing when this becomes a spinlock, since this has caused problems before. Using a spinlock should be at least as good as the BKL in theory, but who knows...

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Miklos Szeredi <mszeredi@suse.cz>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
Cc: Sage Weil <sage@newdream.net>
Cc: linux-kernel@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org
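
For context, a minimal sketch of what the transitional lock_flocks()/unlock_flocks() helpers could look like while fs/lockd still depends on the BKL. This is an illustration based on the description above, not the hunk from fs/locks.c itself, and the spinlock named in the comments (file_lock_lock) is an assumption about where the series is headed. It also explains the header change in the hunk below: callers no longer need <linux/smp_lock.h> and instead pick up the lock_flocks() declaration from <linux/fs.h>.

/*
 * Illustrative sketch only (not copied from the patch): while fs/lockd
 * and ceph still rely on the BKL, the new interface can simply forward
 * to lock_kernel()/unlock_kernel().  Once those users are converted,
 * the same two functions can be re-backed by a private spinlock
 * without touching any caller.
 */
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

void lock_flocks(void)
{
	lock_kernel();		/* later: spin_lock(&file_lock_lock) */
}

void unlock_flocks(void)
{
	unlock_kernel();	/* later: spin_unlock(&file_lock_lock) */
}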
Diffstat (limited to 'fs/nfs/nfs4state.c')
-rw-r--r--  fs/nfs/nfs4state.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3e2f19b04c06..96524c5dca6b 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -40,7 +40,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
@@ -970,13 +970,13 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
/* Guard against delegation returns and new lock/unlock calls */
down_write(&nfsi->rwsem);
/* Protect inode->i_flock using the BKL */
- lock_kernel();
+ lock_flocks();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
- unlock_kernel();
+ unlock_flocks();
status = ops->recover_lock(state, fl);
switch (status) {
case 0:
@@ -1003,9 +1003,9 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
status = 0;
}
- lock_kernel();
+ lock_flocks();
}
- unlock_kernel();
+ unlock_flocks();
out:
up_write(&nfsi->rwsem);
return status;
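
The hunks above follow a pattern worth calling out: because lock_flocks() is meant to eventually become a spinlock, it has to be dropped before ops->recover_lock() (which issues RPCs and can sleep) and re-taken before the i_flock walk continues, with nfsi->rwsem keeping the list stable across the gap. Below is a rough, hypothetical sketch of that shape; the function name and the recover callback parameter are illustrative, not from the patch.

/*
 * Hypothetical illustration of the drop-and-reacquire pattern used in
 * nfs4_reclaim_locks() above; not part of the patch.  The caller is
 * assumed to hold a higher-level lock (nfsi->rwsem in the real code)
 * so the i_flock list cannot change while lock_flocks() is dropped.
 */
#include <linux/fs.h>

static int reclaim_walk_sketch(struct inode *inode,
			       int (*recover)(struct file_lock *fl))
{
	struct file_lock *fl;
	int status = 0;

	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		unlock_flocks();	/* recover() may sleep */
		status = recover(fl);
		if (status < 0)
			return status;	/* lock already dropped */
		lock_flocks();		/* re-take before the next list step */
	}
	unlock_flocks();
	return status;
}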