author     Trond Myklebust <Trond.Myklebust@netapp.com>  2006-03-20 13:44:05 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2006-03-20 13:44:05 -0500
commit     47831f35b83e43c804215712dd0c834c92e8a441 (patch)
tree       3345007cf56b73495386a59deb6e3d7caf01f875 /fs/locks.c
parent     1dd594b21b2d98e56f2b1fe92bb222276b28de41 (diff)
VFS: Fix __posix_lock_file() copy of private lock area
The struct file_lock->fl_u area must be copied using the fl_copy_lock() operation.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
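The fl_u union holds per-filesystem private lock data, and a lock owner may keep managed state (a refcounted owner structure, for example) reachable from it, so a plain structure assignment of fl_u can alias that state without accounting for the extra user. As a rough illustration only, the sketch below is a minimal user-space model with invented names (struct model_lock, lock_state, copy_private, release_private); it is not kernel code, but it mimics the shape of fl_u plus the fl_copy_lock/fl_release_private callbacks and shows why copying has to go through the owner's copy operation rather than a raw assignment.

/*
 * Illustration only: a small user-space model of the copy problem.
 * All names here are invented; they stand in for fl_u and for the
 * fl_copy_lock/fl_release_private callbacks in struct file_lock.
 */
#include <stdio.h>
#include <stdlib.h>

struct lock_state {			/* stands in for state reachable via fl_u */
	int refcount;
};

struct model_lock {
	struct lock_state *private;	/* stands in for fl_u */
	void (*copy_private)(struct model_lock *, const struct model_lock *);
	void (*release_private)(struct model_lock *);
};

static void copy_private(struct model_lock *new, const struct model_lock *old)
{
	/* share the state, but account for the extra user */
	new->private = old->private;
	new->private->refcount++;
	/* hand the callbacks over too, as locks_copy_private() does with fl_ops */
	new->copy_private = old->copy_private;
	new->release_private = old->release_private;
}

static void release_private(struct model_lock *lk)
{
	if (lk->private && --lk->private->refcount == 0)
		free(lk->private);
	lk->private = NULL;
}

int main(void)
{
	struct model_lock a = {
		.private = calloc(1, sizeof(struct lock_state)),
		.copy_private = copy_private,
		.release_private = release_private,
	};
	struct model_lock b = { 0 };

	a.private->refcount = 1;

	/*
	 * The buggy analogue would be "b.private = a.private;" (a raw
	 * assignment, like "fl->fl_u = request->fl_u"): it aliases the
	 * state without taking a reference, so releasing both locks
	 * would free it twice.  Going through the owner's copy
	 * operation keeps the accounting right.
	 */
	a.copy_private(&b, &a);

	a.release_private(&a);
	b.release_private(&b);
	printf("private state released exactly once\n");
	return 0;
}

In the patch below, locks_copy_private() plays the role of copy_private() in this model: it defers to fl_ops->fl_copy_lock and fl_lmops->fl_copy_lock when the lock owner provides them, instead of relying on a raw fl_u assignment.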
Diffstat (limited to 'fs/locks.c')
-rw-r--r--  fs/locks.c  53
1 files changed, 36 insertions, 17 deletions
diff --git a/fs/locks.c b/fs/locks.c
index 909eab8fb1d0..d2c5306e3db0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -153,6 +153,21 @@ static struct file_lock *locks_alloc_lock(void)
 	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
 }
+static void locks_release_private(struct file_lock *fl)
+{
+	if (fl->fl_ops) {
+		if (fl->fl_ops->fl_release_private)
+			fl->fl_ops->fl_release_private(fl);
+		fl->fl_ops = NULL;
+	}
+	if (fl->fl_lmops) {
+		if (fl->fl_lmops->fl_release_private)
+			fl->fl_lmops->fl_release_private(fl);
+		fl->fl_lmops = NULL;
+	}
+
+}
+
 /* Free a lock which is not in use. */
 static void locks_free_lock(struct file_lock *fl)
 {
@@ -169,18 +184,7 @@ static void locks_free_lock(struct file_lock *fl)
 	if (!list_empty(&fl->fl_link))
 		panic("Attempting to free lock on active lock list");
-	if (fl->fl_ops) {
-		if (fl->fl_ops->fl_release_private)
-			fl->fl_ops->fl_release_private(fl);
-		fl->fl_ops = NULL;
-	}
-
-	if (fl->fl_lmops) {
-		if (fl->fl_lmops->fl_release_private)
-			fl->fl_lmops->fl_release_private(fl);
-		fl->fl_lmops = NULL;
-	}
-
+	locks_release_private(fl);
 	kmem_cache_free(filelock_cache, fl);
 }
@@ -218,11 +222,27 @@ static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
 	locks_init_lock(lock);
 }
+static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
+{
+	if (fl->fl_ops) {
+		if (fl->fl_ops->fl_copy_lock)
+			fl->fl_ops->fl_copy_lock(new, fl);
+		new->fl_ops = fl->fl_ops;
+	}
+	if (fl->fl_lmops) {
+		if (fl->fl_lmops->fl_copy_lock)
+			fl->fl_lmops->fl_copy_lock(new, fl);
+		new->fl_lmops = fl->fl_lmops;
+	}
+}
+
 /*
  * Initialize a new lock from an existing file_lock structure.
  */
 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 {
+	locks_release_private(new);
+
 	new->fl_owner = fl->fl_owner;
 	new->fl_pid = fl->fl_pid;
 	new->fl_file = fl->fl_file;
@@ -232,10 +252,8 @@ void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 	new->fl_end = fl->fl_end;
 	new->fl_ops = fl->fl_ops;
 	new->fl_lmops = fl->fl_lmops;
-	if (fl->fl_ops && fl->fl_ops->fl_copy_lock)
-		fl->fl_ops->fl_copy_lock(new, fl);
-	if (fl->fl_lmops && fl->fl_lmops->fl_copy_lock)
-		fl->fl_lmops->fl_copy_lock(new, fl);
+
+	locks_copy_private(new, fl);
 }
 EXPORT_SYMBOL(locks_copy_lock);
@@ -904,7 +922,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 				fl->fl_start = request->fl_start;
 				fl->fl_end = request->fl_end;
 				fl->fl_type = request->fl_type;
-				fl->fl_u = request->fl_u;
+				locks_release_private(fl);
+				locks_copy_private(fl, request);
 				request = fl;
 				added = 1;
 			}