author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-27 12:38:12 -0400
committer	Arnd Bergmann <arnd@arndb.de>			2010-10-27 22:06:17 +0200
commit		f7347ce4ee7c65415f84be915c018473e7076f31 (patch)
tree		613ce14f088ad00bdbc77cdfb686a40a4851180f /fs
parent		c5b1f0d92c36851aca09ac6c7c0c4f9690ac14f3 (diff)
fasync: re-organize fasync entry insertion to allow it under a spinlock
You currently cannot use "fasync_helper()" in an atomic environment to
insert a new fasync entry, because it will need to allocate the new
"struct fasync_struct". Yet fcntl_setlease() wants to call this under
lock_flocks(), which is in the process of being converted from the BKL
to a spinlock.

In order to fix this, this abstracts out the actual fasync list
insertion and the fasync allocations into functions of their own, and
teaches fs/locks.c to pre-allocate the fasync_struct entry. That way
the actual list insertion can happen while holding the required
spinlock.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bfields@redhat.com: rebase on top of my changes to Arnd's patch]
Tested-by: J. Bruce Fields <bfields@redhat.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
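The pattern the patch introduces is worth spelling out: do the GFP_KERNEL
allocation before taking the spinlock, do nothing but list manipulation
while the lock is held, and free the allocation afterwards if an existing
entry was updated instead. Below is a minimal user-space sketch of that
pattern, not kernel code: a pthread mutex stands in for the spinlock, and
all names (struct entry, insert_entry, list_lock) are illustrative. Note
also that the real fasync_insert_entry() matches entries by their struct
file, not by fd; the sketch keys on fd purely for brevity.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int fd;
	struct entry *next;
};

static struct entry *list_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Insert 'new' unless an entry for 'fd' already exists.  Return the
 * pre-existing entry (leaving 'new' unused) if one was found, else
 * take ownership of 'new' and return NULL.  No allocation happens
 * here, so this is safe to call in a no-sleep context. */
static struct entry *insert_entry(int fd, struct entry *new)
{
	struct entry *e;

	pthread_mutex_lock(&list_lock);
	for (e = list_head; e; e = e->next) {
		if (e->fd == fd) {	/* updated in place; 'new' stays unused */
			pthread_mutex_unlock(&list_lock);
			return e;
		}
	}
	new->fd = fd;
	new->next = list_head;
	list_head = new;
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	/* Allocation happens here, where blocking is fine... */
	struct entry *new = malloc(sizeof(*new));
	if (!new)
		return 1;

	/* ...so the critical section itself never allocates. */
	if (insert_entry(42, new))
		free(new);	/* an old entry was reused; discard ours */

	printf("head fd = %d\n", list_head->fd);
	return 0;
}

The crucial property is that insert_entry() performs no allocation, so a
caller may hold any additional lock of its own across the call.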
Diffstat (limited to 'fs')
-rw-r--r--	fs/fcntl.c	66
-rw-r--r--	fs/locks.c	18
2 files changed, 67 insertions(+), 17 deletions(-)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index f8cc34f542c3..dcdbc6f5c33b 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -640,7 +640,7 @@ static void fasync_free_rcu(struct rcu_head *head)
  * match the state "is the filp on a fasync list".
  *
  */
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 {
 	struct fasync_struct *fa, **fp;
 	int result = 0;
@@ -666,21 +666,28 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 	return result;
 }
 
+struct fasync_struct *fasync_alloc(void)
+{
+	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+}
+
 /*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
- *
- * NOTE! It is very important that the FASYNC flag always
- * match the state "is the filp on a fasync list".
+ * NOTE! This can be used only for unused fasync entries:
+ * entries that actually got inserted on the fasync list
+ * need to be released by rcu - see fasync_remove_entry.
  */
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+void fasync_free(struct fasync_struct *new)
 {
-	struct fasync_struct *new, *fa, **fp;
-	int result = 0;
+	kmem_cache_free(fasync_cache, new);
+}
 
-	new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
-	if (!new)
-		return -ENOMEM;
+/*
+ * Insert a new entry into the fasync list. Return the pointer to the
+ * old one if we didn't use the new one.
+ */
+struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
+{
+	struct fasync_struct *fa, **fp;
 
 	spin_lock(&filp->f_lock);
 	spin_lock(&fasync_lock);
@@ -691,8 +698,6 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
 		spin_lock_irq(&fa->fa_lock);
 		fa->fa_fd = fd;
 		spin_unlock_irq(&fa->fa_lock);
-
-		kmem_cache_free(fasync_cache, new);
 		goto out;
 	}
 
@@ -702,13 +707,42 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
 	new->fa_fd = fd;
 	new->fa_next = *fapp;
 	rcu_assign_pointer(*fapp, new);
-	result = 1;
 	filp->f_flags |= FASYNC;
 
 out:
 	spin_unlock(&fasync_lock);
 	spin_unlock(&filp->f_lock);
-	return result;
+	return fa;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ *
+ * NOTE! It is very important that the FASYNC flag always
+ * match the state "is the filp on a fasync list".
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+	struct fasync_struct *new;
+
+	new = fasync_alloc();
+	if (!new)
+		return -ENOMEM;
+
+	/*
+	 * fasync_insert_entry() returns the old (update) entry if
+	 * it existed.
+	 *
+	 * So free the (unused) new entry and return 0 to let the
+	 * caller know that we didn't add any new fasync entries.
+	 */
+	if (fasync_insert_entry(fd, filp, fapp, new)) {
+		fasync_free(new);
+		return 0;
+	}
+
+	return 1;
 }
 
 /*
diff --git a/fs/locks.c b/fs/locks.c
index 0391d2ff5a4e..85fd9ce1abae 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1505,6 +1505,7 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 {
 	struct file_lock *fl;
+	struct fasync_struct *new;
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	int error;
 
@@ -1512,12 +1513,25 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 	if (IS_ERR(fl))
 		return PTR_ERR(fl);
 
+	new = fasync_alloc();
+	if (!new) {
+		locks_free_lock(fl);
+		return -ENOMEM;
+	}
 	lock_flocks();
 	error = __vfs_setlease(filp, arg, &fl);
 	if (error || arg == F_UNLCK)
 		goto out_unlock;
 
-	error = fasync_helper(fd, filp, 1, &fl->fl_fasync);
+	/*
+	 * fasync_insert_entry() returns the old entry if any.
+	 * If there was no old entry, then it used 'new' and
+	 * inserted it into the fasync list. Clear new so that
+	 * we don't release it here.
+	 */
+	if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+		new = NULL;
+
 	if (error < 0) {
 		/* remove lease just inserted by setlease */
 		fl->fl_type = F_UNLCK | F_INPROGRESS;
@@ -1529,6 +1543,8 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 out_unlock:
 	unlock_flocks();
+	if (new)
+		fasync_free(new);
 	return error;
 }
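
Read together, the two fs/locks.c hunks give the calling convention for
any future user of the split API: pre-allocate while sleeping is still
allowed, insert while holding the spinlock, and free a leftover
pre-allocation only after unlocking. Here is a compact user-space sketch
of that ownership handoff, mirroring the fcntl_setlease() change above;
it assumes the insert_entry() semantics from the earlier sketch, and
setlease_like(), lock_list() and unlock_list() are likewise made-up
names, not kernel API.

#include <stdlib.h>

struct entry { int fd; struct entry *next; };

/* Assume the semantics of fasync_insert_entry(): return the pre-existing
 * entry if one was updated in place, or NULL if 'new' was consumed.
 * The externs stand in for the definitions in the earlier sketch. */
extern struct entry *insert_entry(int fd, struct entry *new);
extern void lock_list(void);
extern void unlock_list(void);

int setlease_like(int fd)
{
	struct entry *new = malloc(sizeof(*new));	/* may sleep: no lock held yet */
	if (!new)
		return -1;

	lock_list();
	/* Under the caller's lock: insert_entry() never allocates. */
	if (!insert_entry(fd, new))
		new = NULL;	/* the list took ownership of 'new' */
	unlock_list();

	if (new)
		free(new);	/* unused pre-allocation, freed outside the lock */
	return 0;
}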