author    Miklos Szeredi <mszeredi@suse.cz>  2015-07-01 16:26:03 +0200
committer Miklos Szeredi <mszeredi@suse.cz>  2015-07-01 16:26:03 +0200
commit    8f7bb368dbdda76f5e98e05ee49ae2dc138fd42f (patch)
tree      aea509906f365c33f54070780905f0c45b18a084 /fs/fuse
parent    4ce6081260ea4c1b5bfa8ecca5cbb93eea279ad4 (diff)
fuse: allow interrupt queuing without fc->lock
Interrupt is only queued after the request has been sent to userspace. This is done either in request_wait_answer() or in fuse_dev_do_read(), depending on which state the request is in at the time of the interrupt. If it's not yet sent, then queuing the interrupt is postponed until the request is read. Otherwise (the request has already been read and is waiting for an answer) the interrupt is queued immediately.

We want to call queue_interrupt() without fc->lock protection, in which case there can be a race between the two functions:

- neither of them queues the interrupt (each thinking the other one has already done it);
- both of them queue the interrupt.

The first is prevented by adding memory barriers, the second by checking (under fiq->waitq.lock) whether the interrupt has already been queued.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
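The barrier pairing described above is the classic two-flag store/load handshake: each side sets its own flag, issues a full memory barrier, then tests the other side's flag, so at least one side is guaranteed to observe the other's store and queue the interrupt. The following is a minimal userspace sketch of the same pattern built on C11 atomics; the names (waiter_interrupt, reader_sent, queue_interrupt_once, queued) are illustrative stand-ins, not the FUSE implementation itself:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-ins for FR_INTERRUPTED, FR_SENT and the
 * "already on fiq->interrupts" state; not the kernel's types. */
static atomic_bool interrupted;
static atomic_bool sent;
static atomic_bool queued;
static atomic_int queue_count;

/* Queue at most once: plays the role of the list_empty() check
 * done under fiq->waitq.lock in the patch. */
static void queue_interrupt_once(void)
{
	bool expected = false;

	if (atomic_compare_exchange_strong(&queued, &expected, true))
		atomic_fetch_add(&queue_count, 1); /* "list_add_tail + wake_up" */
}

/* Waiter side, like request_wait_answer(): flag the interrupt,
 * then check whether the request was already sent. */
static void waiter_interrupt(void)
{
	atomic_store(&interrupted, true);
	atomic_thread_fence(memory_order_seq_cst); /* pairs with fence below */
	if (atomic_load(&sent))
		queue_interrupt_once();
}

/* Reader side, like fuse_dev_do_read(): flag the request as sent,
 * then check whether an interrupt already arrived. */
static void reader_sent(void)
{
	atomic_store(&sent, true);
	atomic_thread_fence(memory_order_seq_cst); /* pairs with fence above */
	if (atomic_load(&interrupted))
		queue_interrupt_once();
}

In the actual patch, smp_mb__after_atomic() supplies the full barrier after set_bit(), and the list_empty(&req->intr_entry) test under fiq->waitq.lock plays the role of the compare-and-swap above, guaranteeing the interrupt is queued at most once.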
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/dev.c | 12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 65ad9b1e055d..c7f1a633239f 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -423,8 +423,10 @@ __releases(fc->lock)
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	spin_lock(&fiq->waitq.lock);
-	list_add_tail(&req->intr_entry, &fiq->interrupts);
-	wake_up_locked(&fiq->waitq);
+	if (list_empty(&req->intr_entry)) {
+		list_add_tail(&req->intr_entry, &fiq->interrupts);
+		wake_up_locked(&fiq->waitq);
+	}
 	spin_unlock(&fiq->waitq.lock);
 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
@@ -443,6 +445,8 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		spin_lock(&fc->lock);
 		set_bit(FR_INTERRUPTED, &req->flags);
+		/* matches barrier in fuse_dev_do_read() */
+		smp_mb__after_atomic();
 		if (test_bit(FR_SENT, &req->flags))
 			queue_interrupt(fiq, req);
 		spin_unlock(&fc->lock);
@@ -1358,8 +1362,10 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	if (!test_bit(FR_ISREPLY, &req->flags)) {
 		request_end(fc, req);
 	} else {
-		set_bit(FR_SENT, &req->flags);
 		list_move_tail(&req->list, &fc->processing);
+		set_bit(FR_SENT, &req->flags);
+		/* matches barrier in request_wait_answer() */
+		smp_mb__after_atomic();
 		if (test_bit(FR_INTERRUPTED, &req->flags))
 			queue_interrupt(fiq, req);
 		spin_unlock(&fc->lock);