path: root/fs/nfs/nfs4renewd.c
author     Trond Myklebust <Trond.Myklebust@netapp.com>  2007-08-07 15:28:33 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2007-08-07 16:12:50 -0400
commit     3d39c691ff486142dd9aaeac12f553f4476b7a62 (patch)
tree       78ad555ea06be23933a93ec9c7c68a788f979085 /fs/nfs/nfs4renewd.c
parent     a4deb81ba8ece75af5560d40d9bb8d242c48a111 (diff)
NFS: Replace flush_scheduled_work with cancel_work_sync() and friends
This will avoid deadlocks of the form:

  stack backtrace:
   [<c0104fda>] show_trace_log_lvl+0x1a/0x30
   [<c0105c02>] show_trace+0x12/0x20
   [<c0105d15>] dump_stack+0x15/0x20
   [<c013ee42>] __lock_acquire+0xc22/0x1030
   [<c013f2b1>] lock_acquire+0x61/0x80
   [<c012edd9>] flush_workqueue+0x49/0x70
   [<c012ee0d>] flush_scheduled_work+0xd/0x10
   [<dcf55c0c>] nfs_release_automount_timer+0x2c/0x30 [nfs]
   [<dcf45d8e>] nfs_free_server+0x9e/0xd0 [nfs]
   [<dcf4e626>] nfs_kill_super+0x16/0x20 [nfs]
   [<c017b38d>] deactivate_super+0x7d/0xa0
   [<c018f94b>] mntput_no_expire+0x4b/0x80
   [<c018fd94>] expire_mount_list+0xe4/0x140
   [<c0191219>] mark_mounts_for_expiry+0x99/0xb0
   [<dcf55d1d>] nfs_expire_automounts+0xd/0x40 [nfs]
   [<c012e61b>] run_workqueue+0x12b/0x1e0
   [<c012f05b>] worker_thread+0x9b/0x100
   [<c0131c72>] kthread+0x42/0x70
   [<c0104c0f>] kernel_thread_helper+0x7/0x18
  =======================

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
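For context, a minimal sketch (not part of this patch) of the pattern the change moves to: cancel the specific delayed work item with cancel_delayed_work_sync() instead of flushing the whole shared workqueue with flush_scheduled_work(), which self-deadlocks when called from a work item running on that same queue. The names demo_renewd, demo_renew_state and demo_shutdown are hypothetical, used only for illustration.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_renew_state(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_renewd, demo_renew_state);

/* Self-rearming renewal work, analogous to cl_renewd. */
static void demo_renew_state(struct work_struct *work)
{
	/* ... renew the lease ... then re-arm ourselves */
	schedule_delayed_work(&demo_renewd, 60 * HZ);
}

static void demo_shutdown(void)
{
	/*
	 * flush_scheduled_work() waits for every item on the shared
	 * workqueue; if we are called from such an item (as in the
	 * backtrace above), that is a deadlock.
	 *
	 * Waiting only for this particular work item avoids it:
	 */
	cancel_delayed_work_sync(&demo_renewd);
}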
Diffstat (limited to 'fs/nfs/nfs4renewd.c')
-rw-r--r--  fs/nfs/nfs4renewd.c |  5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 0505ca124034..3ea352d82eba 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -127,16 +127,15 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 void
 nfs4_renewd_prepare_shutdown(struct nfs_server *server)
 {
-	flush_scheduled_work();
+	cancel_delayed_work(&server->nfs_client->cl_renewd);
 }
 
 void
 nfs4_kill_renewd(struct nfs_client *clp)
 {
 	down_read(&clp->cl_sem);
-	cancel_delayed_work(&clp->cl_renewd);
+	cancel_delayed_work_sync(&clp->cl_renewd);
 	up_read(&clp->cl_sem);
-	flush_scheduled_work();
 }
 
 /*