author	Andreas Gruenbacher <agruenba@redhat.com>	2020-01-16 20:12:26 +0100
committer	Andreas Gruenbacher <agruenba@redhat.com>	2020-06-05 20:19:21 +0200
commit	a0e3cc65fa29f497cc97a069c318532c2a48d148 (patch)
tree	cd8d55687fc9ff76fc46896c3559ac77252903c2 /fs/gfs2/glock.c
parent	f286d627ef026a4d04b41ae5917d58ddf243c3c5 (diff)
gfs2: Turn gl_delete into a delayed work
This requires flushing delayed work items in gfs2_make_fs_ro (which is called before unmounting a filesystem).

When inodes are deleted and then recreated, pending gl_delete work items would have no effect because the inode generations will have changed, so we can cancel any pending gl_delete work items before reusing iopen glocks.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
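For readers less familiar with the workqueue API, a minimal sketch of the work_struct-to-delayed_work conversion this patch performs follows; struct my_glock and the call sites are illustrative stand-ins, only the workqueue calls themselves are real kernel API:

#include <linux/workqueue.h>

/* Illustrative stand-in for struct gfs2_glock. */
struct my_glock {
	unsigned long flags;
	struct delayed_work gl_delete;	/* was: struct work_struct gl_delete; */
};

static void delete_work_func(struct work_struct *work)
{
	/*
	 * The workqueue core always passes the handler the embedded
	 * work_struct; to_delayed_work() recovers the enclosing
	 * delayed_work so container_of() can find the glock again.
	 */
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_glock *gl = container_of(dwork, struct my_glock, gl_delete);

	/* ... process the delete for gl ... */
}

static void init_and_queue(struct my_glock *gl)
{
	INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);

	/*
	 * A delay of 0 behaves like plain queue_work(); a nonzero
	 * delay (in jiffies) arms a timer first. Either way the
	 * call returns false if the work is already queued.
	 */
	queue_delayed_work(system_wq, &gl->gl_delete, 0);
}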
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c	47
1 file changed, 45 insertions(+), 2 deletions(-)
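The "cancel before reusing iopen glocks" rule from the message above reduces to the following pattern at the reuse site; the function reuse_iopen_glock is hypothetical, while its body mirrors gfs2_cancel_delete_work() from the diff below:

#include <linux/workqueue.h>

/* Hypothetical caller on the inode-recreation path. */
static void reuse_iopen_glock(struct gfs2_glock *io_gl)
{
	/*
	 * cancel_delayed_work_sync() returns true only if the work
	 * item was still pending. By this patch's convention (see
	 * gfs2_cancel_delete_work() below), a queued delete work
	 * item holds a glock reference, so the canceller drops it.
	 */
	if (cancel_delayed_work_sync(&io_gl->gl_delete)) {
		clear_bit(GLF_PENDING_DELETE, &io_gl->gl_flags);
		gfs2_glock_put(io_gl);
	}
}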
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 12681616eb76..0332086f7ab9 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -776,11 +776,16 @@ bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
static void delete_work_func(struct work_struct *work)
{
- struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct inode *inode;
u64 no_addr = gl->gl_name.ln_number;
+ spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+
/* If someone's using this glock to create a new dinode, the block must
have been freed by another node, then re-used, in which case our
iopen callback is too late after the fact. Ignore it. */
@@ -949,7 +954,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_object = NULL;
gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
- INIT_WORK(&gl->gl_delete, delete_work_func);
+ INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
mapping = gfs2_glock2aspace(gl);
if (mapping) {
@@ -1772,6 +1777,44 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
rhashtable_walk_exit(&iter);
}
+bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
+{
+ bool queued;
+
+ spin_lock(&gl->gl_lockref.lock);
+ queued = queue_delayed_work(gfs2_delete_workqueue,
+ &gl->gl_delete, delay);
+ if (queued)
+ set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+ return queued;
+}
+
+void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+{
+ if (cancel_delayed_work_sync(&gl->gl_delete)) {
+ clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ gfs2_glock_put(gl);
+ }
+}
+
+bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+{
+ return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+}
+
+static void flush_delete_work(struct gfs2_glock *gl)
+{
+ flush_delayed_work(&gl->gl_delete);
+ gfs2_glock_queue_work(gl, 0);
+}
+
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
+{
+ glock_hash_walk(flush_delete_work, sdp);
+ flush_workqueue(gfs2_delete_workqueue);
+}
+
/**
* thaw_glock - thaw out a glock which has an unprocessed reply waiting
* @gl: The glock to thaw