author    Kent Overstreet <kent.overstreet@gmail.com>  2022-08-19 19:50:18 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:09:39 -0400
commit    8e5696698d140f599586426fb9a897abb0eaa576 (patch)
tree      fde449db19d898ee351a9d9806829dda4ab37b77
parent    cd5afabea1acd2bc351ec08d59511302b397f150 (diff)
bcachefs: Reorganize btree_locking.[ch]

Tidy things up a bit before doing more work in this file.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
 fs/bcachefs/btree_locking.c | 286 +-
 fs/bcachefs/btree_locking.h | 145 +-
 2 files changed, 225 insertions(+), 206 deletions(-)
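This is almost entirely code motion: btree_locking.c is regrouped so that each locking operation gets its own commented section, and btree_locking.h is reordered to match. A map of the resulting .c layout, reconstructed from the section banners and hunk headers in the diff below (only functions visible in the hunks are listed):

```c
/*
 * fs/bcachefs/btree_locking.c after this patch, by section banner:
 *
 *   Btree node locking:
 *       six_lock_readers_add()          -- static helper, moved to the top
 *       bch2_btree_node_lock_counts()
 *   unlock:
 *       bch2_btree_node_unlock_write()
 *   lock:
 *       __bch2_btree_node_lock_write()
 *       __bch2_btree_node_lock()        -- slowpath
 *   relock:
 *       btree_path_get_locks()
 *       __bch2_btree_node_relock()
 *   upgrade:
 *       bch2_btree_node_upgrade()
 *   Btree path locking:
 *       __bch2_btree_path_downgrade()
 *   Btree transaction locking:
 *       bch2_trans_downgrade(), bch2_trans_relock(), bch2_trans_unlock()
 *   Debug (CONFIG_BCACHEFS_DEBUG):
 *       bch2_btree_path_verify_locks(), bch2_trans_verify_locks()
 */
```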
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 3f20fbcb8389..535232a240dc 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -8,10 +8,12 @@ struct lock_class_key bch2_btree_node_lock_key;
/* Btree node locking: */
-void bch2_btree_node_unlock_write(struct btree_trans *trans,
- struct btree_path *path, struct btree *b)
+static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
- bch2_btree_node_unlock_write_inlined(trans, path, b);
+ if (!lock->readers)
+ atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
+ else
+ this_cpu_add(*lock->readers, nr);
}
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
@@ -34,14 +36,16 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
return ret;
}
-static inline void six_lock_readers_add(struct six_lock *lock, int nr)
+/* unlock */
+
+void bch2_btree_node_unlock_write(struct btree_trans *trans,
+ struct btree_path *path, struct btree *b)
{
- if (!lock->readers)
- atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
- else
- this_cpu_add(*lock->readers, nr);
+ bch2_btree_node_unlock_write_inlined(trans, path, b);
}
+/* lock */
+
void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).read;
@@ -57,118 +61,6 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
six_lock_readers_add(&b->c.lock, readers);
}
-bool __bch2_btree_node_relock(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
- struct btree *b = btree_path_node(path, level);
- int want = __btree_lock_want(path, level);
-
- if (!is_btree_node(path, level))
- goto fail;
-
- if (race_fault())
- goto fail;
-
- if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
- (btree_node_lock_seq_matches(path, b, level) &&
- btree_node_lock_increment(trans, b, level, want))) {
- mark_btree_node_locked(trans, path, level, want);
- return true;
- }
-fail:
- if (b != ERR_PTR(-BCH_ERR_no_btree_node_cached) &&
- b != ERR_PTR(-BCH_ERR_no_btree_node_init))
- trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
- return false;
-}
-
-bool bch2_btree_node_upgrade(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
- struct btree *b = path->l[level].b;
-
- if (!is_btree_node(path, level))
- return false;
-
- switch (btree_lock_want(path, level)) {
- case BTREE_NODE_UNLOCKED:
- BUG_ON(btree_node_locked(path, level));
- return true;
- case BTREE_NODE_READ_LOCKED:
- BUG_ON(btree_node_intent_locked(path, level));
- return bch2_btree_node_relock(trans, path, level);
- case BTREE_NODE_INTENT_LOCKED:
- break;
- }
-
- if (btree_node_intent_locked(path, level))
- return true;
-
- if (race_fault())
- return false;
-
- if (btree_node_locked(path, level)
- ? six_lock_tryupgrade(&b->c.lock)
- : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
- goto success;
-
- if (btree_node_lock_seq_matches(path, b, level) &&
- btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
- btree_node_unlock(trans, path, level);
- goto success;
- }
-
- trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
- return false;
-success:
- mark_btree_node_intent_locked(trans, path, level);
- return true;
-}
-
-static inline bool btree_path_get_locks(struct btree_trans *trans,
- struct btree_path *path,
- bool upgrade)
-{
- unsigned l = path->level;
- int fail_idx = -1;
-
- do {
- if (!btree_path_node(path, l))
- break;
-
- if (!(upgrade
- ? bch2_btree_node_upgrade(trans, path, l)
- : bch2_btree_node_relock(trans, path, l)))
- fail_idx = l;
-
- l++;
- } while (l < path->locks_want);
-
- /*
- * When we fail to get a lock, we have to ensure that any child nodes
- * can't be relocked so bch2_btree_path_traverse has to walk back up to
- * the node that we failed to relock:
- */
- if (fail_idx >= 0) {
- __bch2_btree_path_unlock(trans, path);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-
- do {
- path->l[fail_idx].b = upgrade
- ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
- : ERR_PTR(-BCH_ERR_no_btree_node_relock);
- --fail_idx;
- } while (fail_idx >= 0);
- }
-
- if (path->uptodate == BTREE_ITER_NEED_RELOCK)
- path->uptodate = BTREE_ITER_UPTODATE;
-
- bch2_trans_verify_locks(trans);
-
- return path->uptodate < BTREE_ITER_NEED_RELOCK;
-}
-
/* Slowpath: */
int __bch2_btree_node_lock(struct btree_trans *trans,
struct btree_path *path,
@@ -250,34 +142,121 @@ deadlock:
return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
}
-/* Btree iterator locking: */
-
-#ifdef CONFIG_BCACHEFS_DEBUG
+/* relock */
-void bch2_btree_path_verify_locks(struct btree_path *path)
+static inline bool btree_path_get_locks(struct btree_trans *trans,
+ struct btree_path *path,
+ bool upgrade)
{
- unsigned l;
+ unsigned l = path->level;
+ int fail_idx = -1;
- if (!path->nodes_locked) {
- BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
- btree_path_node(path, path->level));
- return;
+ do {
+ if (!btree_path_node(path, l))
+ break;
+
+ if (!(upgrade
+ ? bch2_btree_node_upgrade(trans, path, l)
+ : bch2_btree_node_relock(trans, path, l)))
+ fail_idx = l;
+
+ l++;
+ } while (l < path->locks_want);
+
+ /*
+ * When we fail to get a lock, we have to ensure that any child nodes
+ * can't be relocked so bch2_btree_path_traverse has to walk back up to
+ * the node that we failed to relock:
+ */
+ if (fail_idx >= 0) {
+ __bch2_btree_path_unlock(trans, path);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+
+ do {
+ path->l[fail_idx].b = upgrade
+ ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
+ : ERR_PTR(-BCH_ERR_no_btree_node_relock);
+ --fail_idx;
+ } while (fail_idx >= 0);
}
- for (l = 0; btree_path_node(path, l); l++)
- BUG_ON(btree_lock_want(path, l) !=
- btree_node_locked_type(path, l));
+ if (path->uptodate == BTREE_ITER_NEED_RELOCK)
+ path->uptodate = BTREE_ITER_UPTODATE;
+
+ bch2_trans_verify_locks(trans);
+
+ return path->uptodate < BTREE_ITER_NEED_RELOCK;
}
-void bch2_trans_verify_locks(struct btree_trans *trans)
+bool __bch2_btree_node_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
{
- struct btree_path *path;
+ struct btree *b = btree_path_node(path, level);
+ int want = __btree_lock_want(path, level);
- trans_for_each_path(trans, path)
- bch2_btree_path_verify_locks(path);
+ if (!is_btree_node(path, level))
+ goto fail;
+
+ if (race_fault())
+ goto fail;
+
+ if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
+ (btree_node_lock_seq_matches(path, b, level) &&
+ btree_node_lock_increment(trans, b, level, want))) {
+ mark_btree_node_locked(trans, path, level, want);
+ return true;
+ }
+fail:
+ if (b != ERR_PTR(-BCH_ERR_no_btree_node_cached) &&
+ b != ERR_PTR(-BCH_ERR_no_btree_node_init))
+ trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
+ return false;
}
-#endif
+/* upgrade */
+
+bool bch2_btree_node_upgrade(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
+{
+ struct btree *b = path->l[level].b;
+
+ if (!is_btree_node(path, level))
+ return false;
+
+ switch (btree_lock_want(path, level)) {
+ case BTREE_NODE_UNLOCKED:
+ BUG_ON(btree_node_locked(path, level));
+ return true;
+ case BTREE_NODE_READ_LOCKED:
+ BUG_ON(btree_node_intent_locked(path, level));
+ return bch2_btree_node_relock(trans, path, level);
+ case BTREE_NODE_INTENT_LOCKED:
+ break;
+ }
+
+ if (btree_node_intent_locked(path, level))
+ return true;
+
+ if (race_fault())
+ return false;
+
+ if (btree_node_locked(path, level)
+ ? six_lock_tryupgrade(&b->c.lock)
+ : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
+ goto success;
+
+ if (btree_node_lock_seq_matches(path, b, level) &&
+ btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
+ btree_node_unlock(trans, path, level);
+ goto success;
+ }
+
+ trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
+ return false;
+success:
+ mark_btree_node_intent_locked(trans, path, level);
+ return true;
+}
/* Btree path locking: */
@@ -406,6 +385,8 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
bch2_btree_path_verify_locks(path);
}
+/* Btree transaction locking: */
+
void bch2_trans_downgrade(struct btree_trans *trans)
{
struct btree_path *path;
@@ -414,8 +395,6 @@ void bch2_trans_downgrade(struct btree_trans *trans)
bch2_btree_path_downgrade(trans, path);
}
-/* Btree transaction locking: */
-
int bch2_trans_relock(struct btree_trans *trans)
{
struct btree_path *path;
@@ -440,3 +419,32 @@ void bch2_trans_unlock(struct btree_trans *trans)
trans_for_each_path(trans, path)
__bch2_btree_path_unlock(trans, path);
}
+
+/* Debug */
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+void bch2_btree_path_verify_locks(struct btree_path *path)
+{
+ unsigned l;
+
+ if (!path->nodes_locked) {
+ BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
+ btree_path_node(path, path->level));
+ return;
+ }
+
+ for (l = 0; btree_path_node(path, l); l++)
+ BUG_ON(btree_lock_want(path, l) !=
+ btree_node_locked_type(path, l));
+}
+
+void bch2_trans_verify_locks(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ bch2_btree_path_verify_locks(path);
+}
+
+#endif
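One behavioural anchor in all this code motion: six_lock_readers_add() moves to the top of the file because __bch2_btree_node_lock_write() depends on it. Taking a write lock on a node the transaction already holds read locks on would block on itself, so the write path temporarily subtracts its own reader counts and restores them once the write lock is held. A sketch of that pattern, assembled from the fragments visible in the hunks above; the -readers line and the six_lock_write() call are not shown in the diff and are an assumption about the elided middle of the function:

```c
/* Sketch: the middle of this function is reconstructed, not quoted. */
void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
	/* read locks *this* transaction holds on b (shown in the diff): */
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).read;

	/*
	 * Temporarily give those readers back, so the write lock doesn't
	 * wait on read locks we hold ourselves (assumed, not in the diff):
	 */
	six_lock_readers_add(&b->c.lock, -readers);
	six_lock_write(&b->c.lock, NULL, NULL);

	/* restore our reader counts (shown in the diff): */
	six_lock_readers_add(&b->c.lock, readers);
}
```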
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 5b5fa47844f7..ea00c190dea8 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -20,6 +20,13 @@ static inline bool is_btree_node(struct btree_path *path, unsigned l)
return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}
+static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
+{
+ return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
+ ? &trans->c->btree_transaction_stats[trans->fn_idx]
+ : NULL;
+}
+
/* matches six lock types */
enum btree_node_locked_type {
BTREE_NODE_UNLOCKED = -1,
@@ -114,13 +121,6 @@ btree_lock_want(struct btree_path *path, int level)
return BTREE_NODE_UNLOCKED;
}
-static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
-{
- return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
- ? &trans->c->btree_transaction_stats[trans->fn_idx]
- : NULL;
-}
-
static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
struct btree_path *path, unsigned level)
{
@@ -134,6 +134,22 @@ static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
#endif
}
+static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
+{
+ switch (type) {
+ case SIX_LOCK_read:
+ return BCH_TIME_btree_lock_contended_read;
+ case SIX_LOCK_intent:
+ return BCH_TIME_btree_lock_contended_intent;
+ case SIX_LOCK_write:
+ return BCH_TIME_btree_lock_contended_write;
+ default:
+ BUG();
+ }
+}
+
+/* unlock: */
+
static inline void btree_node_unlock(struct btree_trans *trans,
struct btree_path *path, unsigned level)
{
@@ -157,20 +173,30 @@ static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
btree_node_unlock(trans, path, __ffs(path->nodes_locked));
}
-static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
+/*
+ * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
+ * succeed:
+ */
+static inline void
+bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
+ struct btree *b)
{
- switch (type) {
- case SIX_LOCK_read:
- return BCH_TIME_btree_lock_contended_read;
- case SIX_LOCK_intent:
- return BCH_TIME_btree_lock_contended_intent;
- case SIX_LOCK_write:
- return BCH_TIME_btree_lock_contended_write;
- default:
- BUG();
- }
+ struct btree_path *linked;
+
+ EBUG_ON(path->l[b->c.level].b != b);
+ EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+
+ trans_for_each_path_with_node(trans, b, linked)
+ linked->l[b->c.level].lock_seq += 2;
+
+ six_unlock_write(&b->c.lock);
}
+void bch2_btree_node_unlock_write(struct btree_trans *,
+ struct btree_path *, struct btree *);
+
+/* lock: */
+
static inline int btree_node_lock_type(struct btree_trans *trans,
struct btree_path *path,
struct btree *b,
@@ -253,41 +279,6 @@ static inline int btree_node_lock(struct btree_trans *trans,
return ret;
}
-bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
-
-static inline bool bch2_btree_node_relock(struct btree_trans *trans,
- struct btree_path *path, unsigned level)
-{
- EBUG_ON(btree_node_locked(path, level) &&
- btree_node_locked_type(path, level) !=
- __btree_lock_want(path, level));
-
- return likely(btree_node_locked(path, level)) ||
- __bch2_btree_node_relock(trans, path, level);
-}
-
-/*
- * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
- * succeed:
- */
-static inline void
-bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
- struct btree *b)
-{
- struct btree_path *linked;
-
- EBUG_ON(path->l[b->c.level].b != b);
- EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
-
- trans_for_each_path_with_node(trans, b, linked)
- linked->l[b->c.level].lock_seq += 2;
-
- six_unlock_write(&b->c.lock);
-}
-
-void bch2_btree_node_unlock_write(struct btree_trans *,
- struct btree_path *, struct btree *);
-
void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
@@ -302,6 +293,36 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
__bch2_btree_node_lock_write(trans, b);
}
+/* relock: */
+
+bool bch2_btree_path_relock_norestart(struct btree_trans *,
+ struct btree_path *, unsigned long);
+int __bch2_btree_path_relock(struct btree_trans *,
+ struct btree_path *, unsigned long);
+
+static inline int bch2_btree_path_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned long trace_ip)
+{
+ return btree_node_locked(path, path->level)
+ ? 0
+ : __bch2_btree_path_relock(trans, path, trace_ip);
+}
+
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
+
+static inline bool bch2_btree_node_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
+{
+ EBUG_ON(btree_node_locked(path, level) &&
+ btree_node_locked_type(path, level) !=
+ __btree_lock_want(path, level));
+
+ return likely(btree_node_locked(path, level)) ||
+ __bch2_btree_node_relock(trans, path, level);
+}
+
+/* upgrade */
+
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
struct btree_path *, unsigned);
bool __bch2_btree_path_upgrade(struct btree_trans *,
@@ -318,6 +339,8 @@ static inline bool bch2_btree_path_upgrade(struct btree_trans *trans,
: path->uptodate == BTREE_ITER_UPTODATE;
}
+/* misc: */
+
static inline void btree_path_set_should_be_locked(struct btree_path *path)
{
EBUG_ON(!btree_node_locked(path, path->level));
@@ -341,23 +364,11 @@ static inline void btree_path_set_level_up(struct btree_trans *trans,
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}
+/* debug */
+
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
struct btree_path *, struct btree *, unsigned);
-bool bch2_btree_path_relock_norestart(struct btree_trans *,
- struct btree_path *, unsigned long);
-int __bch2_btree_path_relock(struct btree_trans *,
- struct btree_path *, unsigned long);
-
-static inline int bch2_btree_path_relock(struct btree_trans *trans,
- struct btree_path *path, unsigned long trace_ip)
-{
- return btree_node_locked(path, path->level)
- ? 0
- : __bch2_btree_path_relock(trans, path, trace_ip);
-}
-
-int bch2_btree_path_relock(struct btree_trans *, struct btree_path *, unsigned long);
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
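A subtlety worth noting in bch2_btree_node_unlock_write_inlined(), moved above in the header: a six lock's sequence number is incremented on write lock and again on write unlock, so it is odd while write-locked and even otherwise. Bumping every affected path's saved lock_seq by 2 therefore pre-computes the post-unlock sequence, which is what lets a later bch2_btree_node_relock() succeed through six_relock_type(). A worked example; the +1/+2 arithmetic follows from the EBUG_ON in the function itself, while the odd/even framing and the concrete numbers are illustrative:

```c
/*
 * How the saved lock_seq tracks the six lock's sequence number across a
 * write lock/unlock cycle (numbers are hypothetical):
 *
 *   b->c.lock.state.seq == 4            node not write-locked (even)
 *   path->l[level].lock_seq = 4         saved when the lock was taken
 *
 *   six_lock_write()                    seq: 4 -> 5 (odd while write-locked)
 *   ... modify the node ...
 *   bch2_btree_node_unlock_write_inlined():
 *       EBUG_ON(lock_seq + 1 != seq)    4 + 1 == 5, still consistent
 *       linked->lock_seq += 2           saved seq becomes 6
 *   six_unlock_write()                  seq: 5 -> 6
 *
 *   six_relock_type(&b->c.lock, want, 6)    relock succeeds: seq matches
 */
```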