author    Kent Overstreet <kent.overstreet@gmail.com>  2020-07-10 16:13:52 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:08:43 -0400
commit    988e98cfce26ecad20595cb52056759e798cd8de (patch)
tree      d2e87e917561948e12d3ed67c8642349a76ee639
parent    8f3b41ab4f39f87712ed57e0443642d7bcabd1ff (diff)
bcachefs: Refactor replicas code
A while back the mechanism for garbage collecting unused replicas entries was significantly improved, but some cleanup was missed - this patch does that now.

This is also prep work for a patch to account for erasure coded parity blocks separately - we need to consolidate the logic for checking/marking the various replicas entries from one bkey into a single function.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
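The heart of the change is visible in replicas.c below: the old code had parallel "is this marked?" and "mark it" paths that each walked the cached-device entries plus the key's own replicas entry, and the patch folds them into a single walk driven by a check flag (__bch2_mark_replicas() / __bch2_mark_bkey_replicas()), with the public marked()/mark() functions reduced to thin wrappers. The following is a minimal standalone sketch of that check-or-mark pattern, using hypothetical names (entry_set, entry_is_marked, entry_mark, __mark_entries); it illustrates the structure only and is not the bcachefs implementation.

/*
 * Standalone illustration (not bcachefs code): one internal walk either
 * checks that every entry is marked or marks the missing ones, selected
 * by a 'check' flag.  All names here are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct entry_set {
	bool		marked[8];
	unsigned	nr;
};

static bool entry_is_marked(const struct entry_set *s, unsigned i)
{
	return s->marked[i];
}

/* Stand-in for the "slowpath" that records an entry persistently. */
static int entry_mark(struct entry_set *s, unsigned i)
{
	s->marked[i] = true;
	return 0;
}

/*
 * One loop, two behaviours: check == true returns -1 at the first unmarked
 * entry without modifying anything; check == false marks whatever is missing.
 */
static int __mark_entries(struct entry_set *s, bool check)
{
	unsigned i;
	int ret;

	for (i = 0; i < s->nr; i++) {
		if (entry_is_marked(s, i))
			continue;
		if (check)
			return -1;
		ret = entry_mark(s, i);
		if (ret)
			return ret;
	}

	return 0;
}

/* Thin wrappers, analogous to the marked()/mark() pairs in the patch: */
static bool entries_marked(struct entry_set *s)
{
	return __mark_entries(s, true) == 0;
}

static int mark_entries(struct entry_set *s)
{
	return __mark_entries(s, false);
}

int main(void)
{
	struct entry_set s = { .marked = { true, false, true }, .nr = 3 };

	printf("marked before: %d\n", entries_marked(&s));	/* prints 0 */
	mark_entries(&s);
	printf("marked after:  %d\n", entries_marked(&s));	/* prints 1 */
	return 0;
}

With check == true the walk is a pure predicate that fails fast on the first unmarked entry; with check == false it falls through to the marking slowpath, so the check path and the mark path can no longer drift apart.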
-rw-r--r--  fs/bcachefs/btree_gc.c     2
-rw-r--r--  fs/bcachefs/extents.c     10
-rw-r--r--  fs/bcachefs/journal_io.c   2
-rw-r--r--  fs/bcachefs/replicas.c    79
-rw-r--r--  fs/bcachefs/replicas.h     8
5 files changed, 31 insertions, 70 deletions
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 36fa4853e8a1..cebba06f3a96 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -111,7 +111,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
atomic64_set(&c->key_version, k.k->version.lo);
if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
- fsck_err_on(!bch2_bkey_replicas_marked(c, k, false), c,
+ fsck_err_on(!bch2_bkey_replicas_marked(c, k), c,
"superblock not marked as containing replicas (type %u)",
k.k->type)) {
ret = bch2_mark_bkey_replicas(c, k);
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 0fae8d76365e..02618b9c918c 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -178,11 +178,6 @@ void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
if (!percpu_down_read_trylock(&c->mark_lock))
return;
- bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !bch2_bkey_replicas_marked_locked(c, k, false), c,
- "btree key bad (replicas not marked in superblock):\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
-
bkey_for_each_ptr(ptrs, ptr) {
ca = bch_dev_bkey_exists(c, ptr->dev);
@@ -266,11 +261,6 @@ void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
if (!percpu_down_read_trylock(&c->mark_lock))
return;
- bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
- "extent key bad (replicas not marked in superblock):\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
-
extent_for_each_ptr_decode(e, p, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 1e505f294095..b43f69c19b0f 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -699,7 +699,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)
if (!degraded &&
(test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
- fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
+ fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
"superblock not marked as containing replicas %s",
(bch2_replicas_entry_to_text(&PBUF(buf),
&replicas.e), buf)))) {
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 91e050732aaf..db0665abd60b 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -213,29 +213,20 @@ static bool __replicas_has_entry(struct bch_replicas_cpu *r,
return __replicas_entry_idx(r, search) >= 0;
}
-static bool bch2_replicas_marked_locked(struct bch_fs *c,
- struct bch_replicas_entry *search,
- bool check_gc_replicas)
+bool bch2_replicas_marked(struct bch_fs *c,
+ struct bch_replicas_entry *search)
{
+ bool marked;
+
if (!search->nr_devs)
return true;
verify_replicas_entry(search);
- return __replicas_has_entry(&c->replicas, search) &&
- (!check_gc_replicas ||
- likely((!c->replicas_gc.entries)) ||
- __replicas_has_entry(&c->replicas_gc, search));
-}
-
-bool bch2_replicas_marked(struct bch_fs *c,
- struct bch_replicas_entry *search,
- bool check_gc_replicas)
-{
- bool marked;
-
percpu_down_read(&c->mark_lock);
- marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
+ marked = __replicas_has_entry(&c->replicas, search) &&
+ (likely((!c->replicas_gc.entries)) ||
+ __replicas_has_entry(&c->replicas_gc, search));
percpu_up_read(&c->mark_lock);
return marked;
@@ -426,66 +417,50 @@ err:
goto out;
}
-int bch2_mark_replicas(struct bch_fs *c,
- struct bch_replicas_entry *r)
+static int __bch2_mark_replicas(struct bch_fs *c,
+ struct bch_replicas_entry *r,
+ bool check)
{
- return likely(bch2_replicas_marked(c, r, true))
- ? 0
+ return likely(bch2_replicas_marked(c, r)) ? 0
+ : check ? -1
: bch2_mark_replicas_slowpath(c, r);
}
-bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
- struct bkey_s_c k,
- bool check_gc_replicas)
+int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
+{
+ return __bch2_mark_replicas(c, r, false);
+}
+
+static int __bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k,
+ bool check)
{
struct bch_replicas_padded search;
struct bch_devs_list cached = bch2_bkey_cached_devs(k);
unsigned i;
+ int ret;
for (i = 0; i < cached.nr; i++) {
bch2_replicas_entry_cached(&search.e, cached.devs[i]);
- if (!bch2_replicas_marked_locked(c, &search.e,
- check_gc_replicas))
- return false;
+ ret = __bch2_mark_replicas(c, &search.e, check);
+ if (ret)
+ return ret;
}
bch2_bkey_to_replicas(&search.e, k);
- return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);
+ return __bch2_mark_replicas(c, &search.e, check);
}
bool bch2_bkey_replicas_marked(struct bch_fs *c,
- struct bkey_s_c k,
- bool check_gc_replicas)
+ struct bkey_s_c k)
{
- bool marked;
-
- percpu_down_read(&c->mark_lock);
- marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
- percpu_up_read(&c->mark_lock);
-
- return marked;
+ return __bch2_mark_bkey_replicas(c, k, true) == 0;
}
int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
- struct bch_replicas_padded search;
- struct bch_devs_list cached = bch2_bkey_cached_devs(k);
- unsigned i;
- int ret;
-
- for (i = 0; i < cached.nr; i++) {
- bch2_replicas_entry_cached(&search.e, cached.devs[i]);
-
- ret = bch2_mark_replicas(c, &search.e);
- if (ret)
- return ret;
- }
-
- bch2_bkey_to_replicas(&search.e, k);
-
- return bch2_mark_replicas(c, &search.e);
+ return __bch2_mark_bkey_replicas(c, k, false);
}
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
diff --git a/fs/bcachefs/replicas.h b/fs/bcachefs/replicas.h
index deda5f5c6e20..8b95164fbb56 100644
--- a/fs/bcachefs/replicas.h
+++ b/fs/bcachefs/replicas.h
@@ -21,16 +21,12 @@ int bch2_replicas_entry_idx(struct bch_fs *,
void bch2_devlist_to_replicas(struct bch_replicas_entry *,
enum bch_data_type,
struct bch_devs_list);
-bool bch2_replicas_marked(struct bch_fs *,
- struct bch_replicas_entry *, bool);
+bool bch2_replicas_marked(struct bch_fs *, struct bch_replicas_entry *);
int bch2_mark_replicas(struct bch_fs *,
struct bch_replicas_entry *);
-bool bch2_bkey_replicas_marked_locked(struct bch_fs *,
- struct bkey_s_c, bool);
void bch2_bkey_to_replicas(struct bch_replicas_entry *, struct bkey_s_c);
-bool bch2_bkey_replicas_marked(struct bch_fs *,
- struct bkey_s_c, bool);
+bool bch2_bkey_replicas_marked(struct bch_fs *, struct bkey_s_c);
int bch2_mark_bkey_replicas(struct bch_fs *, struct bkey_s_c);
static inline void bch2_replicas_entry_cached(struct bch_replicas_entry *e,