-rw-r--r--   fs/bcachefs/alloc_background.c    2
-rw-r--r--   fs/bcachefs/buckets.c             4
-rw-r--r--   fs/bcachefs/buckets.h             5
-rw-r--r--   fs/bcachefs/movinggc.c           21
4 files changed, 15 insertions, 17 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 0cf71125c55f..1353e72bbfb0 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -544,7 +544,7 @@ static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
				u64 now, u64 last_seq_ondisk)
{
-	unsigned used = bucket_sectors_used(m);
+	unsigned used = m.cached_sectors;

	if (used) {
		/*
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index fb833d82222b..acdc95d8d4c7 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -291,8 +291,8 @@ static inline int is_unavailable_bucket(struct bucket_mark m)
static inline int bucket_sectors_fragmented(struct bch_dev *ca,
					    struct bucket_mark m)
{
-	return bucket_sectors_used(m)
-		? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
+	return m.dirty_sectors
+		? max(0, (int) ca->mi.bucket_size - (int) m.dirty_sectors)
		: 0;
}
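
For reference, a standalone sketch of the fragmentation calculation as it reads after this change, using stand-in types rather than the kernel's struct bch_dev and struct bucket_mark; the function name and values here are illustrative only:

#include <stdio.h>

struct mark { unsigned dirty_sectors; };
struct dev  { unsigned bucket_size; };

/* Mirrors the logic of bucket_sectors_fragmented() after this patch: a bucket
 * counts as fragmented only if it holds dirty data, and the fragmented amount
 * is the unused remainder of the bucket. */
static int sectors_fragmented(struct dev *ca, struct mark m)
{
	int unused = (int) ca->bucket_size - (int) m.dirty_sectors;

	return m.dirty_sectors && unused > 0 ? unused : 0;
}

int main(void)
{
	struct dev ca = { .bucket_size = 512 };
	struct mark m = { .dirty_sectors = 100 };

	printf("%d\n", sectors_fragmented(&ca, m));	/* 512 - 100 = 412 */
	return 0;
}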
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 4b5376684d2c..483c8b24293f 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -149,11 +149,6 @@ static inline u8 ptr_stale(struct bch_dev *ca,
/* bucket gc marks */
-static inline unsigned bucket_sectors_used(struct bucket_mark mark)
-{
- return mark.dirty_sectors + mark.cached_sectors;
-}
-
static inline bool is_available_bucket(struct bucket_mark mark)
{
return !mark.dirty_sectors && !mark.stripe;
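
The helper removed above summed the two per-bucket counters; after this patch each call site reads only the counter it cares about. A minimal sketch with a stand-in struct (not the kernel's struct bucket_mark), illustrative values only:

#include <stdio.h>

/* Stand-in for the two struct bucket_mark fields used in this patch. */
struct mark {
	unsigned dirty_sectors;		/* live, dirty data */
	unsigned cached_sectors;	/* clean, cached data */
};

/* What the removed helper computed: */
static unsigned bucket_sectors_used(struct mark m)
{
	return m.dirty_sectors + m.cached_sectors;
}

int main(void)
{
	struct mark m = { .dirty_sectors = 100, .cached_sectors = 28 };

	/* Before: every caller saw the sum. */
	printf("used: %u\n", bucket_sectors_used(m));	/* 128 */

	/* After: the allocator's sort key reads only cached_sectors, while
	 * copygc and the fragmentation accounting read only dirty_sectors. */
	printf("alloc sort key: %u, copygc: %u\n",
	       m.cached_sectors, m.dirty_sectors);	/* 28, 100 */
	return 0;
}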
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 4791e5099d93..64cb10c3f3db 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -69,10 +69,14 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
			.dev = p.ptr.dev,
			.offset = p.ptr.offset,
		};
+		ssize_t i;

-		ssize_t i = eytzinger0_find_le(h->data, h->used,
-					       sizeof(h->data[0]),
-					       bucket_offset_cmp, &search);
+		if (p.ptr.cached)
+			continue;
+
+		i = eytzinger0_find_le(h->data, h->used,
+				       sizeof(h->data[0]),
+				       bucket_offset_cmp, &search);
#if 0
		/* eytzinger search verify code: */
		ssize_t j = -1, k;
@@ -185,8 +189,7 @@ static int bch2_copygc(struct bch_fs *c)

		if (m.owned_by_allocator ||
		    m.data_type != BCH_DATA_user ||
-		    !bucket_sectors_used(m) ||
-		    bucket_sectors_used(m) >= ca->mi.bucket_size)
+		    m.dirty_sectors >= ca->mi.bucket_size)
			continue;

		WARN_ON(m.stripe && !g->stripe_redundancy);
@@ -195,9 +198,9 @@ static int bch2_copygc(struct bch_fs *c)
			.dev		= dev_idx,
			.gen		= m.gen,
			.replicas	= 1 + g->stripe_redundancy,
-			.fragmentation	= bucket_sectors_used(m) * (1U << 15)
+			.fragmentation	= m.dirty_sectors * (1U << 15)
					/ ca->mi.bucket_size,
-			.sectors	= bucket_sectors_used(m),
+			.sectors	= m.dirty_sectors,
			.offset		= bucket_to_sector(ca, b),
		};
		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
@@ -263,8 +266,8 @@ static int bch2_copygc(struct bch_fs *c)
		m = READ_ONCE(buckets->b[b].mark);

		if (i->gen == m.gen &&
-		    bucket_sectors_used(m)) {
-			sectors_not_moved += bucket_sectors_used(m);
+		    m.dirty_sectors) {
+			sectors_not_moved += m.dirty_sectors;
			buckets_not_moved++;
		}
	}
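
The .fragmentation value built in bch2_copygc() above is the bucket's dirty-sector fill fraction scaled by 2^15 so it stays an integer; a small arithmetic sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Illustrative values only: a 512-sector bucket with 128 dirty sectors. */
	unsigned dirty_sectors = 128;
	unsigned bucket_size   = 512;

	/* Same arithmetic as the .fragmentation initializer above: the in-use
	 * fraction of the bucket, scaled by 1 << 15. */
	unsigned fragmentation = dirty_sectors * (1U << 15) / bucket_size;

	printf("%u\n", fragmentation);	/* 128 * 32768 / 512 = 8192 */
	return 0;
}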