author     Kent Overstreet <kent.overstreet@linux.dev>   2023-09-13 20:33:06 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-22 17:10:14 -0400
commit     8c2d82a6fe6fa0e3503c56c08d7fc599d66e2b79 (patch)
tree       5d5cf0a0dae12a6066cbbf34d94ed7e5283c654c /fs/bcachefs
parent     439c172bc763fc1ef33246a0fb23920c1e01ffa7 (diff)
bcachefs: Change bucket_lock() to use bit_spin_lock()
bucket_lock() previously open coded a spinlock, because we need to cram
a spinlock into a single byte.

But it turns out not all archs support xchg() on a single byte; since we
need struct bucket to be small, this means we have to play fun games
with casts and ifdefs for endianness.

This fixes building on 32 bit arm, and likely other architectures.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Cc: linux-bcachefs@vger.kernel.org
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs')
-rw-r--r--  fs/bcachefs/buckets.h  33
1 file changed, 30 insertions(+), 3 deletions(-)
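
The patch hinges on one detail: bit_spin_lock() operates on an unsigned long, but struct bucket only has a single spare byte for the lock, so the bit number has to be chosen per endianness so that the lock bit lands in that first byte. A minimal standalone sketch of that check (userspace C, not kernel code; the main()/printf/memcpy scaffolding is mine, only BUCKET_LOCK_BITNR and the byte-order test mirror the patch):

/*
 * Standalone sketch (userspace C, not kernel code): check that
 * 1UL << BUCKET_LOCK_BITNR sets a bit inside the first byte of an
 * unsigned long -- the byte that would overlay b->lock in struct bucket.
 */
#include <stdio.h>
#include <string.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR 0			/* bit 0 lives in byte 0 on little endian */
#else
#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)	/* the MSB lives in byte 0 on big endian */
#endif

int main(void)
{
	unsigned long word = 1UL << BUCKET_LOCK_BITNR;
	unsigned char first_byte;

	memcpy(&first_byte, &word, 1);	/* read the byte that would be b->lock */
	printf("lock bit lands in the first byte: %s\n",
	       first_byte ? "yes" : "no");
	return 0;
}
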
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 0eff05c79c65..ecbeb7280f87 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -40,15 +40,42 @@ static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, secto
for (_b = (_buckets)->b + (_buckets)->first_bucket; \
_b < (_buckets)->b + (_buckets)->nbuckets; _b++)
+/*
+ * Ugly hack alert:
+ *
+ * We need to cram a spinlock in a single byte, because that's what we have left
+ * in struct bucket, and we care about the size of these - during fsck, we need
+ * in memory state for every single bucket on every device.
+ *
+ * We used to do
+ * while (xchg(&b->lock, 1)) cpu_relax();
+ * but, it turns out not all architectures support xchg on a single byte.
+ *
+ * So now we use bit_spin_lock(), with fun games since we can't burn a whole
+ * ulong for this - we just need to make sure the lock bit always ends up in the
+ * first byte.
+ */
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define BUCKET_LOCK_BITNR 0
+#else
+#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
+#endif
+
+union ulong_byte_assert {
+ ulong ulong;
+ u8 byte;
+};
+
static inline void bucket_unlock(struct bucket *b)
{
- smp_store_release(&b->lock, 0);
+ BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
+ bit_spin_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
}
static inline void bucket_lock(struct bucket *b)
{
- while (xchg(&b->lock, 1))
- cpu_relax();
+ bit_spin_lock(BUCKET_LOCK_BITNR, (void *) &b->lock);
}
static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
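
For context, bit_spin_lock()/bit_spin_unlock() amount to an atomic test-and-set of one bit with acquire ordering and an atomic clear with release ordering. A rough userspace analogue (a sketch using the GCC/Clang __atomic builtins; the sketch_* names are invented here and this is not the kernel implementation):

/*
 * Rough userspace analogue of the locking pattern: atomically set the
 * lock bit, spinning while it was already set; clear it with release
 * ordering to unlock.  Not the kernel's bit_spin_lock() implementation.
 */
static inline void sketch_bit_lock(unsigned long *word, int bitnr)
{
	unsigned long mask = 1UL << bitnr;

	while (__atomic_fetch_or(word, mask, __ATOMIC_ACQUIRE) & mask)
		;	/* spin; the kernel would call cpu_relax() here */
}

static inline void sketch_bit_unlock(unsigned long *word, int bitnr)
{
	__atomic_fetch_and(word, ~(1UL << bitnr), __ATOMIC_RELEASE);
}

Because bucket_lock() casts &b->lock to (void *), the bit operation works on the unsigned long that begins at that address (which relies on b->lock's placement in struct bucket); the BUILD_BUG_ON() in bucket_unlock() then proves at compile time that 1UL << BUCKET_LOCK_BITNR really sets a bit within that first byte.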