diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 0eff05c79c65..ecbeb7280f87 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -40,15 +40,42 @@ static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, secto
 	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
 	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
 
+/*
+ * Ugly hack alert:
+ *
+ * We need to cram a spinlock in a single byte, because that's what we have left
+ * in struct bucket, and we care about the size of these - during fsck, we need
+ * in memory state for every single bucket on every device.
+ *
+ * We used to do
+ *   while (xchg(&b->lock, 1)) cpu_relax();
+ * but, it turns out not all architectures support xchg on a single byte.
+ *
+ * So now we use bit_spin_lock(), with fun games since we can't burn a whole
+ * ulong for this - we just need to make sure the lock bit always ends up in the
+ * first byte.
+ */
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define BUCKET_LOCK_BITNR	0
+#else
+#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
+#endif
+
+union ulong_byte_assert {
+	ulong	ulong;
+	u8	byte;
+};
+
 static inline void bucket_unlock(struct bucket *b)
 {
-	smp_store_release(&b->lock, 0);
+	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
+	bit_spin_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
 }
 
 static inline void bucket_lock(struct bucket *b)
 {
-	while (xchg(&b->lock, 1))
-		cpu_relax();
+	bit_spin_lock(BUCKET_LOCK_BITNR, (void *) &b->lock);
 }
 
 static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
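
For readers unfamiliar with the endianness dance above, here is a minimal standalone
userspace sketch (not part of the patch) of why the bit number differs between little-
and big-endian: bit_spin_lock() operates on a whole unsigned long, but only the first
byte of that word belongs to struct bucket's lock field, so the chosen bit has to land
in byte 0. The kernel's BITS_PER_LONG is approximated with sizeof(unsigned long) * 8,
since the kernel header is not available here.

	/* Standalone sketch, not part of the patch: checks at runtime that the
	 * chosen lock bit lands in the first byte of an unsigned long - the
	 * same condition the patch asserts at compile time via BUILD_BUG_ON()
	 * in bucket_unlock().
	 */
	#include <stdio.h>

	#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	#define BUCKET_LOCK_BITNR	0
	#else
	#define BUCKET_LOCK_BITNR	(sizeof(unsigned long) * 8 - 1)
	#endif

	int main(void)
	{
		union {
			unsigned long	ul;
			unsigned char	byte;	/* aliases the lowest-addressed byte */
		} u = { .ul = 1UL << BUCKET_LOCK_BITNR };

		/* On little endian, bit 0 sits in the lowest-addressed byte; on
		 * big endian, the most significant bit does.  Either way the
		 * lock bit must show up here, since struct bucket only donates
		 * its first byte to the lock.
		 */
		printf("lock bit lands in the first byte: %s\n",
		       u.byte ? "yes" : "no");
		return 0;
	}

On both layouts this prints "yes", which is exactly the invariant the BUILD_BUG_ON()
in the new bucket_unlock() enforces at compile time.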