/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}
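
/*
 * Illustrative example of the helpers above (numbers are hypothetical, not
 * taken from any real device): with ca->mi.bucket_size == 1024 sectors,
 *
 *	u32 offset;
 *	u64 b = sector_to_bucket_and_offset(ca, 3000, &offset);
 *
 * yields b == 2 and offset == 952, and
 * bucket_to_sector(ca, b) + offset == 3000 recovers the original sector.
 */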

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
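
/*
 * Usage sketch for for_each_bucket() (assumes a bucket array with ->b,
 * ->first_bucket and ->nbuckets, as the macro expects; process_bucket() is a
 * hypothetical callback shown only for illustration):
 *
 *	struct bucket *b;
 *
 *	for_each_bucket(b, buckets)
 *		process_bucket(b);
 */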

/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock into a single byte, because that's what we have
 * left in struct bucket, and we care about the size of these - during fsck, we
 * need in-memory state for every single bucket on every device.
 *
 * We used to do
 *	while (xchg(&b->lock, 1))
 *		cpu_relax();
 * but it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};
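
/*
 * The union above exists only for the BUILD_BUG_ON() in bucket_unlock(): it
 * lets us check at compile time that the bit selected by BUCKET_LOCK_BITNR
 * falls in the first byte of a ulong, i.e. in the byte that &b->lock actually
 * points at.
 */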

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
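
/*
 * Typical pairing (sketch only; mark_bucket() is a hypothetical helper).
 * bucket_lock() waits in TASK_UNINTERRUPTIBLE and so may sleep:
 *
 *	bucket_lock(b);
 *	mark_bucket(b);
 *	bucket_unlock(b);
 */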

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	return genradix_ptr(&ca->buckets_gc, b);
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->state_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
{
	u8 *gen = bucket_gen(ca, b);
	return gen ? *gen : -1;
}

static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	int ret = bucket_gen_get_rcu(ca, b);
	rcu_read_unlock();
	return ret;
}
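
/*
 * Note on the generation accessors above: bucket_gens() is protected by RCU
 * (or by one of the locks listed in the rcu_dereference_check()), so
 * bucket_gen() and bucket_gen_get_rcu() are only safe with rcu_read_lock()
 * held or one of those locks taken; bucket_gen_get() is the convenience
 * wrapper that takes the RCU read lock itself. A negative return means the
 * bucket index was out of range.
 */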

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					    const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}
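
/*
 * Worked example for ptr_disk_sectors() (hypothetical numbers): for a pointer
 * whose crc entry says 16 uncompressed sectors were compressed down to 8,
 * charging 10 live sectors costs DIV_ROUND_UP(10 * 8, 16) == 5 sectors of
 * actual disk space; uncompressed extents are charged 1:1.
 */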

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}
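
/*
 * Bucket generations are 8-bit counters that wrap, so gen_cmp() compares them
 * via a signed 8-bit difference: e.g. gen_cmp(1, 255) == 2, i.e. generation 1
 * is treated as newer than 255 after wrap-around. gen_after() clamps that to
 * "how many generations ahead a is of b", or 0 if it isn't ahead.
 */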

static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
	return gen < 0 ? gen : gen_after(gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();
	return ret;
}
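
/*
 * Interpretation of the return value (follows from gen_after() and
 * bucket_gen_get_rcu() above): a positive result is the number of generations
 * by which the bucket has advanced past the pointer - i.e. the pointer is
 * stale - zero means the pointer is still current, and a negative value means
 * the bucket index was out of range.
 */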

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}
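
/*
 * The switch above is a deliberate waterfall: each watermark falls through to
 * the ones below it, so less privileged watermarks accumulate a larger
 * reserve. Working it through: BCH_WATERMARK_stripe reserves
 * 2 * (nbuckets >> 6) + 2 * nr_btree_reserve buckets, BCH_WATERMARK_normal
 * (nbuckets >> 6) + 2 * nr_btree_reserve, copygc 2 * nr_btree_reserve, btree
 * nr_btree_reserve, and the btree_copygc/reclaim/interior_updates watermarks
 * reserve nothing.
 */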

static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}
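
/*
 * dev_buckets_free() counts only buckets that are already free, while
 * __dev_buckets_available() also counts buckets that could be made free
 * without moving data (cached, need_gc_gens, need_discard); both subtract the
 * currently open buckets and the reserve for the given watermark, and clamp
 * the result at zero.
 */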

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			     struct bkey_s_c, struct bkey_s,
			     enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)	\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})
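
/*
 * trigger_run_overwrite_then_insert() runs _fn once for the key being
 * overwritten (with BTREE_TRIGGER_insert masked off) and, if that succeeded,
 * once for the key being inserted (with BTREE_TRIGGER_overwrite masked off).
 * A trigger implementation might dispatch through it roughly like this
 * (sketch only; __my_trigger is a hypothetical helper):
 *
 *	return trigger_run_overwrite_then_insert(__my_trigger, trans,
 *						 btree_id, level, old, new, flags);
 */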

void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
			   enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				  enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}
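
/*
 * Note on is_superblock_bucket(): bucket 0 is always treated as a superblock
 * bucket, and for every other bucket the test
 * !(offset >= b_end || end <= b_offset) is the usual interval-intersection
 * check - the bucket's sector range [b_offset, b_end) overlaps the superblock
 * copy at [offset, end).
 */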

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

enum bch_reservation_flags {
	BCH_DISK_RESERVATION_NOFAIL	= 1 << 0,
	BCH_DISK_RESERVATION_PARTIAL	= 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
				u64, enum bch_reservation_flags);

static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
	u64 old, new;

	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}
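
/*
 * bch2_disk_reservation_add() is the fast path: it tries to carve the
 * reservation out of this CPU's sectors_available pool with a cmpxchg loop,
 * and only falls back to __bch2_disk_reservation_add() - the out-of-line slow
 * path, which also receives the NOFAIL/PARTIAL flags - when the local pool is
 * too small, or in userspace builds where there is no percpu pool.
 */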

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}
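
/*
 * With RESERVE_FACTOR == 6, avail_factor(r) computes r * 64 / 65: it holds
 * back roughly 1/65 of the raw capacity as slack, e.g. avail_factor(65) == 64.
 */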

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */