bcachefs: Separate out gc_bucket()
Since the main in-memory bucket array is going away, we don't want to be calling bucket() or __bucket() when what we want is the GC in-memory bucket.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 9ddffaf83b
commit 47ac34ec98
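For orientation, the sketch below condenses the accessor split this patch introduces: call sites that want GC's private copy of the bucket state now go through gc_bucket() (and PTR_GC_BUCKET() for extent pointers), while bucket()/PTR_BUCKET() keep returning the main array. The struct layouts and the two plain arrays here are simplified stand-ins, not the real bcachefs definitions; those live in the buckets header touched by the diff below.

/*
 * Simplified, self-contained sketch of the gc_bucket()/bucket() split.
 * The two arrays in struct bch_dev are illustrative stand-ins only.
 */
#include <stddef.h>
#include <stdbool.h>

struct bucket {
        unsigned char gen;              /* per-bucket generation number */
};

struct bch_dev {
        struct bucket *buckets;         /* main in-memory bucket array */
        struct bucket *gc_buckets;      /* GC's private copy */
};

/* low-level lookup: 'gc' selects which copy of the array to use */
static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
        return (gc ? ca->gc_buckets : ca->buckets) + b;
}

/* new in this patch: GC call sites ask for the GC bucket explicitly */
static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
        return __bucket(ca, b, true);
}

/* the plain accessor keeps returning the main array */
static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
        return __bucket(ca, b, false);
}

PTR_GC_BUCKET() and PTR_BUCKET() in the diff are the same split applied to extent-pointer lookups, and the added BUG_ON(!(flags & BTREE_TRIGGER_GC)) assertions make explicit that the mark functions touched here only run from the GC path.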
@@ -504,8 +504,8 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
 	 */
 	bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
-		struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
-		struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false);
+		struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+		struct bucket *g2 = PTR_BUCKET(ca, &p.ptr);
 		enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);

 		if (fsck_err_on(!g->gen_valid, c,
@@ -643,14 +643,14 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
 			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
 			bkey_for_each_ptr(ptrs, ptr) {
 				struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-				struct bucket *g = PTR_BUCKET(ca, ptr, true);
+				struct bucket *g = PTR_GC_BUCKET(ca, ptr);

 				ptr->gen = g->mark.gen;
 			}
 		} else {
 			bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
 				struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-				struct bucket *g = PTR_BUCKET(ca, ptr, true);
+				struct bucket *g = PTR_GC_BUCKET(ca, ptr);
 				enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);

 				(ptr->cached &&
@@ -737,7 +737,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
 	ptrs = bch2_bkey_ptrs_c(*k);
 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-		struct bucket *g = PTR_BUCKET(ca, ptr, true);
+		struct bucket *g = PTR_GC_BUCKET(ca, ptr);

 		if (gen_after(g->oldest_gen, ptr->gen))
 			g->oldest_gen = ptr->gen;
@@ -1753,7 +1753,7 @@ static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
 	percpu_down_read(&c->mark_lock);
 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-		struct bucket *g = PTR_BUCKET(ca, ptr, false);
+		struct bucket *g = PTR_BUCKET(ca, ptr);

 		if (gen_after(g->mark.gen, ptr->gen) > 16) {
 			percpu_up_read(&c->mark_lock);
@@ -1763,7 +1763,7 @@ static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)

 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-		struct bucket *g = PTR_BUCKET(ca, ptr, false);
+		struct bucket *g = PTR_BUCKET(ca, ptr);

 		if (gen_after(g->gc_gen, ptr->gen))
 			g->gc_gen = ptr->gen;
@@ -344,13 +344,6 @@ static inline enum bch_data_type bucket_type(struct bucket_mark m)
 		: m.data_type;
 }

-static bool bucket_became_unavailable(struct bucket_mark old,
-				      struct bucket_mark new)
-{
-	return is_available_bucket(old) &&
-	       !is_available_bucket(new);
-}
-
 static inline void account_bucket(struct bch_fs_usage *fs_usage,
 				  struct bch_dev_usage *dev_usage,
 				  enum bch_data_type type,
@@ -659,7 +652,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 		return;

 	percpu_down_read(&c->mark_lock);
-	g = __bucket(ca, b, true);
+	g = gc_bucket(ca, b);
 	old = bucket_cmpxchg(g, new, ({
 		new.data_type = data_type;
 		overflow = checked_add(new.dirty_sectors, sectors);
@@ -779,17 +772,18 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 	enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
 	s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
 	const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
-	bool gc = flags & BTREE_TRIGGER_GC;
 	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
 	struct bucket *g;
 	struct bucket_mark new, old;
 	char buf[200];
 	int ret = 0;

+	BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
 	/* XXX doesn't handle deletion */

 	percpu_down_read(&c->mark_lock);
-	g = PTR_BUCKET(ca, ptr, gc);
+	g = PTR_GC_BUCKET(ca, ptr);

 	if (g->mark.dirty_sectors ||
 	    (g->stripe && g->stripe != k.k->p.offset)) {
@@ -823,7 +817,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 	g->stripe = k.k->p.offset;
 	g->stripe_redundancy = s->nr_redundant;

-	bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
+	bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
 err:
 	percpu_up_read(&c->mark_lock);

@@ -859,7 +853,6 @@ static int bch2_mark_pointer(struct btree_trans *trans,
 			     s64 sectors, enum bch_data_type data_type,
 			     unsigned flags)
 {
-	bool gc = flags & BTREE_TRIGGER_GC;
 	u64 journal_seq = trans->journal_res.seq;
 	struct bch_fs *c = trans->c;
 	struct bucket_mark old, new;
@@ -869,8 +862,10 @@ static int bch2_mark_pointer(struct btree_trans *trans,
 	u64 v;
 	int ret = 0;

+	BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
 	percpu_down_read(&c->mark_lock);
-	g = PTR_BUCKET(ca, &p.ptr, gc);
+	g = PTR_GC_BUCKET(ca, &p.ptr);

 	v = atomic64_read(&g->_mark.v);
 	do {
@@ -900,9 +895,7 @@ static int bch2_mark_pointer(struct btree_trans *trans,
 			      old.v.counter,
 			      new.v.counter)) != old.v.counter);

-	bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
-
-	BUG_ON(!gc && bucket_became_unavailable(old, new));
+	bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
 err:
 	percpu_up_read(&c->mark_lock);

@@ -916,37 +909,35 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 				s64 sectors,
 				unsigned flags)
 {
-	bool gc = flags & BTREE_TRIGGER_GC;
 	struct bch_fs *c = trans->c;
 	struct bch_replicas_padded r;
+	struct gc_stripe *m;

-	if (!gc) {
-		BUG();
-	} else {
-		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
+	BUG_ON(!(flags & BTREE_TRIGGER_GC));

-		if (!m)
-			return -ENOMEM;
+	m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);

-		spin_lock(&c->ec_stripes_heap_lock);
+	if (!m)
+		return -ENOMEM;

-		if (!m || !m->alive) {
-			spin_unlock(&c->ec_stripes_heap_lock);
-			bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
-					    (u64) p.idx);
-			bch2_inconsistent_error(c);
-			return -EIO;
-		}
+	spin_lock(&c->ec_stripes_heap_lock);

-		m->block_sectors[p.block] += sectors;
+	if (!m || !m->alive) {
+		spin_unlock(&c->ec_stripes_heap_lock);
+		bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
+				    (u64) p.idx);
+		bch2_inconsistent_error(c);
+		return -EIO;
+	}

-		r = m->r;
-		spin_unlock(&c->ec_stripes_heap_lock);
+	m->block_sectors[p.block] += sectors;

-		r.e.data_type = data_type;
-		update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, gc);
-	}
+	r = m->r;
+	spin_unlock(&c->ec_stripes_heap_lock);
+
+	r.e.data_type = data_type;
+	update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);

 	return 0;
 }
@@ -954,7 +945,6 @@ static int bch2_mark_extent(struct btree_trans *trans,
 			    struct bkey_s_c old, struct bkey_s_c new,
 			    unsigned flags)
 {
-	bool gc = flags & BTREE_TRIGGER_GC;
 	u64 journal_seq = trans->journal_res.seq;
 	struct bch_fs *c = trans->c;
 	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
@@ -972,6 +962,8 @@ static int bch2_mark_extent(struct btree_trans *trans,
 	bool stale;
 	int ret;

+	BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
 	r.e.data_type = data_type;
 	r.e.nr_devs = 0;
 	r.e.nr_required = 1;
@@ -992,7 +984,7 @@ static int bch2_mark_extent(struct btree_trans *trans,
 		if (p.ptr.cached) {
 			if (!stale) {
 				ret = update_cached_sectors(c, k, p.ptr.dev,
-						disk_sectors, journal_seq, gc);
+						disk_sectors, journal_seq, true);
 				if (ret) {
 					bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
 					return ret;
@@ -1017,7 +1009,7 @@ static int bch2_mark_extent(struct btree_trans *trans,
 	}

 	if (r.e.nr_devs) {
-		ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, gc);
+		ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
 		if (ret) {
 			char buf[200];

@@ -1168,6 +1160,8 @@ static int bch2_mark_reservation(struct btree_trans *trans,
 	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
 	s64 sectors = (s64) k.k->size;

+	BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
 	if (flags & BTREE_TRIGGER_OVERWRITE)
 		sectors = -sectors;
 	sectors *= replicas;
@@ -1242,6 +1236,8 @@ static int bch2_mark_reflink_p(struct btree_trans *trans,
 	u64 end = le64_to_cpu(p.v->idx) + p.k->size;
 	int ret = 0;

+	BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
 	if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
 		idx -= le32_to_cpu(p.v->front_pad);
 		end += le32_to_cpu(p.v->back_pad);
@@ -53,6 +53,11 @@ static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
 	return buckets->b + b;
 }

+static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
+{
+	return __bucket(ca, b, true);
+}
+
 static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
 {
 	return __bucket(ca, b, false);
@@ -75,10 +80,15 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
 }

 static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
-					const struct bch_extent_ptr *ptr,
-					bool gc)
+					const struct bch_extent_ptr *ptr)
 {
-	return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
+	return bucket(ca, PTR_BUCKET_NR(ca, ptr));
+}
+
+static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
+					   const struct bch_extent_ptr *ptr)
+{
+	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
 }

 static inline enum bch_data_type ptr_data_type(const struct bkey *k,
@@ -113,7 +123,7 @@ static inline u8 ptr_stale(struct bch_dev *ca,
 	u8 ret;

 	rcu_read_lock();
-	ret = gen_after(PTR_BUCKET(ca, ptr, 0)->mark.gen, ptr->gen);
+	ret = gen_after(PTR_BUCKET(ca, ptr)->mark.gen, ptr->gen);
 	rcu_read_unlock();

 	return ret;