bcachefs: bch2_dev_get_ioref() checks for device not present
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 465bf6f42a
commit 2c91ab7262
@@ -1679,7 +1679,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 
         struct bch_dev *ca = s->ca && s->ca->dev_idx == pos.inode
                 ? s->ca
-                : bch2_dev_get_ioref2(c, pos.inode, WRITE);
+                : bch2_dev_get_ioref(c, pos.inode, WRITE);
         if (!ca) {
                 bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
                 return 0;
@@ -1860,7 +1860,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
                 if (i->snapshot)
                         continue;
 
-                ca = bch2_dev_get_ioref2(c, i->inode, WRITE);
+                ca = bch2_dev_get_ioref(c, i->inode, WRITE);
                 if (!ca) {
                         darray_remove_item(&c->discard_buckets_in_flight, i);
                         continue;
@@ -486,7 +486,7 @@ found:
 
         bytes = p.crc.compressed_size << 9;
 
-        struct bch_dev *ca = bch2_dev_get_ioref2(c, dev, READ);
+        struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ);
         if (!ca)
                 return false;
 
@@ -1308,7 +1308,7 @@ static void btree_node_read_work(struct work_struct *work)
         while (1) {
                 retry = true;
                 bch_info(c, "retrying read");
-                ca = bch2_dev_get_ioref2(c, rb->pick.ptr.dev, READ);
+                ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
                 rb->have_ioref = ca != NULL;
                 bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
                 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
@@ -1618,7 +1618,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
         i = 0;
         bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
-                struct bch_dev *ca = bch2_dev_get_ioref2(c, pick.ptr.dev, READ);
+                struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
                 struct btree_read_bio *rb =
                         container_of(ra->bio[i], struct btree_read_bio, bio);
                 rb->c = c;
@@ -1695,7 +1695,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
                 return;
         }
 
-        ca = bch2_dev_get_ioref2(c, pick.ptr.dev, READ);
+        ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
 
         bio = bio_alloc_bioset(NULL,
                                buf_pages(b->data, btree_buf_bytes(b)),
@@ -40,7 +40,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
         struct bio *bio;
         bool failed = false, saw_error = false;
 
-        struct bch_dev *ca = bch2_dev_get_ioref2(c, pick.ptr.dev, READ);
+        struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
         if (!ca)
                 return false;
 
@@ -194,7 +194,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
                 return;
         }
 
-        ca = bch2_dev_get_ioref2(c, pick.ptr.dev, READ);
+        ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
         if (!ca) {
                 prt_printf(out, "error getting device to read from: not online\n");
                 return;
@@ -732,12 +732,17 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
         struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
         unsigned offset = 0, bytes = buf->size << 9;
         struct bch_extent_ptr *ptr = &v->ptrs[idx];
-        struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
         enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
                 ? BCH_DATA_user
                 : BCH_DATA_parity;
         int rw = op_is_write(opf);
 
+        struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
+        if (!ca) {
+                clear_bit(idx, buf->valid);
+                return;
+        }
+
         if (dev_ptr_stale(ca, ptr)) {
                 bch_err_ratelimited(c,
                         "error %s stripe: stale pointer",
@@ -746,10 +751,6 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
                 return;
         }
 
-        if (!bch2_dev_get_ioref(ca, rw)) {
-                clear_bit(idx, buf->valid);
-                return;
-        }
-
         this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
 
@@ -1354,20 +1355,18 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
                                        unsigned block,
                                        struct open_bucket *ob)
 {
-        struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
-        unsigned offset = ca->mi.bucket_size - ob->sectors_free;
-        int ret;
-
-        if (!bch2_dev_get_ioref(ca, WRITE)) {
+        struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
+        if (!ca) {
                 s->err = -BCH_ERR_erofs_no_writes;
                 return;
         }
 
+        unsigned offset = ca->mi.bucket_size - ob->sectors_free;
         memset(s->new_stripe.data[block] + (offset << 9),
                0,
                ob->sectors_free << 9);
 
-        ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
+        int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
                         ob->bucket * ca->mi.bucket_size + offset,
                         ob->sectors_free,
                         GFP_KERNEL, 0);
@@ -830,7 +830,7 @@ retry_pick:
                 goto err;
         }
 
-        struct bch_dev *ca = bch2_dev_get_ioref2(c, pick.ptr.dev, READ);
+        struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
 
         /*
          * Stale dirty pointers are treated as IO errors, but @failed isn't
@@ -409,7 +409,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
         bkey_for_each_ptr(ptrs, ptr) {
                 struct bch_dev *ca = nocow
                         ? bch2_dev_have_ref(c, ptr->dev)
-                        : bch2_dev_get_ioref2(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
+                        : bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
 
                 if (to_entry(ptr + 1) < ptrs.end) {
                         n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));
@@ -1265,7 +1265,7 @@ retry:
         /* Get iorefs before dropping btree locks: */
         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
         bkey_for_each_ptr(ptrs, ptr) {
-                struct bch_dev *ca = bch2_dev_get_ioref2(c, ptr->dev, WRITE);
+                struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
                 if (unlikely(!ca))
                         goto err_get_ioref;
 
@@ -1722,7 +1722,7 @@ static CLOSURE_CALLBACK(journal_write_submit)
         unsigned sectors = vstruct_sectors(w->data, c->block_bits);
 
         extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
-                struct bch_dev *ca = bch2_dev_get_ioref2(c, ptr->dev, WRITE);
+                struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
                 if (!ca) {
                         /* XXX: fix this */
                         bch_err(c, "missing device for journal write\n");
@@ -29,19 +29,6 @@ static inline bool bch2_dev_is_readable(struct bch_dev *ca)
                 ca->mi.state != BCH_MEMBER_STATE_failed;
 }
 
-static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
-{
-        if (!percpu_ref_tryget(&ca->io_ref))
-                return false;
-
-        if (ca->mi.state == BCH_MEMBER_STATE_rw ||
-            (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
-                return true;
-
-        percpu_ref_put(&ca->io_ref);
-        return false;
-}
-
 static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
 {
         return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
@@ -285,7 +272,7 @@ static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev
         return bch2_dev_tryget(c, dev_idx);
 }
 
-static inline struct bch_dev *bch2_dev_get_ioref2(struct bch_fs *c, unsigned dev, int rw)
+static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
 {
         rcu_read_lock();
         struct bch_dev *ca = bch2_dev_rcu(c, dev);
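The rest of the renamed bch2_dev_get_ioref() body is not shown in the hunk above. A minimal sketch of a plausible completion, assuming it pairs the RCU device lookup visible above with the ioref and read/write-state check from the removed per-device helper; this is an illustration, not necessarily the verbatim upstream code:

static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
{
        rcu_read_lock();
        struct bch_dev *ca = bch2_dev_rcu(c, dev);

        /* Device slot may be empty (device not present): return NULL in that case. */
        if (ca && !percpu_ref_tryget(&ca->io_ref))
                ca = NULL;
        rcu_read_unlock();

        if (ca &&
            (ca->mi.state == BCH_MEMBER_STATE_rw ||
             (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
                return ca;

        /* Wrong member state for this access: drop the ref we just took. */
        if (ca)
                percpu_ref_put(&ca->io_ref);
        return NULL;
}

With the lookup folded into the helper, callers no longer call bch2_dev_bkey_exists() and take the ioref themselves; they just check the returned pointer for NULL, which is the pattern each hunk above switches to.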